holobench 1.41.0__py3-none-any.whl → 1.43.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84)
  1. bencher/__init__.py +20 -2
  2. bencher/bench_cfg.py +262 -54
  3. bencher/bench_report.py +2 -2
  4. bencher/bench_runner.py +96 -10
  5. bencher/bencher.py +421 -89
  6. bencher/class_enum.py +70 -7
  7. bencher/example/example_dataframe.py +2 -2
  8. bencher/example/example_levels.py +17 -173
  9. bencher/example/example_pareto.py +107 -31
  10. bencher/example/example_rerun2.py +1 -1
  11. bencher/example/example_simple_bool.py +2 -2
  12. bencher/example/example_simple_float2d.py +6 -1
  13. bencher/example/example_video.py +2 -0
  14. bencher/example/experimental/example_hvplot_explorer.py +2 -2
  15. bencher/example/inputs_0D/example_0_in_1_out.py +25 -15
  16. bencher/example/inputs_0D/example_0_in_2_out.py +12 -3
  17. bencher/example/inputs_0_float/example_0_cat_in_2_out.py +88 -0
  18. bencher/example/inputs_0_float/example_1_cat_in_2_out.py +98 -0
  19. bencher/example/inputs_0_float/example_2_cat_in_2_out.py +107 -0
  20. bencher/example/inputs_0_float/example_3_cat_in_2_out.py +111 -0
  21. bencher/example/inputs_1D/example1d_common.py +48 -12
  22. bencher/example/inputs_1D/example_0_float_1_cat.py +33 -0
  23. bencher/example/inputs_1D/example_1_cat_in_2_out_repeats.py +68 -0
  24. bencher/example/inputs_1D/example_1_float_2_cat_repeats.py +3 -0
  25. bencher/example/inputs_1D/example_1_int_in_1_out.py +98 -0
  26. bencher/example/inputs_1D/example_1_int_in_2_out.py +101 -0
  27. bencher/example/inputs_1D/example_1_int_in_2_out_repeats.py +99 -0
  28. bencher/example/inputs_1_float/example_1_float_0_cat_in_2_out.py +117 -0
  29. bencher/example/inputs_1_float/example_1_float_1_cat_in_2_out.py +124 -0
  30. bencher/example/inputs_1_float/example_1_float_2_cat_in_2_out.py +132 -0
  31. bencher/example/inputs_1_float/example_1_float_3_cat_in_2_out.py +140 -0
  32. bencher/example/inputs_2D/example_2_cat_in_4_out_repeats.py +104 -0
  33. bencher/example/inputs_2_float/example_2_float_0_cat_in_2_out.py +98 -0
  34. bencher/example/inputs_2_float/example_2_float_1_cat_in_2_out.py +112 -0
  35. bencher/example/inputs_2_float/example_2_float_2_cat_in_2_out.py +122 -0
  36. bencher/example/inputs_2_float/example_2_float_3_cat_in_2_out.py +138 -0
  37. bencher/example/inputs_3_float/example_3_float_0_cat_in_2_out.py +111 -0
  38. bencher/example/inputs_3_float/example_3_float_1_cat_in_2_out.py +117 -0
  39. bencher/example/inputs_3_float/example_3_float_2_cat_in_2_out.py +124 -0
  40. bencher/example/inputs_3_float/example_3_float_3_cat_in_2_out.py +129 -0
  41. bencher/example/meta/generate_examples.py +118 -7
  42. bencher/example/meta/generate_meta.py +88 -40
  43. bencher/job.py +174 -9
  44. bencher/plotting/plot_filter.py +52 -17
  45. bencher/results/bench_result.py +117 -25
  46. bencher/results/bench_result_base.py +117 -8
  47. bencher/results/dataset_result.py +6 -200
  48. bencher/results/explorer_result.py +23 -0
  49. bencher/results/{hvplot_result.py → histogram_result.py} +3 -18
  50. bencher/results/holoview_results/__init__.py +0 -0
  51. bencher/results/holoview_results/bar_result.py +79 -0
  52. bencher/results/holoview_results/curve_result.py +110 -0
  53. bencher/results/holoview_results/distribution_result/__init__.py +0 -0
  54. bencher/results/holoview_results/distribution_result/box_whisker_result.py +73 -0
  55. bencher/results/holoview_results/distribution_result/distribution_result.py +109 -0
  56. bencher/results/holoview_results/distribution_result/scatter_jitter_result.py +92 -0
  57. bencher/results/holoview_results/distribution_result/violin_result.py +70 -0
  58. bencher/results/holoview_results/heatmap_result.py +319 -0
  59. bencher/results/holoview_results/holoview_result.py +346 -0
  60. bencher/results/holoview_results/line_result.py +240 -0
  61. bencher/results/holoview_results/scatter_result.py +107 -0
  62. bencher/results/holoview_results/surface_result.py +158 -0
  63. bencher/results/holoview_results/table_result.py +14 -0
  64. bencher/results/holoview_results/tabulator_result.py +20 -0
  65. bencher/results/optuna_result.py +30 -115
  66. bencher/results/video_controls.py +38 -0
  67. bencher/results/video_result.py +39 -36
  68. bencher/results/video_summary.py +2 -2
  69. bencher/results/{plotly_result.py → volume_result.py} +29 -8
  70. bencher/utils.py +175 -26
  71. bencher/variables/inputs.py +122 -15
  72. bencher/video_writer.py +2 -1
  73. bencher/worker_job.py +31 -3
  74. {holobench-1.41.0.dist-info → holobench-1.43.0.dist-info}/METADATA +24 -24
  75. holobench-1.43.0.dist-info/RECORD +147 -0
  76. bencher/example/example_levels2.py +0 -37
  77. bencher/example/inputs_1D/example_1_in_1_out.py +0 -62
  78. bencher/example/inputs_1D/example_1_in_2_out.py +0 -63
  79. bencher/example/inputs_1D/example_1_in_2_out_repeats.py +0 -61
  80. bencher/results/holoview_result.py +0 -796
  81. bencher/results/panel_result.py +0 -41
  82. holobench-1.41.0.dist-info/RECORD +0 -114
  83. {holobench-1.41.0.dist-info → holobench-1.43.0.dist-info}/WHEEL +0 -0
  84. {holobench-1.41.0.dist-info → holobench-1.43.0.dist-info}/licenses/LICENSE +0 -0
@@ -5,14 +5,14 @@ import random
5
5
 
6
6
 
7
7
  class SimpleFloat0D(bch.ParametrizedSweep):
8
- """This class has 0 input dimensions and 1 output dimensions. It samples from a gaussian distribution"""
8
+ """This class has 0 input dimensions and 2 output dimensions. It samples from Gaussian distributions"""
9
9
 
10
10
  # This defines a variable that we want to plot
11
11
  output1 = bch.ResultVar(units="ul", doc="a sample from a gaussian distribution")
12
12
  output2 = bch.ResultVar(units="ul", doc="a sample from a gaussian distribution")
13
13
 
14
14
  def __call__(self, **kwargs) -> dict:
15
- """Generate a sample from a uniform distribution
15
+ """Generate samples from Gaussian distributions
16
16
 
17
17
  Returns:
18
18
  dict: a dictionary with all the result variables in the ParametrisedSweep class as named key value pairs.
@@ -26,7 +26,16 @@ class SimpleFloat0D(bch.ParametrizedSweep):
26
26
  def example_0_in_2_out(
27
27
  run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None
28
28
  ) -> bch.Bench:
29
- """This example shows how to sample a 1 dimensional float variable and plot the result of passing that parameter sweep to the benchmarking function"""
29
+ """This example shows how to sample a 0-dimensional variable (no input parameters)
30
+ that produces two output values and plot the results.
31
+
32
+ Args:
33
+ run_cfg: Configuration for the benchmark run
34
+ report: Report to append the results to
35
+
36
+ Returns:
37
+ bch.Bench: The benchmark object
38
+ """
30
39
 
31
40
  bench = SimpleFloat0D().to_bench(run_cfg, report)
32
41
  bench.plot_sweep()
@@ -0,0 +1,88 @@
1
+ """This file demonstrates benchmarking with 0 categorical inputs and 2 output variables.
2
+
3
+ It benchmarks a single Python operation configuration to showcase output variations
4
+ using simulated performance data to illustrate how benchmarking works with fixed inputs.
5
+ """
6
+
7
+ import random
8
+ import bencher as bch
9
+
10
+ random.seed(0)
11
+
12
+
13
+ class PythonOperations0CatBenchmark(bch.ParametrizedSweep):
14
+ """Example class for benchmarking with no categorical variables.
15
+
16
+ This class demonstrates how to structure a benchmark with fixed inputs
17
+ and multiple output metrics. It uses simulated performance data that follows realistic
18
+ patterns while being deterministic and reproducible.
19
+ """
20
+
21
+ # All inputs are fixed (list data structure, read operation, medium size)
22
+
23
+ execution_time = bch.ResultVar(units="ms", doc="Execution time in milliseconds")
24
+ memory_peak = bch.ResultVar(units="KB", doc="Peak memory usage in kilobytes")
25
+
26
+ def __call__(self, **kwargs) -> dict:
27
+ """Execute the benchmark for the given set of parameters.
28
+
29
+ Args:
30
+ **kwargs: Parameters to update before executing
31
+
32
+ Returns:
33
+ dict: Dictionary containing the benchmark results
34
+ """
35
+ self.update_params_from_kwargs(**kwargs)
36
+
37
+ # Base values for list read operation on medium data
38
+ base_time = 28.0 # ms
39
+ base_memory = 960.0 # KB
40
+
41
+ # Add significant variance to show distribution of results
42
+ # even with fixed inputs
43
+ self.execution_time = base_time * random.gauss(0.75, 1.25)
44
+ self.memory_peak = base_memory * random.gauss(0.80, 1.20)
45
+
46
+ return super().__call__(**kwargs)
47
+
48
+
49
+ def example_0_cat_in_2_out(
50
+ run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None
51
+ ) -> bch.Bench:
52
+ """This example demonstrates benchmarking with no categorical variables and multiple output metrics.
53
+
54
+ It creates a synthetic benchmark that simulates performance variations when repeatedly running
55
+ the same operation (list read operation on medium data). This example shows that even with
56
+ fixed inputs, repeated benchmark runs produce variations in performance metrics.
57
+
58
+ Args:
59
+ run_cfg: Configuration for the benchmark run
60
+ report: Report to append the results to
61
+
62
+ Returns:
63
+ bch.Bench: The benchmark object
64
+ """
65
+
66
+ if run_cfg is None:
67
+ run_cfg = bch.BenchRunCfg()
68
+ run_cfg.repeats = 100 # More repeats to show distribution
69
+ bench = PythonOperations0CatBenchmark().to_bench(run_cfg, report)
70
+ bench.plot_sweep(
71
+ title="Python List Read Operation Performance (Fixed Inputs)",
72
+ description="Distribution of execution time and peak memory usage across multiple runs",
73
+ post_description="""
74
+ This benchmark illustrates how performance metrics vary even with fixed inputs.
75
+
76
+ Key observations:
77
+ - Each run uses the same configuration: list data structure, read operation, medium data size
78
+ - Despite identical inputs, performance metrics show natural variations
79
+ - This variance simulates real-world system fluctuations that occur in benchmarking
80
+ - With no categorical variables, the benchmark helps establish baseline performance
81
+ distribution for a single configuration
82
+ """,
83
+ )
84
+ return bench
85
+
86
+
87
+ if __name__ == "__main__":
88
+ example_0_cat_in_2_out().report.show()
@@ -0,0 +1,98 @@
1
+ """This file demonstrates benchmarking with 1 categorical input and 2 output variables.
2
+
3
+ It benchmarks different Python data structures to compare their performance characteristics
4
+ using simulated performance data to illustrate how benchmarking works.
5
+ """
6
+
7
+ import random
8
+ import bencher as bch
9
+
10
+ random.seed(0)
11
+
12
+
13
+ class PythonOperations1CatBenchmark(bch.ParametrizedSweep):
14
+ """Example class for benchmarking different Python data structures using 1 categorical variable.
15
+
16
+ This class demonstrates how to structure a benchmark with a single input parameter
17
+ and multiple output metrics. It uses simulated performance data that follows realistic
18
+ patterns while being deterministic and reproducible.
19
+ """
20
+
21
+ data_structure = bch.StringSweep(["list", "dict"], doc="Type of data structure to operate on")
22
+
23
+ # Using fixed read operation and medium size data
24
+
25
+ execution_time = bch.ResultVar(units="ms", doc="Execution time in milliseconds")
26
+ memory_peak = bch.ResultVar(units="KB", doc="Peak memory usage in kilobytes")
27
+
28
+ def __call__(self, **kwargs) -> dict:
29
+ """Execute the benchmark for the given set of parameters.
30
+
31
+ Args:
32
+ **kwargs: Parameters to update before executing
33
+
34
+ Returns:
35
+ dict: Dictionary containing the benchmark results
36
+ """
37
+ self.update_params_from_kwargs(**kwargs)
38
+
39
+ # Use deterministic fake data based on parameters
40
+ # Base values (for read operation on medium data)
41
+ base_time = 35.0 # ms (medium size, read operation base)
42
+ base_memory = 800.0 # KB (medium size, read operation base)
43
+
44
+ # Adjust for data structure (lists are generally faster but use more memory)
45
+ if self.data_structure == "list":
46
+ time_factor = 0.8
47
+ memory_factor = 1.2
48
+ else: # dict
49
+ time_factor = 1.2
50
+ memory_factor = 0.9
51
+
52
+ # Calculate final metrics with significant variance to show differences
53
+ self.execution_time = base_time * time_factor * random.gauss(0.80, 1.20)
54
+ self.memory_peak = base_memory * memory_factor * random.gauss(0.85, 1.15)
55
+
56
+ return super().__call__(**kwargs)
57
+
58
+
59
+ def example_1_cat_in_2_out(
60
+ run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None
61
+ ) -> bch.Bench:
62
+ """This example demonstrates benchmarking with 1 categorical variable and multiple output metrics.
63
+
64
+ It creates a synthetic benchmark that simulates performance characteristics of different
65
+ Python data structures (list vs dict) using a fixed read operation and medium data size.
66
+ The benchmark produces realistic patterns of execution time and memory usage without
67
+ actually executing real operations, making it ideal for learning and demonstration.
68
+
69
+ Args:
70
+ run_cfg: Configuration for the benchmark run
71
+ report: Report to append the results to
72
+
73
+ Returns:
74
+ bch.Bench: The benchmark object
75
+ """
76
+
77
+ if run_cfg is None:
78
+ run_cfg = bch.BenchRunCfg()
79
+ run_cfg.repeats = 5 # Fewer repeats for a quicker benchmark
80
+ bench = PythonOperations1CatBenchmark().to_bench(run_cfg, report)
81
+ bench.plot_sweep(
82
+ title="Python Data Structure Performance Benchmark (1 Variable)",
83
+ description="Comparing execution time and peak memory usage between lists and dictionaries",
84
+ post_description="""
85
+ This benchmark illustrates how different data structures affect performance.
86
+
87
+ Key observations:
88
+ - Lists generally process faster than dictionaries for read operations
89
+ - However, lists consume more memory than dictionaries
90
+ - All tests were performed with read operations on medium-sized datasets
91
+ - Note that variance in the results simulates real-world measurement fluctuations
92
+ """,
93
+ )
94
+ return bench
95
+
96
+
97
+ if __name__ == "__main__":
98
+ example_1_cat_in_2_out().report.show()
@@ -0,0 +1,107 @@
1
+ """This file demonstrates benchmarking with 2 categorical inputs and 2 output variables.
2
+
3
+ It benchmarks different Python operations to compare their performance characteristics
4
+ using simulated performance data to illustrate how benchmarking works.
5
+ """
6
+
7
+ import random
8
+ import bencher as bch
9
+
10
+ random.seed(0)
11
+
12
+
13
+ class PythonOperations2CatBenchmark(bch.ParametrizedSweep):
14
+ """Example class for benchmarking different Python operations using 2 categorical variables.
15
+
16
+ This class demonstrates how to structure a benchmark with two input parameters
17
+ and multiple output metrics. It uses simulated performance data that follows realistic
18
+ patterns while being deterministic and reproducible.
19
+ """
20
+
21
+ data_structure = bch.StringSweep(["list", "dict"], doc="Type of data structure to operate on")
22
+ operation_type = bch.StringSweep(["read", "write"], doc="Type of operation to perform")
23
+
24
+ # Using fixed medium size data instead of a variable
25
+
26
+ execution_time = bch.ResultVar(units="ms", doc="Execution time in milliseconds")
27
+ memory_peak = bch.ResultVar(units="KB", doc="Peak memory usage in kilobytes")
28
+
29
+ def __call__(self, **kwargs) -> dict:
30
+ """Execute the benchmark for the given set of parameters.
31
+
32
+ Args:
33
+ **kwargs: Parameters to update before executing
34
+
35
+ Returns:
36
+ dict: Dictionary containing the benchmark results
37
+ """
38
+ self.update_params_from_kwargs(**kwargs)
39
+
40
+ # Use deterministic fake data based on parameters
41
+ # Base values that will be modified by our parameters
42
+ base_time = 50.0 # ms (medium size base)
43
+ base_memory = 1000.0 # KB (medium size base)
44
+
45
+ # Adjust for data structure (lists are generally faster but use more memory)
46
+ if self.data_structure == "list":
47
+ time_factor = 0.8
48
+ memory_factor = 1.2
49
+ else: # dict
50
+ time_factor = 1.2
51
+ memory_factor = 0.9
52
+
53
+ # Adjust for operation type (reads are faster than writes)
54
+ if self.operation_type == "read":
55
+ time_factor *= 0.7
56
+ memory_factor *= 0.8
57
+ else: # write
58
+ time_factor *= 1.4
59
+ memory_factor *= 1.3
60
+
61
+ # Calculate final metrics with variance
62
+ self.execution_time = base_time * time_factor * random.gauss(0.85, 1.15)
63
+ self.memory_peak = base_memory * memory_factor * random.gauss(0.90, 1.10)
64
+
65
+ return super().__call__(**kwargs)
66
+
67
+
68
+ def example_2_cat_in_2_out(
69
+ run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None
70
+ ) -> bch.Bench:
71
+ """This example demonstrates benchmarking with 2 categorical variables and multiple output metrics.
72
+
73
+ It creates a synthetic benchmark that simulates performance characteristics of different
74
+ Python operations, varying data structures and operation types using a fixed medium data size.
75
+ The benchmark produces realistic patterns of execution time and memory usage without actually
76
+ executing real operations, making it ideal for learning and demonstration.
77
+
78
+ Args:
79
+ run_cfg: Configuration for the benchmark run
80
+ report: Report to append the results to
81
+
82
+ Returns:
83
+ bch.Bench: The benchmark object
84
+ """
85
+
86
+ if run_cfg is None:
87
+ run_cfg = bch.BenchRunCfg()
88
+ run_cfg.repeats = 5 # Fewer repeats for a quicker benchmark
89
+ bench = PythonOperations2CatBenchmark().to_bench(run_cfg, report)
90
+ bench.plot_sweep(
91
+ title="Python Operations Performance Benchmark (2 Variables)",
92
+ description="Comparing execution time and peak memory usage across Python data structures and operations",
93
+ post_description="""
94
+ This benchmark illustrates how different data structures and operations affect performance.
95
+
96
+ Key observations:
97
+ - Lists generally process faster than dictionaries for these operations
98
+ - Read operations outperform write operations as expected
99
+ - All tests were performed with a fixed medium-sized dataset
100
+ - Note that variance in the results simulates real-world measurement fluctuations
101
+ """,
102
+ )
103
+ return bench
104
+
105
+
106
+ if __name__ == "__main__":
107
+ example_2_cat_in_2_out().report.show()
@@ -0,0 +1,111 @@
1
+ """This file demonstrates benchmarking with 3 categorical inputs and 2 output variables.
2
+
3
+ It benchmarks different Python operations to compare their performance characteristics
4
+ using simulated performance data to illustrate how benchmarking works.
5
+ """
6
+
7
+ import random
8
+ import bencher as bch
9
+
10
+ random.seed(0)
11
+
12
+
13
+ class PythonOperationsBenchmark(bch.ParametrizedSweep):
14
+ """Example class for benchmarking different Python operations using categorical variables.
15
+
16
+ This class demonstrates how to structure a benchmark with multiple input parameters
17
+ and multiple output metrics. It uses simulated performance data that follows realistic
18
+ patterns while being deterministic and reproducible.
19
+ """
20
+
21
+ data_structure = bch.StringSweep(["list", "dict"], doc="Type of data structure to operate on")
22
+ operation_type = bch.StringSweep(["read", "write"], doc="Type of operation to perform")
23
+ data_size = bch.StringSweep(["small", "medium"], doc="Size of data to process")
24
+
25
+ execution_time = bch.ResultVar(units="ms", doc="Execution time in milliseconds")
26
+ memory_peak = bch.ResultVar(units="KB", doc="Peak memory usage in kilobytes")
27
+
28
+ def __call__(self, **kwargs) -> dict:
29
+ """Execute the benchmark for the given set of parameters.
30
+
31
+ Args:
32
+ **kwargs: Parameters to update before executing
33
+
34
+ Returns:
35
+ dict: Dictionary containing the benchmark results
36
+ """
37
+ self.update_params_from_kwargs(**kwargs)
38
+
39
+ # Use deterministic fake data based on parameters
40
+ # Base values that will be modified by our parameters
41
+ base_time = 10.0 # ms
42
+ base_memory = 100.0 # KB
43
+
44
+ # Adjust for data structure (lists are generally faster but use more memory)
45
+ if self.data_structure == "list":
46
+ time_factor = 0.8
47
+ memory_factor = 1.2
48
+ else: # dict
49
+ time_factor = 1.2
50
+ memory_factor = 0.9
51
+
52
+ # Adjust for operation type (reads are faster than writes)
53
+ if self.operation_type == "read":
54
+ time_factor *= 0.7
55
+ memory_factor *= 0.8
56
+ else: # write
57
+ time_factor *= 1.4
58
+ memory_factor *= 1.3
59
+
60
+ # Adjust for data size
61
+ if self.data_size == "medium":
62
+ time_factor *= 5
63
+ memory_factor *= 10
64
+
65
+ # Calculate final metrics with increased variance
66
+ self.execution_time = base_time * time_factor * random.gauss(0.85, 1.15)
67
+ self.memory_peak = base_memory * memory_factor * random.gauss(0.90, 1.10)
68
+
69
+ return super().__call__(**kwargs)
70
+
71
+
72
+ def example_3_cat_in_2_out(
73
+ run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None
74
+ ) -> bch.Bench:
75
+ """This example demonstrates benchmarking with categorical variables and multiple output metrics.
76
+
77
+ It creates a synthetic benchmark that simulates performance characteristics of different
78
+ Python operations, varying data structures, operation types, and data sizes. The benchmark
79
+ produces realistic patterns of execution time and memory usage without actually executing
80
+ real operations, making it ideal for learning and demonstration.
81
+
82
+ Args:
83
+ run_cfg: Configuration for the benchmark run
84
+ report: Report to append the results to
85
+
86
+ Returns:
87
+ bch.Bench: The benchmark object
88
+ """
89
+
90
+ if run_cfg is None:
91
+ run_cfg = bch.BenchRunCfg()
92
+ run_cfg.repeats = 5 # Fewer repeats for a quicker benchmark
93
+ bench = PythonOperationsBenchmark().to_bench(run_cfg, report)
94
+ bench.plot_sweep(
95
+ title="Python Operations Performance Benchmark",
96
+ description="Comparing execution time and peak memory usage across Python data structures and operations",
97
+ post_description="""
98
+ This benchmark illustrates how different data structures and operations affect performance.
99
+
100
+ Key observations:
101
+ - Lists generally process faster than dictionaries for these operations
102
+ - Read operations outperform write operations as expected
103
+ - Medium-sized data requires significantly more resources than small data
104
+ - Note that variance in the results simulates real-world measurement fluctuations
105
+ """,
106
+ )
107
+ return bench
108
+
109
+
110
+ if __name__ == "__main__":
111
+ example_3_cat_in_2_out().report.show()
@@ -4,7 +4,10 @@ import bencher as bch
4
4
 
5
5
 
6
6
  class DataSource:
7
+ """A simple data source class that provides access to predefined data points."""
8
+
7
9
  def __init__(self):
10
+ """Initialize the data source with predefined values and call counts."""
8
11
  self.data = [
9
12
  [0, 0, 0, 0],
10
13
  [1, 1, 1, 1],
@@ -16,7 +19,16 @@ class DataSource:
16
19
 
17
20
  self.call_count = [0] * len(self.data)
18
21
 
19
- def call(self, index, repeat=None):
22
+ def call(self, index: int, repeat: int = None) -> int:
23
+ """Retrieve a data point at the specified index and repeat count.
24
+
25
+ Args:
26
+ index: The index of the data row to access
27
+ repeat: The specific repeat count to use. If None, uses and increments internal counter
28
+
29
+ Returns:
30
+ int: The value at the specified index and repeat position
31
+ """
20
32
  if repeat is None:
21
33
  self.call_count[index] += 1
22
34
  repeat = self.call_count[index]
@@ -24,16 +36,31 @@ class DataSource:
24
36
 
25
37
 
26
38
  class Example1D(bch.ParametrizedSweep):
27
- index = bch.IntSweep(default=0, bounds=[0, 5], doc="Input angle", units="rad", samples=30)
28
- output = bch.ResultVar(units="v", doc="sin of theta")
29
- output2 = bch.ResultVar(units="v", doc="-sin of theta")
39
+ """Example 1D parameter sweep class with one input and two output dimensions."""
40
+
41
+ index = bch.IntSweep(default=0, bounds=[0, 5], doc="Input index", units="rad", samples=30)
42
+ output = bch.ResultVar(units="v", doc="Output value from data source 1")
43
+ output2 = bch.ResultVar(units="v", doc="Negated output value from data source 2")
30
44
 
31
45
  def __init__(self, **params):
46
+ """Initialize the Example1D sweep with two data sources.
47
+
48
+ Args:
49
+ **params: Parameters to pass to the parent class constructor
50
+ """
32
51
  super().__init__(**params)
33
52
  self.data1 = DataSource()
34
53
  self.data2 = DataSource()
35
54
 
36
- def __call__(self, **kwargs):
55
+ def __call__(self, **kwargs) -> dict:
56
+ """Execute the parameter sweep for the given parameters.
57
+
58
+ Args:
59
+ **kwargs: Additional parameters to update before executing
60
+
61
+ Returns:
62
+ dict: Dictionary containing the outputs of the parameter sweep
63
+ """
37
64
  self.update_params_from_kwargs(**kwargs)
38
65
  self.output = self.data1.call(self.index)
39
66
  self.output2 = -self.data2.call(self.index)
@@ -43,7 +70,16 @@ class Example1D(bch.ParametrizedSweep):
43
70
  def example_1_in_2_out(
44
71
  run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None
45
72
  ) -> bch.Bench:
46
- """This example shows how to sample a 1 dimensional float variable and plot the result of passing that parameter sweep to the benchmarking function"""
73
+ """This example shows how to sample a 1-dimensional integer variable and plot
74
+ the result of two output variables from that parameter sweep.
75
+
76
+ Args:
77
+ run_cfg: Configuration for the benchmark run
78
+ report: Report to append the results to
79
+
80
+ Returns:
81
+ bch.Bench: The benchmark object
82
+ """
47
83
  bench = Example1D().to_bench(run_cfg, report)
48
84
  bench.plot_sweep()
49
85
 
@@ -53,17 +89,17 @@ def example_1_in_2_out(
53
89
 
54
90
  if __name__ == "__main__":
55
91
  run_config = bch.BenchRunCfg()
56
- reprt = bch.BenchReport()
57
- example_1_in_2_out(run_config, reprt)
92
+ report_obj = bch.BenchReport()
93
+ example_1_in_2_out(run_config, report_obj)
58
94
 
59
95
  run_config.repeats = 4
60
- example_1_in_2_out(run_config, reprt)
96
+ example_1_in_2_out(run_config, report_obj)
61
97
 
62
98
  # run_config.over_time = True
63
99
  # run_config.auto_plot = False
64
100
  # for i in range(4):
65
- # example_1_in_2_out(run_config, reprt)
101
+ # example_1_in_2_out(run_config, report_obj)
66
102
 
67
103
  # run_config.auto_plot = True
68
- # example_1_in_2_out(run_config, reprt)
69
- reprt.show()
104
+ # example_1_in_2_out(run_config, report_obj)
105
+ report_obj.show()
@@ -0,0 +1,33 @@
1
+ """This file demonstrates benchmarking with both categorical and float variables."""
2
+
3
+ import bencher as bch
4
+ from bencher.example.meta.example_meta import BenchableObject
5
+
6
+ run_cfg = bch.BenchRunCfg()
7
+ run_cfg.repeats = 2 # only shows distance
8
+ run_cfg.level = 4
9
+ bench = BenchableObject().to_bench(run_cfg)
10
+ # bench.worker_class_instance.float2=0.2
11
+ run_cfg.repeats = 1
12
+ # WORKS
13
+ # shows both distance and simple noise
14
+ res = bench.plot_sweep(input_vars=["float1"], result_vars=["distance", "sample_noise"])
15
+
16
+ # WORKS
17
+ # shows both distance and simple noise
18
+ res = bench.plot_sweep(input_vars=["noisy"], result_vars=["distance", "sample_noise"])
19
+
20
+
21
+ run_cfg.repeats = 10 # If i set repeats>1 then floating point variables still work but categorical variables do not
22
+ # WORKS
23
+ # shows both distance and simple noise
24
+ res = bench.plot_sweep(input_vars=["float1"], result_vars=["distance", "sample_noise"])
25
+
26
+ # BUG
27
+ # only shows distance result var, ignores sample_noise
28
+ res = bench.plot_sweep(input_vars=["noisy"], result_vars=["distance", "sample_noise"])
29
+
30
+
31
+ bench.report.append(res.to_tabulator())
32
+ # bench.report.append(res.to_scatter_jitter_single(BenchableObject.param.sample_noise))
33
+ bench.report.show()
@@ -0,0 +1,68 @@
1
+ """This file demonstrates benchmarking with categorical inputs and multiple outputs with repeats."""
2
+
3
+ import random
4
+ import bencher as bch
5
+
6
+ random.seed(0)
7
+
8
+
9
+ class DataStructureBenchmark(bch.ParametrizedSweep):
10
+ """Example class for comparing different data structure operations with two output variables."""
11
+
12
+ operation = bch.StringSweep(
13
+ ["list_append", "dict_insert"],
14
+ doc="Type of data structure operation to benchmark",
15
+ )
16
+ execution_time = bch.ResultVar(units="ms", doc="Time taken to complete operations")
17
+ memory_usage = bch.ResultVar(units="KB", doc="Memory used by the operation")
18
+
19
+ def __call__(self, **kwargs) -> dict:
20
+ """Execute the parameter sweep for the given data structure operation.
21
+
22
+ Args:
23
+ **kwargs: Additional parameters to update before executing
24
+
25
+ Returns:
26
+ dict: Dictionary containing the outputs of the parameter sweep
27
+ """
28
+ self.update_params_from_kwargs(**kwargs)
29
+
30
+ # Simple simulations of different data structure operations
31
+ # In a real benchmark, you would implement or measure actual operations
32
+
33
+ if self.operation == "list_append":
34
+ # List append operations (typically fast for adding elements)
35
+ self.execution_time = random.gauss(mu=5.0, sigma=1.0)
36
+ self.memory_usage = random.gauss(mu=120.0, sigma=20.0)
37
+ elif self.operation == "dict_insert":
38
+ # Dictionary insertions (hash table operations)
39
+ self.execution_time = random.gauss(mu=6.5, sigma=1.2)
40
+ self.memory_usage = random.gauss(mu=180.0, sigma=25.0)
41
+
42
+ return super().__call__(**kwargs)
43
+
44
+
45
+ def example_1_cat_in_2_out_repeats(
46
+ run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None
47
+ ) -> bch.Bench:
48
+ """This example shows how to benchmark different data structure operations with multiple repeats
49
+ and plot the results of execution time and memory usage.
50
+
51
+ Args:
52
+ run_cfg: Configuration for the benchmark run
53
+ report: Report to append the results to
54
+
55
+ Returns:
56
+ bch.Bench: The benchmark object
57
+ """
58
+
59
+ if run_cfg is None:
60
+ run_cfg = bch.BenchRunCfg()
61
+ run_cfg.repeats = 30 # Increased repeats for better statistical significance
62
+ bench = DataStructureBenchmark().to_bench(run_cfg, report)
63
+ bench.plot_sweep()
64
+ return bench
65
+
66
+
67
+ if __name__ == "__main__":
68
+ example_1_cat_in_2_out_repeats().report.show()
@@ -1,6 +1,9 @@
1
+ """This file demonstrates benchmarking with both float and categorical variables with repeats."""
2
+
1
3
  import bencher as bch
2
4
  from bencher.example.meta.example_meta import BenchableObject
3
5
 
6
+ # Configure and run a benchmark with multiple input types and repeats
4
7
  run_cfg = bch.BenchRunCfg()
5
8
  run_cfg.repeats = 20
6
9
  run_cfg.level = 4