holobench-1.41.0-py3-none-any.whl → holobench-1.43.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- bencher/__init__.py +20 -2
- bencher/bench_cfg.py +262 -54
- bencher/bench_report.py +2 -2
- bencher/bench_runner.py +96 -10
- bencher/bencher.py +421 -89
- bencher/class_enum.py +70 -7
- bencher/example/example_dataframe.py +2 -2
- bencher/example/example_levels.py +17 -173
- bencher/example/example_pareto.py +107 -31
- bencher/example/example_rerun2.py +1 -1
- bencher/example/example_simple_bool.py +2 -2
- bencher/example/example_simple_float2d.py +6 -1
- bencher/example/example_video.py +2 -0
- bencher/example/experimental/example_hvplot_explorer.py +2 -2
- bencher/example/inputs_0D/example_0_in_1_out.py +25 -15
- bencher/example/inputs_0D/example_0_in_2_out.py +12 -3
- bencher/example/inputs_0_float/example_0_cat_in_2_out.py +88 -0
- bencher/example/inputs_0_float/example_1_cat_in_2_out.py +98 -0
- bencher/example/inputs_0_float/example_2_cat_in_2_out.py +107 -0
- bencher/example/inputs_0_float/example_3_cat_in_2_out.py +111 -0
- bencher/example/inputs_1D/example1d_common.py +48 -12
- bencher/example/inputs_1D/example_0_float_1_cat.py +33 -0
- bencher/example/inputs_1D/example_1_cat_in_2_out_repeats.py +68 -0
- bencher/example/inputs_1D/example_1_float_2_cat_repeats.py +3 -0
- bencher/example/inputs_1D/example_1_int_in_1_out.py +98 -0
- bencher/example/inputs_1D/example_1_int_in_2_out.py +101 -0
- bencher/example/inputs_1D/example_1_int_in_2_out_repeats.py +99 -0
- bencher/example/inputs_1_float/example_1_float_0_cat_in_2_out.py +117 -0
- bencher/example/inputs_1_float/example_1_float_1_cat_in_2_out.py +124 -0
- bencher/example/inputs_1_float/example_1_float_2_cat_in_2_out.py +132 -0
- bencher/example/inputs_1_float/example_1_float_3_cat_in_2_out.py +140 -0
- bencher/example/inputs_2D/example_2_cat_in_4_out_repeats.py +104 -0
- bencher/example/inputs_2_float/example_2_float_0_cat_in_2_out.py +98 -0
- bencher/example/inputs_2_float/example_2_float_1_cat_in_2_out.py +112 -0
- bencher/example/inputs_2_float/example_2_float_2_cat_in_2_out.py +122 -0
- bencher/example/inputs_2_float/example_2_float_3_cat_in_2_out.py +138 -0
- bencher/example/inputs_3_float/example_3_float_0_cat_in_2_out.py +111 -0
- bencher/example/inputs_3_float/example_3_float_1_cat_in_2_out.py +117 -0
- bencher/example/inputs_3_float/example_3_float_2_cat_in_2_out.py +124 -0
- bencher/example/inputs_3_float/example_3_float_3_cat_in_2_out.py +129 -0
- bencher/example/meta/generate_examples.py +118 -7
- bencher/example/meta/generate_meta.py +88 -40
- bencher/job.py +174 -9
- bencher/plotting/plot_filter.py +52 -17
- bencher/results/bench_result.py +117 -25
- bencher/results/bench_result_base.py +117 -8
- bencher/results/dataset_result.py +6 -200
- bencher/results/explorer_result.py +23 -0
- bencher/results/{hvplot_result.py → histogram_result.py} +3 -18
- bencher/results/holoview_results/__init__.py +0 -0
- bencher/results/holoview_results/bar_result.py +79 -0
- bencher/results/holoview_results/curve_result.py +110 -0
- bencher/results/holoview_results/distribution_result/__init__.py +0 -0
- bencher/results/holoview_results/distribution_result/box_whisker_result.py +73 -0
- bencher/results/holoview_results/distribution_result/distribution_result.py +109 -0
- bencher/results/holoview_results/distribution_result/scatter_jitter_result.py +92 -0
- bencher/results/holoview_results/distribution_result/violin_result.py +70 -0
- bencher/results/holoview_results/heatmap_result.py +319 -0
- bencher/results/holoview_results/holoview_result.py +346 -0
- bencher/results/holoview_results/line_result.py +240 -0
- bencher/results/holoview_results/scatter_result.py +107 -0
- bencher/results/holoview_results/surface_result.py +158 -0
- bencher/results/holoview_results/table_result.py +14 -0
- bencher/results/holoview_results/tabulator_result.py +20 -0
- bencher/results/optuna_result.py +30 -115
- bencher/results/video_controls.py +38 -0
- bencher/results/video_result.py +39 -36
- bencher/results/video_summary.py +2 -2
- bencher/results/{plotly_result.py → volume_result.py} +29 -8
- bencher/utils.py +175 -26
- bencher/variables/inputs.py +122 -15
- bencher/video_writer.py +2 -1
- bencher/worker_job.py +31 -3
- {holobench-1.41.0.dist-info → holobench-1.43.0.dist-info}/METADATA +24 -24
- holobench-1.43.0.dist-info/RECORD +147 -0
- bencher/example/example_levels2.py +0 -37
- bencher/example/inputs_1D/example_1_in_1_out.py +0 -62
- bencher/example/inputs_1D/example_1_in_2_out.py +0 -63
- bencher/example/inputs_1D/example_1_in_2_out_repeats.py +0 -61
- bencher/results/holoview_result.py +0 -796
- bencher/results/panel_result.py +0 -41
- holobench-1.41.0.dist-info/RECORD +0 -114
- {holobench-1.41.0.dist-info → holobench-1.43.0.dist-info}/WHEEL +0 -0
- {holobench-1.41.0.dist-info → holobench-1.43.0.dist-info}/licenses/LICENSE +0 -0
bencher/example/inputs_1D/example_1_int_in_1_out.py
@@ -0,0 +1,98 @@
+"""This file has some examples for how to perform basic benchmarking parameter sweeps"""
+
+import bencher as bch
+
+
+class DataSource:
+    """A simple data source class that provides access to predefined data points."""
+
+    def __init__(self):
+        """Initialize the data source with predefined values and call counts."""
+        self.data = [
+            [0, 0, 0, 0],
+            [1, 1, 1, 1],
+            [1, 1, 1, 1],
+            [2, 1, 1, 0],
+            [2, 2, 0, 0],
+            [2, 2, 1, 1],
+        ]
+
+        self.call_count = [0] * len(self.data)
+
+    def call(self, index: int, repeat: int = None) -> int:
+        """Retrieve a data point at the specified index and repeat count.
+
+        Args:
+            index: The index of the data row to access
+            repeat: The specific repeat count to use. If None, uses and increments internal counter
+
+        Returns:
+            int: The value at the specified index and repeat position
+        """
+        if repeat is None:
+            self.call_count[index] += 1
+            repeat = self.call_count[index]
+        print(index, repeat)
+        return self.data[index][repeat - 1]
+
+
+class Example1D(bch.ParametrizedSweep):
+    """Example 1D parameter sweep class with one input dimension and one output dimension."""
+
+    index = bch.IntSweep(default=0, bounds=[0, 5], doc="Input index", units="rad", samples=30)
+    output = bch.ResultVar(units="v", doc="Output value from data source")
+
+    def __init__(self, **params):
+        """Initialize the Example1D sweep with a data source.
+
+        Args:
+            **params: Parameters to pass to the parent class constructor
+        """
+        super().__init__(**params)
+        self.data1 = DataSource()
+
+    def __call__(self, **kwargs) -> dict:
+        """Execute the parameter sweep for the given parameters.
+
+        Args:
+            **kwargs: Additional parameters to update before executing
+
+        Returns:
+            dict: Dictionary containing the output of the parameter sweep
+        """
+        self.update_params_from_kwargs(**kwargs)
+        self.output = self.data1.call(self.index)
+        return super().__call__(**kwargs)
+
+
+def example_1_int_in_1_out(
+    run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None
+) -> bch.Bench:
+    """This example shows how to sample a 1-dimensional integer variable and plot
+    the result of that parameter sweep.
+
+    Args:
+        run_cfg: Configuration for the benchmark run
+        report: Report to append the results to
+
+    Returns:
+        bch.Bench: The benchmark object
+    """
+    bench = Example1D().to_bench(run_cfg, report)
+    bench.plot_sweep()
+    return bench
+
+
+if __name__ == "__main__":
+    run_config = bch.BenchRunCfg()
+    report_obj = bch.BenchReport()
+    example_1_int_in_1_out(run_config, report_obj)
+
+    run_config.repeats = 4
+    example_1_int_in_1_out(run_config, report_obj)
+
+    # run_config.over_time = True
+    # for i in range(4):
+    #     example_1_int_in_1_out(run_config, report_obj)
+
+    report_obj.show()
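The `__main__` block above runs the same sweep twice into a shared report, first with the default single repeat and then with `repeats = 4`, so the report shows both the raw values and the per-point spread. A minimal driver in the same spirit, reusing only the APIs shown above (the import path is hypothetical):

```python
# Editor's sketch: run the example at several repeat counts into one report.
# Assumes the file above is importable; the module path is hypothetical.
import bencher as bch
from example_1_int_in_1_out import example_1_int_in_1_out

report = bch.BenchReport()
for repeats in (1, 4):
    cfg = bch.BenchRunCfg()
    cfg.repeats = repeats  # how many times each sweep point is sampled
    example_1_int_in_1_out(cfg, report)
report.show()
```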
bencher/example/inputs_1D/example_1_int_in_2_out.py
@@ -0,0 +1,101 @@
+"""This file has examples for how to perform basic benchmarking parameter sweeps"""
+
+import bencher as bch
+
+
+class DataSource:
+    """A simple data source class that provides access to predefined data points."""
+
+    def __init__(self):
+        """Initialize the data source with predefined values and call counts."""
+        self.data = [
+            [0, 0, 0, 0],
+            [1, 1, 1, 1],
+            [1, 1, 1, 1],
+            [2, 1, 1, 0],
+            [2, 2, 0, 0],
+            [2, 2, 1, 1],
+        ]
+
+        self.call_count = [0] * len(self.data)
+
+    def call(self, index: int, repeat: int = None) -> int:
+        """Retrieve a data point at the specified index and repeat count.
+
+        Args:
+            index: The index of the data row to access
+            repeat: The specific repeat count to use. If None, uses and increments internal counter
+
+        Returns:
+            int: The value at the specified index and repeat position
+        """
+        if repeat is None:
+            self.call_count[index] += 1
+            repeat = self.call_count[index]
+        return self.data[index][repeat - 1]
+
+
+class Example1D(bch.ParametrizedSweep):
+    """Example 1D parameter sweep class with one input dimension and two output dimensions."""
+
+    index = bch.IntSweep(default=0, bounds=[0, 5], doc="Input index", units="rad", samples=30)
+    output = bch.ResultVar(units="v", doc="Output value from data source 1")
+    output2 = bch.ResultVar(units="v", doc="Negated output value from data source 2")
+
+    def __init__(self, **params):
+        """Initialize the Example1D sweep with two data sources.
+
+        Args:
+            **params: Parameters to pass to the parent class constructor
+        """
+        super().__init__(**params)
+        self.data1 = DataSource()
+        self.data2 = DataSource()
+
+    def __call__(self, **kwargs) -> dict:
+        """Execute the parameter sweep for the given parameters.
+
+        Args:
+            **kwargs: Additional parameters to update before executing
+
+        Returns:
+            dict: Dictionary containing the outputs of the parameter sweep
+        """
+        self.update_params_from_kwargs(**kwargs)
+        self.output = self.data1.call(self.index)
+        self.output2 = -self.data2.call(self.index)
+        return super().__call__(**kwargs)
+
+
+def example_1_int_in_2_out(
+    run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None
+) -> bch.Bench:
+    """This example shows how to sample a 1-dimensional integer variable and plot
+    the result of two output variables from that parameter sweep.
+
+    Args:
+        run_cfg: Configuration for the benchmark run
+        report: Report to append the results to
+
+    Returns:
+        bch.Bench: The benchmark object
+    """
+    bench = Example1D().to_bench(run_cfg, report)
+    bench.plot_sweep()
+
+    # bench.report.append(bench.get_result().to_heatmap())
+    return bench
+
+
+if __name__ == "__main__":
+    run_config = bch.BenchRunCfg()
+    report_obj = bch.BenchReport()
+    example_1_int_in_2_out(run_config, report_obj)
+    # run_config.over_time = True
+    # run_config.auto_plot = False
+    # for i in range(4):
+    #     example_1_int_in_2_out(run_config, report_obj)
+
+    # run_config.auto_plot = True
+    # example_1_int_in_2_out(run_config, report_obj)
+    report_obj.show()
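`DataSource.call` only advances its per-index counter when `repeat` is not supplied, which is what lets repeated sweeps walk through each data row column by column. A quick sketch of that bookkeeping, using the `data[3] = [2, 1, 1, 0]` row defined above:

```python
# Editor's sketch of the repeat bookkeeping in DataSource (defined above).
ds = DataSource()
assert ds.call(3) == 2            # implicit repeat 1 -> data[3][0]
assert ds.call(3) == 1            # implicit repeat 2 -> data[3][1]
assert ds.call(3, repeat=1) == 2  # explicit repeat reads without advancing the counter
assert ds.call_count[3] == 2      # only the implicit calls incremented the counter
```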
bencher/example/inputs_1D/example_1_int_in_2_out_repeats.py
@@ -0,0 +1,99 @@
+"""This file has examples for how to perform basic benchmarking parameter sweeps with repeats"""
+
+import bencher as bch
+
+
+class DataSource:
+    """A simple data source class that provides access to predefined data points."""
+
+    def __init__(self):
+        """Initialize the data source with predefined values and call counts."""
+        self.data = [
+            [0, 0, 0, 0],
+            [1, 1, 1, 1],
+            [1, 1, 1, 1],
+            [2, 1, 1, 0],
+            [2, 2, 0, 0],
+            [2, 2, 1, 1],
+        ]
+
+        self.call_count = [0] * len(self.data)
+
+    def call(self, index: int, repeat: int = None) -> int:
+        """Retrieve a data point at the specified index and repeat count.
+
+        Args:
+            index: The index of the data row to access
+            repeat: The specific repeat count to use. If None, uses and increments internal counter
+
+        Returns:
+            int: The value at the specified index and repeat position
+        """
+        if repeat is None:
+            self.call_count[index] += 1
+            repeat = self.call_count[index]
+        return self.data[index][repeat - 1]
+
+
+class Example1D(bch.ParametrizedSweep):
+    """Example 1D parameter sweep class with one input dimension and two output dimensions."""
+
+    index = bch.IntSweep(default=0, bounds=[0, 5], doc="Input index", units="rad", samples=30)
+    output = bch.ResultVar(units="v", doc="Output value from data source 1")
+    output2 = bch.ResultVar(units="v", doc="Negated output value from data source 2")
+
+    def __init__(self, **params):
+        """Initialize the Example1D sweep with two data sources.
+
+        Args:
+            **params: Parameters to pass to the parent class constructor
+        """
+        super().__init__(**params)
+        self.data1 = DataSource()
+        self.data2 = DataSource()
+
+    def __call__(self, **kwargs) -> dict:
+        """Execute the parameter sweep for the given parameters.
+
+        Args:
+            **kwargs: Additional parameters to update before executing
+
+        Returns:
+            dict: Dictionary containing the outputs of the parameter sweep
+        """
+        self.update_params_from_kwargs(**kwargs)
+        self.output = self.data1.call(self.index)
+        self.output2 = -self.data2.call(self.index)
+        return super().__call__(**kwargs)
+
+
+def example_1_int_in_2_out_repeats(
+    run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None
+) -> bch.Bench:
+    """This example shows how to sample a 1-dimensional integer variable with multiple repeats
+    and plot the result of two output variables from that parameter sweep.

+    Args:
+        run_cfg: Configuration for the benchmark run
+        report: Report to append the results to
+
+    Returns:
+        bch.Bench: The benchmark object
+    """
+
+    from importlib.metadata import version
+
+    print(version("holobench"))
+    if run_cfg is None:
+        run_cfg = bch.BenchRunCfg()
+    run_cfg.repeats = 4
+    bench = Example1D().to_bench(run_cfg, report)
+    bench.plot_sweep()
+    return bench
+
+
+if __name__ == "__main__":
+    run_config = bch.BenchRunCfg()
+    report_obj = bch.BenchReport()
+    example_1_int_in_2_out_repeats(run_config, report_obj)
+    report_obj.show()
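Setting `run_cfg.repeats = 4` is deliberate here: each row of `data` holds exactly four samples, so four repeats consume one full row per index and the per-index statistics the sweep plots are fully determined. A small sketch of the means it should recover:

```python
# Editor's sketch: per-index means that 4 repeats of DataSource should yield.
data = [[0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1], [2, 1, 1, 0], [2, 2, 0, 0], [2, 2, 1, 1]]
means = [sum(row) / len(row) for row in data]
print(means)  # [0.0, 1.0, 1.0, 1.0, 1.0, 1.5]
```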
bencher/example/inputs_1_float/example_1_float_0_cat_in_2_out.py
@@ -0,0 +1,117 @@
+"""This file demonstrates benchmarking with 1 float input and 0 categorical inputs with 2 output variables.
+
+It benchmarks a single algorithm configuration across different problem sizes to show
+how performance scales, using simulated performance data to illustrate benchmarking basics.
+"""
+
+import random
+import math
+import bencher as bch
+
+random.seed(0)
+
+
+class Algorithm0CatBenchmark(bch.ParametrizedSweep):
+    """Example class for benchmarking algorithm performance with just problem size.
+
+    This class demonstrates how to structure a benchmark with one float parameter and
+    no categorical parameters, producing multiple output metrics. It uses simulated
+    performance data that follows realistic patterns while being deterministic.
+    """
+
+    # Float input parameter
+    problem_size = bch.FloatSweep(default=100, bounds=[1, 100], doc="Size of the problem to solve")
+
+    # Using fixed "iterative" algorithm, "array" data structure, and "basic" optimization level
+
+    # Output metrics
+    execution_time = bch.ResultVar(units="ms", doc="Execution time in milliseconds")
+    memory_usage = bch.ResultVar(units="MB", doc="Memory usage in megabytes")
+
+    def __call__(self, **kwargs) -> dict:
+        """Execute the benchmark for the given set of parameters.
+
+        Args:
+            **kwargs: Parameters to update before executing
+
+        Returns:
+            dict: Dictionary containing the benchmark results
+        """
+        self.update_params_from_kwargs(**kwargs)
+
+        # Base values for calculation
+        base_time = 1.0  # ms
+        base_memory = 0.1  # MB
+
+        # Size factor (non-linear relationship with problem size)
+        size_factor_time = math.log10(self.problem_size) ** 1.5
+        size_factor_memory = math.sqrt(self.problem_size) / 10
+
+        # Fixed "iterative" algorithm factors (from previous example)
+        algo_time_factor = 0.8  # Iterative is faster
+        algo_memory_factor = 0.7  # Iterative uses less memory
+
+        # Fixed "array" data structure factors (from previous example)
+        ds_time_factor = 0.9  # Arrays have faster access
+        ds_memory_factor = 1.1  # Arrays use slightly more contiguous memory
+
+        # Fixed "basic" optimization level factors (from previous example)
+        opt_time_factor = 1.0
+        opt_memory_factor = 1.0
+
+        # Calculate final metrics with some random variation
+        time_multiplier = (
+            algo_time_factor * ds_time_factor * opt_time_factor * random.uniform(0.9, 1.1)
+        )
+        memory_multiplier = (
+            algo_memory_factor * ds_memory_factor * opt_memory_factor * random.uniform(0.95, 1.05)
+        )
+
+        self.execution_time = base_time * size_factor_time * time_multiplier
+        self.memory_usage = base_memory * size_factor_memory * memory_multiplier
+
+        return super().__call__(**kwargs)
+
+
+def example_1_float_0_cat_in_2_out(
+    run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None
+) -> bch.Bench:
+    """This example demonstrates benchmarking with 1 float input and 0 categorical inputs.
+
+    It creates a synthetic benchmark that simulates performance characteristics of an
+    algorithm configuration across different problem sizes. The benchmark uses fixed
+    "iterative" algorithm, "array" data structure, and "basic" optimization level,
+    producing realistic patterns of execution time and memory usage without actually
+    executing real algorithms.
+
+    Args:
+        run_cfg: Configuration for the benchmark run
+        report: Report to append the results to
+
+    Returns:
+        bch.Bench: The benchmark object
+    """
+
+    if run_cfg is None:
+        run_cfg = bch.BenchRunCfg()
+    run_cfg.repeats = 5  # Slightly more repeats to show variance
+    bench = Algorithm0CatBenchmark().to_bench(run_cfg, report)
+    bench.plot_sweep(
+        title="Algorithm Performance Benchmark (1 Float, 0 Categorical Variables)",
+        description="Analyzing how execution time and memory usage scale with problem size",
+        post_description="""
+        This benchmark illustrates how algorithm performance scales with problem size.
+
+        Key observations:
+        - Execution time increases non-linearly with problem size (logarithmic relationship)
+        - Memory usage scales more gradually (square root relationship)
+        - All tests were performed with an iterative algorithm on array data structure with basic optimization
+        - Small variations in performance metrics simulate the natural fluctuations seen in real benchmarks
+        - This type of benchmark is useful for understanding scaling properties of a single algorithm configuration
+        """,
+    )
+    return bench
+
+
+if __name__ == "__main__":
+    example_1_float_0_cat_in_2_out().report.show()
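Stripped of the random jitter, the synthetic model above reduces to two closed-form curves: roughly 0.72 · log10(n)^1.5 ms for execution time and 0.0077 · sqrt(n) MB for memory. A sketch of that deterministic core, useful for sanity-checking the plotted trends:

```python
# Editor's sketch: the model above with the random.uniform jitter removed.
import math

def expected(problem_size: float) -> tuple[float, float]:
    time_ms = 1.0 * math.log10(problem_size) ** 1.5 * (0.8 * 0.9 * 1.0)
    memory_mb = 0.1 * (math.sqrt(problem_size) / 10) * (0.7 * 1.1 * 1.0)
    return time_ms, memory_mb

for n in (1, 10, 100):
    t, m = expected(n)
    print(f"n={n:3d}: ~{t:.2f} ms, ~{m:.3f} MB")
```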
bencher/example/inputs_1_float/example_1_float_1_cat_in_2_out.py
@@ -0,0 +1,124 @@
+"""This file demonstrates benchmarking with 1 float and 1 categorical input with 2 output variables.
+
+It benchmarks different algorithmic implementations to compare their performance characteristics
+using simulated performance data to illustrate how benchmarking works.
+"""
+
+import random
+import math
+import bencher as bch
+
+random.seed(0)
+
+
+class Algorithm1CatBenchmark(bch.ParametrizedSweep):
+    """Example class for benchmarking algorithm performance with fewer parameters.
+
+    This class demonstrates how to structure a benchmark with one float parameter and
+    one categorical parameter, producing multiple output metrics. It uses simulated
+    performance data that follows realistic patterns while being deterministic.
+    """
+
+    # Float input parameter
+    problem_size = bch.FloatSweep(default=100, bounds=[1, 100], doc="Size of the problem to solve")
+
+    # Categorical input parameter
+    algorithm_type = bch.StringSweep(
+        ["recursive", "iterative"], doc="Type of algorithm implementation"
+    )
+
+    # Using fixed "array" data structure and "basic" optimization level
+
+    # Output metrics
+    execution_time = bch.ResultVar(units="ms", doc="Execution time in milliseconds")
+    memory_usage = bch.ResultVar(units="MB", doc="Memory usage in megabytes")
+
+    def __call__(self, **kwargs) -> dict:
+        """Execute the benchmark for the given set of parameters.
+
+        Args:
+            **kwargs: Parameters to update before executing
+
+        Returns:
+            dict: Dictionary containing the benchmark results
+        """
+        self.update_params_from_kwargs(**kwargs)
+
+        # Base values for calculation
+        base_time = 1.0  # ms
+        base_memory = 0.1  # MB
+
+        # Size factor (non-linear relationship with problem size)
+        size_factor_time = math.log10(self.problem_size) ** 1.5
+        size_factor_memory = math.sqrt(self.problem_size) / 10
+
+        # Algorithm type factor
+        if self.algorithm_type == "recursive":
+            algo_time_factor = 1.2  # Recursive is slower
+            algo_memory_factor = 1.5  # Recursive uses more memory (stack)
+        else:  # iterative
+            algo_time_factor = 0.8  # Iterative is faster
+            algo_memory_factor = 0.7  # Iterative uses less memory
+
+        # Fixed "array" data structure factors (from previous example)
+        ds_time_factor = 0.9  # Arrays have faster access
+        ds_memory_factor = 1.1  # Arrays use slightly more contiguous memory
+
+        # Fixed "basic" optimization level factors (from previous example)
+        opt_time_factor = 1.0
+        opt_memory_factor = 1.0
+
+        # Calculate final metrics with some random variation
+        time_multiplier = (
+            algo_time_factor * ds_time_factor * opt_time_factor * random.uniform(0.9, 1.1)
+        )
+        memory_multiplier = (
+            algo_memory_factor * ds_memory_factor * opt_memory_factor * random.uniform(0.95, 1.05)
+        )
+
+        self.execution_time = base_time * size_factor_time * time_multiplier
+        self.memory_usage = base_memory * size_factor_memory * memory_multiplier
+
+        return super().__call__(**kwargs)
+
+
+def example_1_float_1_cat_in_2_out(
+    run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None
+) -> bch.Bench:
+    """This example demonstrates benchmarking with 1 float and 1 categorical input.
+
+    It creates a synthetic benchmark that simulates performance characteristics of different
+    algorithm implementations, varying problem size (float) and algorithm type. The benchmark
+    uses fixed "array" data structure and "basic" optimization level, producing realistic
+    patterns of execution time and memory usage without actually executing real algorithms.
+
+    Args:
+        run_cfg: Configuration for the benchmark run
+        report: Report to append the results to
+
+    Returns:
+        bch.Bench: The benchmark object
+    """
+
+    if run_cfg is None:
+        run_cfg = bch.BenchRunCfg()
+    run_cfg.repeats = 3  # Fewer repeats for a quicker benchmark
+    bench = Algorithm1CatBenchmark().to_bench(run_cfg, report)
+    bench.plot_sweep(
+        title="Algorithm Performance Benchmark (1 Float, 1 Categorical Variable)",
+        description="Comparing execution time and memory usage across problem sizes and algorithm types",
+        post_description="""
+        This benchmark illustrates how different algorithm implementations affect performance across problem sizes.
+
+        Key observations:
+        - Execution time increases non-linearly with problem size
+        - Iterative algorithms typically outperform recursive ones in both time and memory usage
+        - All tests were performed with array data structure and basic optimization level
+        - The performance gap between recursive and iterative approaches widens as problem size increases
+        """,
+    )
+    return bench
+
+
+if __name__ == "__main__":
+    example_1_float_1_cat_in_2_out().report.show()
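One nuance of the model above: because the algorithm factors are purely multiplicative, the recursive/iterative *ratio* is constant (1.2/0.8 = 1.5x in time), while the absolute gap grows with problem size, which is the "widening gap" the post_description refers to. A quick check with the jitter removed:

```python
# Editor's sketch: absolute time gap (recursive - iterative), jitter removed.
import math

for n in (10, 100):
    base = math.log10(n) ** 1.5 * 0.9  # size factor * fixed array factor
    print(n, round(1.2 * base - 0.8 * base, 3))  # 10 -> 0.36 ms, 100 -> 1.018 ms
```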
bencher/example/inputs_1_float/example_1_float_2_cat_in_2_out.py
@@ -0,0 +1,132 @@
+"""This file demonstrates benchmarking with 1 float and 2 categorical inputs with 2 output variables.
+
+It benchmarks different algorithmic configurations to compare their performance characteristics
+using simulated performance data to illustrate how benchmarking works.
+"""
+
+import random
+import math
+import bencher as bch
+
+random.seed(0)
+
+
+class Algorithm2CatBenchmark(bch.ParametrizedSweep):
+    """Example class for benchmarking algorithm performance with various parameters.
+
+    This class demonstrates how to structure a benchmark with one float parameter and
+    two categorical parameters, producing multiple output metrics. It uses simulated
+    performance data that follows realistic patterns while being deterministic.
+    """
+
+    # Float input parameter
+    problem_size = bch.FloatSweep(default=100, bounds=[1, 100], doc="Size of the problem to solve")
+
+    # Categorical input parameters
+    algorithm_type = bch.StringSweep(
+        ["recursive", "iterative"], doc="Type of algorithm implementation"
+    )
+    data_structure = bch.StringSweep(
+        ["array", "linked_list"], doc="Underlying data structure to use"
+    )
+
+    # Using fixed "basic" optimization level
+
+    # Output metrics
+    execution_time = bch.ResultVar(units="ms", doc="Execution time in milliseconds")
+    memory_usage = bch.ResultVar(units="MB", doc="Memory usage in megabytes")
+
+    def __call__(self, **kwargs) -> dict:
+        """Execute the benchmark for the given set of parameters.
+
+        Args:
+            **kwargs: Parameters to update before executing
+
+        Returns:
+            dict: Dictionary containing the benchmark results
+        """
+        self.update_params_from_kwargs(**kwargs)
+
+        # Base values for calculation
+        base_time = 1.0  # ms
+        base_memory = 0.1  # MB
+
+        # Size factor (non-linear relationship with problem size)
+        size_factor_time = math.log10(self.problem_size) ** 1.5
+        size_factor_memory = math.sqrt(self.problem_size) / 10
+
+        # Algorithm type factor
+        if self.algorithm_type == "recursive":
+            algo_time_factor = 1.2  # Recursive is slower
+            algo_memory_factor = 1.5  # Recursive uses more memory (stack)
+        else:  # iterative
+            algo_time_factor = 0.8  # Iterative is faster
+            algo_memory_factor = 0.7  # Iterative uses less memory
+
+        # Data structure factor
+        if self.data_structure == "array":
+            ds_time_factor = 0.9  # Arrays have faster access
+            ds_memory_factor = 1.1  # Arrays use slightly more contiguous memory
+        else:  # linked_list
+            ds_time_factor = 1.3  # Linked lists have slower access
+            ds_memory_factor = 0.9  # Linked lists might use less memory
+
+        # Fixed "basic" optimization level factors (from previous example)
+        opt_time_factor = 1.0
+        opt_memory_factor = 1.0
+
+        # Calculate final metrics with some random variation
+        time_multiplier = (
+            algo_time_factor * ds_time_factor * opt_time_factor * random.uniform(0.9, 1.1)
+        )
+        memory_multiplier = (
+            algo_memory_factor * ds_memory_factor * opt_memory_factor * random.uniform(0.95, 1.05)
+        )
+
+        self.execution_time = base_time * size_factor_time * time_multiplier
+        self.memory_usage = base_memory * size_factor_memory * memory_multiplier
+
+        return super().__call__(**kwargs)
+
+
+def example_1_float_2_cat_in_2_out(
+    run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None
+) -> bch.Bench:
+    """This example demonstrates benchmarking with 1 float and 2 categorical inputs.
+
+    It creates a synthetic benchmark that simulates performance characteristics of different
+    algorithm configurations, varying problem size (float), algorithm type, and data structure.
+    The benchmark uses a fixed "basic" optimization level and produces realistic patterns
+    of execution time and memory usage without actually executing real algorithms.
+
+    Args:
+        run_cfg: Configuration for the benchmark run
+        report: Report to append the results to
+
+    Returns:
+        bch.Bench: The benchmark object
+    """
+
+    if run_cfg is None:
+        run_cfg = bch.BenchRunCfg()
+    run_cfg.repeats = 3  # Fewer repeats for a quicker benchmark
+    bench = Algorithm2CatBenchmark().to_bench(run_cfg, report)
+    bench.plot_sweep(
+        title="Algorithm Performance Benchmark (1 Float, 2 Categorical Variables)",
+        description="Comparing execution time and memory usage across problem sizes, algorithm types, and data structures",
+        post_description="""
+        This benchmark illustrates how different algorithm configurations affect performance across problem sizes.
+
+        Key observations:
+        - Execution time generally increases non-linearly with problem size
+        - Iterative algorithms typically outperform recursive ones in both time and memory
+        - Arrays provide faster access than linked lists but may use more memory
+        - All tests were performed with basic optimization level
+        - The combination of algorithm type and data structure creates distinct performance profiles
+        """,
+    )
+    return bench
+
+
+if __name__ == "__main__":
+    example_1_float_2_cat_in_2_out().report.show()
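With two categorical inputs the sweep covers a 2x2 grid, and each cell's expected behaviour is simply the product of its factors. A sketch of the combined multipliers (jitter removed), which predicts the ordering of the four curves in the plots:

```python
# Editor's sketch: combined (time, memory) multipliers for the 2x2 grid above.
algo = {"recursive": (1.2, 1.5), "iterative": (0.8, 0.7)}
structure = {"array": (0.9, 1.1), "linked_list": (1.3, 0.9)}
for a, (at, am) in algo.items():
    for s, (st, sm) in structure.items():
        print(f"{a:9} + {s:11}: time x{at * st:.2f}, memory x{am * sm:.2f}")
```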