holobench 1.40.1__py3-none-any.whl → 1.42.0__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
- CHANGELOG.md +10 -0
- bencher/__init__.py +20 -2
- bencher/bench_cfg.py +265 -61
- bencher/bench_report.py +2 -2
- bencher/bench_runner.py +96 -10
- bencher/bencher.py +421 -89
- bencher/caching.py +1 -4
- bencher/class_enum.py +70 -7
- bencher/example/example_composable_container_image.py +60 -0
- bencher/example/example_composable_container_video.py +49 -0
- bencher/example/example_dataframe.py +2 -2
- bencher/example/example_image.py +17 -21
- bencher/example/example_image1.py +16 -20
- bencher/example/example_levels.py +17 -173
- bencher/example/example_pareto.py +107 -31
- bencher/example/example_rerun2.py +1 -1
- bencher/example/example_simple_bool.py +2 -2
- bencher/example/example_simple_float2d.py +6 -1
- bencher/example/example_video.py +35 -17
- bencher/example/experimental/example_hvplot_explorer.py +3 -4
- bencher/example/inputs_0D/example_0_in_1_out.py +25 -15
- bencher/example/inputs_0D/example_0_in_2_out.py +12 -3
- bencher/example/inputs_0_float/example_0_cat_in_2_out.py +88 -0
- bencher/example/inputs_0_float/example_1_cat_in_2_out.py +98 -0
- bencher/example/inputs_0_float/example_2_cat_in_2_out.py +107 -0
- bencher/example/inputs_0_float/example_3_cat_in_2_out.py +111 -0
- bencher/example/inputs_1D/example1d_common.py +48 -12
- bencher/example/inputs_1D/example_0_float_1_cat.py +33 -0
- bencher/example/inputs_1D/example_1_cat_in_2_out_repeats.py +68 -0
- bencher/example/inputs_1D/example_1_float_2_cat_repeats.py +15 -0
- bencher/example/inputs_1D/example_1_int_in_1_out.py +98 -0
- bencher/example/inputs_1D/example_1_int_in_2_out.py +101 -0
- bencher/example/inputs_1D/example_1_int_in_2_out_repeats.py +99 -0
- bencher/example/inputs_1_float/example_1_float_0_cat_in_2_out.py +117 -0
- bencher/example/inputs_1_float/example_1_float_1_cat_in_2_out.py +124 -0
- bencher/example/inputs_1_float/example_1_float_2_cat_in_2_out.py +132 -0
- bencher/example/inputs_1_float/example_1_float_3_cat_in_2_out.py +140 -0
- bencher/example/inputs_2D/example_2_cat_in_4_out_repeats.py +104 -0
- bencher/example/inputs_2_float/example_2_float_0_cat_in_2_out.py +98 -0
- bencher/example/inputs_2_float/example_2_float_1_cat_in_2_out.py +112 -0
- bencher/example/inputs_2_float/example_2_float_2_cat_in_2_out.py +122 -0
- bencher/example/inputs_2_float/example_2_float_3_cat_in_2_out.py +138 -0
- bencher/example/inputs_3_float/example_3_float_0_cat_in_2_out.py +111 -0
- bencher/example/inputs_3_float/example_3_float_1_cat_in_2_out.py +117 -0
- bencher/example/inputs_3_float/example_3_float_2_cat_in_2_out.py +124 -0
- bencher/example/inputs_3_float/example_3_float_3_cat_in_2_out.py +129 -0
- bencher/example/meta/generate_examples.py +124 -7
- bencher/example/meta/generate_meta.py +88 -40
- bencher/job.py +175 -12
- bencher/plotting/plot_filter.py +52 -17
- bencher/results/bench_result.py +119 -26
- bencher/results/bench_result_base.py +119 -10
- bencher/results/composable_container/composable_container_video.py +39 -12
- bencher/results/dataset_result.py +6 -200
- bencher/results/explorer_result.py +23 -0
- bencher/results/{hvplot_result.py → histogram_result.py} +3 -18
- bencher/results/holoview_results/__init__.py +0 -0
- bencher/results/holoview_results/bar_result.py +79 -0
- bencher/results/holoview_results/curve_result.py +110 -0
- bencher/results/holoview_results/distribution_result/__init__.py +0 -0
- bencher/results/holoview_results/distribution_result/box_whisker_result.py +73 -0
- bencher/results/holoview_results/distribution_result/distribution_result.py +109 -0
- bencher/results/holoview_results/distribution_result/scatter_jitter_result.py +92 -0
- bencher/results/holoview_results/distribution_result/violin_result.py +70 -0
- bencher/results/holoview_results/heatmap_result.py +319 -0
- bencher/results/holoview_results/holoview_result.py +346 -0
- bencher/results/holoview_results/line_result.py +240 -0
- bencher/results/holoview_results/scatter_result.py +107 -0
- bencher/results/holoview_results/surface_result.py +158 -0
- bencher/results/holoview_results/table_result.py +14 -0
- bencher/results/holoview_results/tabulator_result.py +20 -0
- bencher/results/laxtex_result.py +42 -35
- bencher/results/optuna_result.py +30 -115
- bencher/results/video_controls.py +38 -0
- bencher/results/video_result.py +39 -36
- bencher/results/video_summary.py +2 -2
- bencher/results/{plotly_result.py → volume_result.py} +29 -8
- bencher/utils.py +176 -30
- bencher/variables/inputs.py +122 -15
- bencher/video_writer.py +38 -2
- bencher/worker_job.py +34 -7
- {holobench-1.40.1.dist-info → holobench-1.42.0.dist-info}/METADATA +21 -25
- holobench-1.42.0.dist-info/RECORD +147 -0
- bencher/example/example_composable_container.py +0 -106
- bencher/example/example_levels2.py +0 -37
- bencher/example/inputs_1D/example_1_in_1_out.py +0 -62
- bencher/example/inputs_1D/example_1_in_2_out.py +0 -63
- bencher/example/inputs_1D/example_1_in_2_out_repeats.py +0 -61
- bencher/results/holoview_result.py +0 -787
- bencher/results/panel_result.py +0 -41
- holobench-1.40.1.dist-info/RECORD +0 -111
- {holobench-1.40.1.dist-info → holobench-1.42.0.dist-info}/WHEEL +0 -0
- {holobench-1.40.1.dist-info → holobench-1.42.0.dist-info}/licenses/LICENSE +0 -0
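The added example modules in this release all follow the same pattern, visible in the hunks below: subclass bch.ParametrizedSweep, declare sweep inputs and ResultVar outputs, assign the outputs in __call__, then convert an instance to a benchmark with to_bench() and call plot_sweep(). The following minimal sketch of that pattern uses only calls that appear in this diff; the class and variable names are illustrative and are not part of the release.

import bencher as bch


class ExampleSweep(bch.ParametrizedSweep):
    # Hypothetical sweep for illustration; the real examples are in the hunks below.
    flavour = bch.StringSweep(["a", "b"], doc="Categorical input")
    score = bch.ResultVar(units="ms", doc="Output metric")

    def __call__(self, **kwargs) -> dict:
        # Copy the swept values onto the instance, compute outputs, return them as a dict
        self.update_params_from_kwargs(**kwargs)
        self.score = 1.0 if self.flavour == "a" else 2.0
        return super().__call__(**kwargs)


run_cfg = bch.BenchRunCfg()
run_cfg.repeats = 2
bench = ExampleSweep().to_bench(run_cfg)
bench.plot_sweep()
bench.report.show()

The new example files differ mainly in how many categorical and float inputs they sweep and in whether repeats are used; the structure above is common to all of them.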
bencher/example/inputs_0_float/example_2_cat_in_2_out.py
@@ -0,0 +1,107 @@
+"""This file demonstrates benchmarking with 2 categorical inputs and 2 output variables.
+
+It benchmarks different Python operations to compare their performance characteristics
+using simulated performance data to illustrate how benchmarking works.
+"""
+
+import random
+import bencher as bch
+
+random.seed(0)
+
+
+class PythonOperations2CatBenchmark(bch.ParametrizedSweep):
+    """Example class for benchmarking different Python operations using 2 categorical variables.
+
+    This class demonstrates how to structure a benchmark with two input parameters
+    and multiple output metrics. It uses simulated performance data that follows realistic
+    patterns while being deterministic and reproducible.
+    """
+
+    data_structure = bch.StringSweep(["list", "dict"], doc="Type of data structure to operate on")
+    operation_type = bch.StringSweep(["read", "write"], doc="Type of operation to perform")
+
+    # Using fixed medium size data instead of a variable
+
+    execution_time = bch.ResultVar(units="ms", doc="Execution time in milliseconds")
+    memory_peak = bch.ResultVar(units="KB", doc="Peak memory usage in kilobytes")
+
+    def __call__(self, **kwargs) -> dict:
+        """Execute the benchmark for the given set of parameters.
+
+        Args:
+            **kwargs: Parameters to update before executing
+
+        Returns:
+            dict: Dictionary containing the benchmark results
+        """
+        self.update_params_from_kwargs(**kwargs)
+
+        # Use deterministic fake data based on parameters
+        # Base values that will be modified by our parameters
+        base_time = 50.0  # ms (medium size base)
+        base_memory = 1000.0  # KB (medium size base)
+
+        # Adjust for data structure (lists are generally faster but use more memory)
+        if self.data_structure == "list":
+            time_factor = 0.8
+            memory_factor = 1.2
+        else:  # dict
+            time_factor = 1.2
+            memory_factor = 0.9
+
+        # Adjust for operation type (reads are faster than writes)
+        if self.operation_type == "read":
+            time_factor *= 0.7
+            memory_factor *= 0.8
+        else:  # write
+            time_factor *= 1.4
+            memory_factor *= 1.3
+
+        # Calculate final metrics with variance
+        self.execution_time = base_time * time_factor * random.gauss(0.85, 1.15)
+        self.memory_peak = base_memory * memory_factor * random.gauss(0.90, 1.10)
+
+        return super().__call__(**kwargs)
+
+
+def example_2_cat_in_2_out(
+    run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None
+) -> bch.Bench:
+    """This example demonstrates benchmarking with 2 categorical variables and multiple output metrics.
+
+    It creates a synthetic benchmark that simulates performance characteristics of different
+    Python operations, varying data structures and operation types using a fixed medium data size.
+    The benchmark produces realistic patterns of execution time and memory usage without actually
+    executing real operations, making it ideal for learning and demonstration.
+
+    Args:
+        run_cfg: Configuration for the benchmark run
+        report: Report to append the results to
+
+    Returns:
+        bch.Bench: The benchmark object
+    """
+
+    if run_cfg is None:
+        run_cfg = bch.BenchRunCfg()
+    run_cfg.repeats = 5  # Fewer repeats for a quicker benchmark
+    bench = PythonOperations2CatBenchmark().to_bench(run_cfg, report)
+    bench.plot_sweep(
+        title="Python Operations Performance Benchmark (2 Variables)",
+        description="Comparing execution time and peak memory usage across Python data structures and operations",
+        post_description="""
+        This benchmark illustrates how different data structures and operations affect performance.
+
+        Key observations:
+        - Lists generally process faster than dictionaries for these operations
+        - Read operations outperform write operations as expected
+        - All tests were performed with a fixed medium-sized dataset
+        - Note that variance in the results simulates real-world measurement fluctuations
+        """,
+    )
+    return bench
+
+
+if __name__ == "__main__":
+    example_2_cat_in_2_out().report.show()
bencher/example/inputs_0_float/example_3_cat_in_2_out.py
@@ -0,0 +1,111 @@
+"""This file demonstrates benchmarking with 3 categorical inputs and 2 output variables.
+
+It benchmarks different Python operations to compare their performance characteristics
+using simulated performance data to illustrate how benchmarking works.
+"""
+
+import random
+import bencher as bch
+
+random.seed(0)
+
+
+class PythonOperationsBenchmark(bch.ParametrizedSweep):
+    """Example class for benchmarking different Python operations using categorical variables.
+
+    This class demonstrates how to structure a benchmark with multiple input parameters
+    and multiple output metrics. It uses simulated performance data that follows realistic
+    patterns while being deterministic and reproducible.
+    """
+
+    data_structure = bch.StringSweep(["list", "dict"], doc="Type of data structure to operate on")
+    operation_type = bch.StringSweep(["read", "write"], doc="Type of operation to perform")
+    data_size = bch.StringSweep(["small", "medium"], doc="Size of data to process")
+
+    execution_time = bch.ResultVar(units="ms", doc="Execution time in milliseconds")
+    memory_peak = bch.ResultVar(units="KB", doc="Peak memory usage in kilobytes")
+
+    def __call__(self, **kwargs) -> dict:
+        """Execute the benchmark for the given set of parameters.
+
+        Args:
+            **kwargs: Parameters to update before executing
+
+        Returns:
+            dict: Dictionary containing the benchmark results
+        """
+        self.update_params_from_kwargs(**kwargs)
+
+        # Use deterministic fake data based on parameters
+        # Base values that will be modified by our parameters
+        base_time = 10.0  # ms
+        base_memory = 100.0  # KB
+
+        # Adjust for data structure (lists are generally faster but use more memory)
+        if self.data_structure == "list":
+            time_factor = 0.8
+            memory_factor = 1.2
+        else:  # dict
+            time_factor = 1.2
+            memory_factor = 0.9
+
+        # Adjust for operation type (reads are faster than writes)
+        if self.operation_type == "read":
+            time_factor *= 0.7
+            memory_factor *= 0.8
+        else:  # write
+            time_factor *= 1.4
+            memory_factor *= 1.3
+
+        # Adjust for data size
+        if self.data_size == "medium":
+            time_factor *= 5
+            memory_factor *= 10
+
+        # Calculate final metrics with increased variance
+        self.execution_time = base_time * time_factor * random.gauss(0.85, 1.15)
+        self.memory_peak = base_memory * memory_factor * random.gauss(0.90, 1.10)
+
+        return super().__call__(**kwargs)
+
+
+def example_3_cat_in_2_out(
+    run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None
+) -> bch.Bench:
+    """This example demonstrates benchmarking with categorical variables and multiple output metrics.
+
+    It creates a synthetic benchmark that simulates performance characteristics of different
+    Python operations, varying data structures, operation types, and data sizes. The benchmark
+    produces realistic patterns of execution time and memory usage without actually executing
+    real operations, making it ideal for learning and demonstration.
+
+    Args:
+        run_cfg: Configuration for the benchmark run
+        report: Report to append the results to
+
+    Returns:
+        bch.Bench: The benchmark object
+    """
+
+    if run_cfg is None:
+        run_cfg = bch.BenchRunCfg()
+    run_cfg.repeats = 5  # Fewer repeats for a quicker benchmark
+    bench = PythonOperationsBenchmark().to_bench(run_cfg, report)
+    bench.plot_sweep(
+        title="Python Operations Performance Benchmark",
+        description="Comparing execution time and peak memory usage across Python data structures and operations",
+        post_description="""
+        This benchmark illustrates how different data structures and operations affect performance.

+        Key observations:
+        - Lists generally process faster than dictionaries for these operations
+        - Read operations outperform write operations as expected
+        - Medium-sized data requires significantly more resources than small data
+        - Note that variance in the results simulates real-world measurement fluctuations
+        """,
+    )
+    return bench
+
+
+if __name__ == "__main__":
+    example_3_cat_in_2_out().report.show()
bencher/example/inputs_1D/example1d_common.py
@@ -4,7 +4,10 @@ import bencher as bch
 
 
 class DataSource:
+    """A simple data source class that provides access to predefined data points."""
+
     def __init__(self):
+        """Initialize the data source with predefined values and call counts."""
         self.data = [
             [0, 0, 0, 0],
             [1, 1, 1, 1],
@@ -16,7 +19,16 @@ class DataSource:
 
         self.call_count = [0] * len(self.data)
 
-    def call(self, index, repeat=None):
+    def call(self, index: int, repeat: int = None) -> int:
+        """Retrieve a data point at the specified index and repeat count.
+
+        Args:
+            index: The index of the data row to access
+            repeat: The specific repeat count to use. If None, uses and increments internal counter
+
+        Returns:
+            int: The value at the specified index and repeat position
+        """
         if repeat is None:
             self.call_count[index] += 1
             repeat = self.call_count[index]
@@ -24,16 +36,31 @@
 
 
 class Example1D(bch.ParametrizedSweep):
-
-
-
+    """Example 1D parameter sweep class with one input and two output dimensions."""
+
+    index = bch.IntSweep(default=0, bounds=[0, 5], doc="Input index", units="rad", samples=30)
+    output = bch.ResultVar(units="v", doc="Output value from data source 1")
+    output2 = bch.ResultVar(units="v", doc="Negated output value from data source 2")
 
     def __init__(self, **params):
+        """Initialize the Example1D sweep with two data sources.
+
+        Args:
+            **params: Parameters to pass to the parent class constructor
+        """
        super().__init__(**params)
        self.data1 = DataSource()
        self.data2 = DataSource()
 
-    def __call__(self, **kwargs):
+    def __call__(self, **kwargs) -> dict:
+        """Execute the parameter sweep for the given parameters.
+
+        Args:
+            **kwargs: Additional parameters to update before executing
+
+        Returns:
+            dict: Dictionary containing the outputs of the parameter sweep
+        """
         self.update_params_from_kwargs(**kwargs)
         self.output = self.data1.call(self.index)
         self.output2 = -self.data2.call(self.index)
@@ -43,7 +70,16 @@ class Example1D(bch.ParametrizedSweep):
 def example_1_in_2_out(
     run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None
 ) -> bch.Bench:
-    """This example shows how to sample a 1
+    """This example shows how to sample a 1-dimensional integer variable and plot
+    the result of two output variables from that parameter sweep.
+
+    Args:
+        run_cfg: Configuration for the benchmark run
+        report: Report to append the results to
+
+    Returns:
+        bch.Bench: The benchmark object
+    """
     bench = Example1D().to_bench(run_cfg, report)
     bench.plot_sweep()
 
@@ -53,17 +89,17 @@ def example_1_in_2_out(
 
 if __name__ == "__main__":
     run_config = bch.BenchRunCfg()
-
-    example_1_in_2_out(run_config,
+    report_obj = bch.BenchReport()
+    example_1_in_2_out(run_config, report_obj)
 
     run_config.repeats = 4
-    example_1_in_2_out(run_config,
+    example_1_in_2_out(run_config, report_obj)
 
     # run_config.over_time = True
     # run_config.auto_plot = False
     # for i in range(4):
-    # example_1_in_2_out(run_config,
+    # example_1_in_2_out(run_config, report_obj)
 
     # run_config.auto_plot = True
-    # example_1_in_2_out(run_config,
-
+    # example_1_in_2_out(run_config, report_obj)
+    report_obj.show()
bencher/example/inputs_1D/example_0_float_1_cat.py
@@ -0,0 +1,33 @@
+"""This file demonstrates benchmarking with both categorical and float variables."""
+
+import bencher as bch
+from bencher.example.meta.example_meta import BenchableObject
+
+run_cfg = bch.BenchRunCfg()
+run_cfg.repeats = 2  # only shows distance
+run_cfg.level = 4
+bench = BenchableObject().to_bench(run_cfg)
+# bench.worker_class_instance.float2=0.2
+run_cfg.repeats = 1
+# WORKS
+# shows both distance and simple noise
+res = bench.plot_sweep(input_vars=["float1"], result_vars=["distance", "sample_noise"])
+
+# WORKS
+# shows both distance and simple noise
+res = bench.plot_sweep(input_vars=["noisy"], result_vars=["distance", "sample_noise"])
+
+
+run_cfg.repeats = 10  # If i set repeats>1 then floating point variables still work but categorical variables do not
+# WORKS
+# shows both distance and simple noise
+res = bench.plot_sweep(input_vars=["float1"], result_vars=["distance", "sample_noise"])
+
+# BUG
+# only shows distance result var, ignores sample_noise
+res = bench.plot_sweep(input_vars=["noisy"], result_vars=["distance", "sample_noise"])
+
+
+bench.report.append(res.to_tabulator())
+# bench.report.append(res.to_scatter_jitter_single(BenchableObject.param.sample_noise))
+bench.report.show()
bencher/example/inputs_1D/example_1_cat_in_2_out_repeats.py
@@ -0,0 +1,68 @@
+"""This file demonstrates benchmarking with categorical inputs and multiple outputs with repeats."""
+
+import random
+import bencher as bch
+
+random.seed(0)
+
+
+class DataStructureBenchmark(bch.ParametrizedSweep):
+    """Example class for comparing different data structure operations with two output variables."""
+
+    operation = bch.StringSweep(
+        ["list_append", "dict_insert"],
+        doc="Type of data structure operation to benchmark",
+    )
+    execution_time = bch.ResultVar(units="ms", doc="Time taken to complete operations")
+    memory_usage = bch.ResultVar(units="KB", doc="Memory used by the operation")
+
+    def __call__(self, **kwargs) -> dict:
+        """Execute the parameter sweep for the given data structure operation.
+
+        Args:
+            **kwargs: Additional parameters to update before executing
+
+        Returns:
+            dict: Dictionary containing the outputs of the parameter sweep
+        """
+        self.update_params_from_kwargs(**kwargs)
+
+        # Simple simulations of different data structure operations
+        # In a real benchmark, you would implement or measure actual operations
+
+        if self.operation == "list_append":
+            # List append operations (typically fast for adding elements)
+            self.execution_time = random.gauss(mu=5.0, sigma=1.0)
+            self.memory_usage = random.gauss(mu=120.0, sigma=20.0)
+        elif self.operation == "dict_insert":
+            # Dictionary insertions (hash table operations)
+            self.execution_time = random.gauss(mu=6.5, sigma=1.2)
+            self.memory_usage = random.gauss(mu=180.0, sigma=25.0)
+
+        return super().__call__(**kwargs)
+
+
+def example_1_cat_in_2_out_repeats(
+    run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None
+) -> bch.Bench:
+    """This example shows how to benchmark different data structure operations with multiple repeats
+    and plot the results of execution time and memory usage.
+
+    Args:
+        run_cfg: Configuration for the benchmark run
+        report: Report to append the results to
+
+    Returns:
+        bch.Bench: The benchmark object
+    """
+
+    if run_cfg is None:
+        run_cfg = bch.BenchRunCfg()
+    run_cfg.repeats = 30  # Increased repeats for better statistical significance
+    bench = DataStructureBenchmark().to_bench(run_cfg, report)
+    bench.plot_sweep()
+    return bench
+
+
+if __name__ == "__main__":
+    example_1_cat_in_2_out_repeats().report.show()
bencher/example/inputs_1D/example_1_float_2_cat_repeats.py
@@ -0,0 +1,15 @@
+"""This file demonstrates benchmarking with both float and categorical variables with repeats."""
+
+import bencher as bch
+from bencher.example.meta.example_meta import BenchableObject
+
+# Configure and run a benchmark with multiple input types and repeats
+run_cfg = bch.BenchRunCfg()
+run_cfg.repeats = 20
+run_cfg.level = 4
+bench = BenchableObject().to_bench(run_cfg)
+res = bench.plot_sweep(
+    input_vars=["float1", "noisy", "noise_distribution"], result_vars=["distance", "sample_noise"]
+)
+
+bench.report.show()
bencher/example/inputs_1D/example_1_int_in_1_out.py
@@ -0,0 +1,98 @@
+"""This file has some examples for how to perform basic benchmarking parameter sweeps"""
+
+import bencher as bch
+
+
+class DataSource:
+    """A simple data source class that provides access to predefined data points."""
+
+    def __init__(self):
+        """Initialize the data source with predefined values and call counts."""
+        self.data = [
+            [0, 0, 0, 0],
+            [1, 1, 1, 1],
+            [1, 1, 1, 1],
+            [2, 1, 1, 0],
+            [2, 2, 0, 0],
+            [2, 2, 1, 1],
+        ]
+
+        self.call_count = [0] * len(self.data)
+
+    def call(self, index: int, repeat: int = None) -> int:
+        """Retrieve a data point at the specified index and repeat count.
+
+        Args:
+            index: The index of the data row to access
+            repeat: The specific repeat count to use. If None, uses and increments internal counter
+
+        Returns:
+            int: The value at the specified index and repeat position
+        """
+        if repeat is None:
+            self.call_count[index] += 1
+            repeat = self.call_count[index]
+        print(index, repeat)
+        return self.data[index][repeat - 1]
+
+
+class Example1D(bch.ParametrizedSweep):
+    """Example 1D parameter sweep class with one input dimension and one output dimension."""
+
+    index = bch.IntSweep(default=0, bounds=[0, 5], doc="Input index", units="rad", samples=30)
+    output = bch.ResultVar(units="v", doc="Output value from data source")
+
+    def __init__(self, **params):
+        """Initialize the Example1D sweep with a data source.
+
+        Args:
+            **params: Parameters to pass to the parent class constructor
+        """
+        super().__init__(**params)
+        self.data1 = DataSource()
+
+    def __call__(self, **kwargs) -> dict:
+        """Execute the parameter sweep for the given parameters.
+
+        Args:
+            **kwargs: Additional parameters to update before executing
+
+        Returns:
+            dict: Dictionary containing the output of the parameter sweep
+        """
+        self.update_params_from_kwargs(**kwargs)
+        self.output = self.data1.call(self.index)
+        return super().__call__(**kwargs)
+
+
+def example_1_int_in_1_out(
+    run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None
+) -> bch.Bench:
+    """This example shows how to sample a 1-dimensional integer variable and plot
+    the result of that parameter sweep.
+
+    Args:
+        run_cfg: Configuration for the benchmark run
+        report: Report to append the results to
+
+    Returns:
+        bch.Bench: The benchmark object
+    """
+    bench = Example1D().to_bench(run_cfg, report)
+    bench.plot_sweep()
+    return bench
+
+
+if __name__ == "__main__":
+    run_config = bch.BenchRunCfg()
+    report_obj = bch.BenchReport()
+    example_1_int_in_1_out(run_config, report_obj)
+
+    run_config.repeats = 4
+    example_1_int_in_1_out(run_config, report_obj)
+
+    # run_cfg.over_time = True
+    # for i in range(4):
+    # example_1_in_2_out(run_cfg, report_obj)
+
+    report_obj.show()
bencher/example/inputs_1D/example_1_int_in_2_out.py
@@ -0,0 +1,101 @@
+"""This file has examples for how to perform basic benchmarking parameter sweeps"""
+
+import bencher as bch
+
+
+class DataSource:
+    """A simple data source class that provides access to predefined data points."""
+
+    def __init__(self):
+        """Initialize the data source with predefined values and call counts."""
+        self.data = [
+            [0, 0, 0, 0],
+            [1, 1, 1, 1],
+            [1, 1, 1, 1],
+            [2, 1, 1, 0],
+            [2, 2, 0, 0],
+            [2, 2, 1, 1],
+        ]
+
+        self.call_count = [0] * len(self.data)
+
+    def call(self, index: int, repeat: int = None) -> int:
+        """Retrieve a data point at the specified index and repeat count.
+
+        Args:
+            index: The index of the data row to access
+            repeat: The specific repeat count to use. If None, uses and increments internal counter
+
+        Returns:
+            int: The value at the specified index and repeat position
+        """
+        if repeat is None:
+            self.call_count[index] += 1
+            repeat = self.call_count[index]
+        return self.data[index][repeat - 1]
+
+
+class Example1D(bch.ParametrizedSweep):
+    """Example 1D parameter sweep class with one input dimension and two output dimensions."""
+
+    index = bch.IntSweep(default=0, bounds=[0, 5], doc="Input index", units="rad", samples=30)
+    output = bch.ResultVar(units="v", doc="Output value from data source 1")
+    output2 = bch.ResultVar(units="v", doc="Negated output value from data source 2")
+
+    def __init__(self, **params):
+        """Initialize the Example1D sweep with two data sources.
+
+        Args:
+            **params: Parameters to pass to the parent class constructor
+        """
+        super().__init__(**params)
+        self.data1 = DataSource()
+        self.data2 = DataSource()
+
+    def __call__(self, **kwargs) -> dict:
+        """Execute the parameter sweep for the given parameters.
+
+        Args:
+            **kwargs: Additional parameters to update before executing
+
+        Returns:
+            dict: Dictionary containing the outputs of the parameter sweep
+        """
+        self.update_params_from_kwargs(**kwargs)
+        self.output = self.data1.call(self.index)
+        self.output2 = -self.data2.call(self.index)
+        return super().__call__(**kwargs)
+
+
+def example_1_int_in_2_out(
+    run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None
+) -> bch.Bench:
+    """This example shows how to sample a 1-dimensional integer variable and plot
+    the result of two output variables from that parameter sweep.
+
+    Args:
+        run_cfg: Configuration for the benchmark run
+        report: Report to append the results to
+
+    Returns:
+        bch.Bench: The benchmark object
+    """
+    bench = Example1D().to_bench(run_cfg, report)
+    bench.plot_sweep()
+
+    # bench.report.append(bench.get_result().to_heatmap())
+    return bench
+
+
+if __name__ == "__main__":
+    run_config = bch.BenchRunCfg()
+    report_obj = bch.BenchReport()
+    example_1_int_in_2_out(run_config, report_obj)
+    # run_config.over_time = True
+    # run_config.auto_plot = False
+    # for i in range(4):
+    # example_1_in_2_out(run_config, report_obj)
+
+    # run_config.auto_plot = True
+    # example_1_in_2_out(run_config, report_obj)
+    report_obj.show()