holobench-1.41.0-py3-none-any.whl → holobench-1.43.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- bencher/__init__.py +20 -2
- bencher/bench_cfg.py +262 -54
- bencher/bench_report.py +2 -2
- bencher/bench_runner.py +96 -10
- bencher/bencher.py +421 -89
- bencher/class_enum.py +70 -7
- bencher/example/example_dataframe.py +2 -2
- bencher/example/example_levels.py +17 -173
- bencher/example/example_pareto.py +107 -31
- bencher/example/example_rerun2.py +1 -1
- bencher/example/example_simple_bool.py +2 -2
- bencher/example/example_simple_float2d.py +6 -1
- bencher/example/example_video.py +2 -0
- bencher/example/experimental/example_hvplot_explorer.py +2 -2
- bencher/example/inputs_0D/example_0_in_1_out.py +25 -15
- bencher/example/inputs_0D/example_0_in_2_out.py +12 -3
- bencher/example/inputs_0_float/example_0_cat_in_2_out.py +88 -0
- bencher/example/inputs_0_float/example_1_cat_in_2_out.py +98 -0
- bencher/example/inputs_0_float/example_2_cat_in_2_out.py +107 -0
- bencher/example/inputs_0_float/example_3_cat_in_2_out.py +111 -0
- bencher/example/inputs_1D/example1d_common.py +48 -12
- bencher/example/inputs_1D/example_0_float_1_cat.py +33 -0
- bencher/example/inputs_1D/example_1_cat_in_2_out_repeats.py +68 -0
- bencher/example/inputs_1D/example_1_float_2_cat_repeats.py +3 -0
- bencher/example/inputs_1D/example_1_int_in_1_out.py +98 -0
- bencher/example/inputs_1D/example_1_int_in_2_out.py +101 -0
- bencher/example/inputs_1D/example_1_int_in_2_out_repeats.py +99 -0
- bencher/example/inputs_1_float/example_1_float_0_cat_in_2_out.py +117 -0
- bencher/example/inputs_1_float/example_1_float_1_cat_in_2_out.py +124 -0
- bencher/example/inputs_1_float/example_1_float_2_cat_in_2_out.py +132 -0
- bencher/example/inputs_1_float/example_1_float_3_cat_in_2_out.py +140 -0
- bencher/example/inputs_2D/example_2_cat_in_4_out_repeats.py +104 -0
- bencher/example/inputs_2_float/example_2_float_0_cat_in_2_out.py +98 -0
- bencher/example/inputs_2_float/example_2_float_1_cat_in_2_out.py +112 -0
- bencher/example/inputs_2_float/example_2_float_2_cat_in_2_out.py +122 -0
- bencher/example/inputs_2_float/example_2_float_3_cat_in_2_out.py +138 -0
- bencher/example/inputs_3_float/example_3_float_0_cat_in_2_out.py +111 -0
- bencher/example/inputs_3_float/example_3_float_1_cat_in_2_out.py +117 -0
- bencher/example/inputs_3_float/example_3_float_2_cat_in_2_out.py +124 -0
- bencher/example/inputs_3_float/example_3_float_3_cat_in_2_out.py +129 -0
- bencher/example/meta/generate_examples.py +118 -7
- bencher/example/meta/generate_meta.py +88 -40
- bencher/job.py +174 -9
- bencher/plotting/plot_filter.py +52 -17
- bencher/results/bench_result.py +117 -25
- bencher/results/bench_result_base.py +117 -8
- bencher/results/dataset_result.py +6 -200
- bencher/results/explorer_result.py +23 -0
- bencher/results/{hvplot_result.py → histogram_result.py} +3 -18
- bencher/results/holoview_results/__init__.py +0 -0
- bencher/results/holoview_results/bar_result.py +79 -0
- bencher/results/holoview_results/curve_result.py +110 -0
- bencher/results/holoview_results/distribution_result/__init__.py +0 -0
- bencher/results/holoview_results/distribution_result/box_whisker_result.py +73 -0
- bencher/results/holoview_results/distribution_result/distribution_result.py +109 -0
- bencher/results/holoview_results/distribution_result/scatter_jitter_result.py +92 -0
- bencher/results/holoview_results/distribution_result/violin_result.py +70 -0
- bencher/results/holoview_results/heatmap_result.py +319 -0
- bencher/results/holoview_results/holoview_result.py +346 -0
- bencher/results/holoview_results/line_result.py +240 -0
- bencher/results/holoview_results/scatter_result.py +107 -0
- bencher/results/holoview_results/surface_result.py +158 -0
- bencher/results/holoview_results/table_result.py +14 -0
- bencher/results/holoview_results/tabulator_result.py +20 -0
- bencher/results/optuna_result.py +30 -115
- bencher/results/video_controls.py +38 -0
- bencher/results/video_result.py +39 -36
- bencher/results/video_summary.py +2 -2
- bencher/results/{plotly_result.py → volume_result.py} +29 -8
- bencher/utils.py +175 -26
- bencher/variables/inputs.py +122 -15
- bencher/video_writer.py +2 -1
- bencher/worker_job.py +31 -3
- {holobench-1.41.0.dist-info → holobench-1.43.0.dist-info}/METADATA +24 -24
- holobench-1.43.0.dist-info/RECORD +147 -0
- bencher/example/example_levels2.py +0 -37
- bencher/example/inputs_1D/example_1_in_1_out.py +0 -62
- bencher/example/inputs_1D/example_1_in_2_out.py +0 -63
- bencher/example/inputs_1D/example_1_in_2_out_repeats.py +0 -61
- bencher/results/holoview_result.py +0 -796
- bencher/results/panel_result.py +0 -41
- holobench-1.41.0.dist-info/RECORD +0 -114
- {holobench-1.41.0.dist-info → holobench-1.43.0.dist-info}/WHEEL +0 -0
- {holobench-1.41.0.dist-info → holobench-1.43.0.dist-info}/licenses/LICENSE +0 -0
bencher/bench_runner.py
CHANGED
```diff
@@ -13,15 +13,28 @@ class Benchable(Protocol):
 
 
 class BenchRunner:
-    """A class to manage running multiple benchmarks in groups, or running the same benchmark but at multiple resolutions
+    """A class to manage running multiple benchmarks in groups, or running the same benchmark but at multiple resolutions.
+
+    BenchRunner provides a framework for organizing, configuring, and executing multiple
+    benchmark runs with different parameters. It supports progressive refinement of benchmark
+    resolution, caching of results, and publication of results to various formats.
+    """
 
     def __init__(
         self,
         name: str,
-        bench_class=None,
+        bench_class: ParametrizedSweep = None,
         run_cfg: BenchRunCfg = BenchRunCfg(),
         publisher: Callable = None,
     ) -> None:
+        """Initialize a BenchRunner instance.
+
+        Args:
+            name (str): The name of the benchmark runner, used for reports and caching
+            bench_class (ParametrizedSweep, optional): An initial benchmark class to add. Defaults to None.
+            run_cfg (BenchRunCfg, optional): Configuration for benchmark execution. Defaults to BenchRunCfg().
+            publisher (Callable, optional): Function to publish results. Defaults to None.
+        """
         self.name = name
         self.run_cfg = BenchRunner.setup_run_cfg(run_cfg)
         self.bench_fns = []
@@ -33,8 +46,21 @@ class BenchRunner:
 
     @staticmethod
     def setup_run_cfg(
-        run_cfg: BenchRunCfg = BenchRunCfg(), level: int = 2, cache_results=True
+        run_cfg: BenchRunCfg = BenchRunCfg(), level: int = 2, cache_results: bool = True
     ) -> BenchRunCfg:
+        """Configure benchmark run settings with reasonable defaults.
+
+        Creates a copy of the provided configuration with the specified level and
+        caching behavior settings applied.
+
+        Args:
+            run_cfg (BenchRunCfg, optional): Base configuration to modify. Defaults to BenchRunCfg().
+            level (int, optional): Benchmark sampling resolution level. Defaults to 2.
+            cache_results (bool, optional): Whether to enable result caching. Defaults to True.
+
+        Returns:
+            BenchRunCfg: A new configuration object with the specified settings
+        """
         run_cfg_out = deepcopy(run_cfg)
         run_cfg_out.cache_samples = cache_results
         run_cfg_out.only_hash_tag = cache_results
@@ -46,7 +72,17 @@ class BenchRunner:
         class_instance: ParametrizedSweep,
         run_cfg: BenchRunCfg = BenchRunCfg(),
         report: BenchReport = BenchReport(),
-    ):
+    ) -> Bench:
+        """Create a Bench instance from a ParametrizedSweep class.
+
+        Args:
+            class_instance (ParametrizedSweep): The parametrized sweep class instance to benchmark
+            run_cfg (BenchRunCfg, optional): Configuration for benchmark execution. Defaults to BenchRunCfg().
+            report (BenchReport, optional): Report to store benchmark results. Defaults to BenchReport().
+
+        Returns:
+            Bench: A configured Bench instance ready to run the benchmark
+        """
         return Bench(
             f"bench_{class_instance.name}",
             class_instance,
@@ -55,9 +91,23 @@ class BenchRunner:
         )
 
     def add_run(self, bench_fn: Benchable) -> None:
+        """Add a benchmark function to be executed by this runner.
+
+        Args:
+            bench_fn (Benchable): A callable that implements the Benchable protocol
+        """
         self.bench_fns.append(bench_fn)
 
     def add_bench(self, class_instance: ParametrizedSweep) -> None:
+        """Add a parametrized sweep class instance as a benchmark.
+
+        Creates and adds a function that will create a Bench instance from the
+        provided parametrized sweep class when executed.
+
+        Args:
+            class_instance (ParametrizedSweep): The parametrized sweep to benchmark
+        """
+
         def cb(run_cfg: BenchRunCfg, report: BenchReport) -> BenchCfg:
             bench = BenchRunner.from_parametrized_sweep(
                 class_instance, run_cfg=run_cfg, report=report
@@ -79,7 +129,7 @@ class BenchRunner:
         save: bool = False,
         grouped: bool = True,
         cache_results: bool = True,
-    ) -> List[
+    ) -> List[BenchCfg]:
         """This function controls how a benchmark or a set of benchmarks are run. If you are only running a single benchmark it can be simpler to just run it directly, but if you are running several benchmarks together and want them to be sampled at different levels of fidelity or published together in a single report this function enables that workflow. If you have an expensive function, it can be useful to view low fidelity results as they are computed but also continue to compute higher fidelity results while reusing previously computed values. The parameters min_level and max_level let you specify how to progressivly increase the sampling resolution of the benchmark sweep. By default cache_results=True so that previous values are reused.
 
         Args:
@@ -89,14 +139,14 @@ class BenchRunner:
             repeats (int, optional): The number of times to run the entire benchmarking procedure. Defaults to 1.
             run_cfg (BenchRunCfg, optional): benchmark run configuration. Defaults to None.
             publish (bool, optional): Publish the results to git, requires a publish url to be set up. Defaults to False.
-            debug (bool, optional):
+            debug (bool, optional): Enable debug output during publishing. Defaults to False.
             show (bool, optional): show the results in the local web browser. Defaults to False.
             save (bool, optional): save the results to disk in index.html. Defaults to False.
             grouped (bool, optional): Produce a single html page with all the benchmarks included. Defaults to True.
             cache_results (bool, optional): Use the sample cache to reused previous results. Defaults to True.
 
         Returns:
-            List[BenchCfg]: A list of
+            List[BenchCfg]: A list of benchmark configuration objects with results
         """
         if run_cfg is None:
             run_cfg = deepcopy(self.run_cfg)
@@ -126,7 +176,18 @@ class BenchRunner:
                 self.show_publish(report_level, show, publish, save, debug)
         return self.results
 
-    def show_publish(
+    def show_publish(
+        self, report: BenchReport, show: bool, publish: bool, save: bool, debug: bool
+    ) -> None:
+        """Handle publishing, saving, and displaying of a benchmark report.
+
+        Args:
+            report (BenchReport): The benchmark report to process
+            show (bool): Whether to display the report in a browser
+            publish (bool): Whether to publish the report
+            save (bool): Whether to save the report to disk
+            debug (bool): Whether to enable debug mode for publishing
+        """
         if save:
             report.save_index()
         if publish and self.publisher is not None:
@@ -145,7 +206,22 @@ class BenchRunner:
         publish: bool = False,
         save: bool = False,
         debug: bool = False,
-    ):
+    ) -> None:
+        """Display or publish a specific benchmark report.
+
+        This is a convenience method to show, publish, or save a specific report.
+        If no report is provided, it will use the most recent result.
+
+        Args:
+            report (BenchReport, optional): The report to process. Defaults to None (most recent).
+            show (bool, optional): Whether to display in browser. Defaults to True.
+            publish (bool, optional): Whether to publish the report. Defaults to False.
+            save (bool, optional): Whether to save to disk. Defaults to False.
+            debug (bool, optional): Enable debug mode for publishing. Defaults to False.
+
+        Raises:
+            RuntimeError: If no report is specified and no results are available
+        """
         if report is None:
             if len(self.results) > 0:
                 report = self.results[-1].report
@@ -153,9 +229,19 @@ class BenchRunner:
                 raise RuntimeError("no reports to show")
         self.show_publish(report=report, show=show, publish=publish, save=save, debug=debug)
 
-    def shutdown(self):
+    def shutdown(self) -> None:
+        """Stop all running panel servers launched by this benchmark runner.
+
+        This method ensures that any web servers started to display benchmark results
+        are properly shut down.
+        """
         while self.servers:
             self.servers.pop().stop()
 
     def __del__(self) -> None:
+        """Destructor that ensures proper cleanup of resources.
+
+        Automatically calls shutdown() to stop any running servers when the
+        BenchRunner instance is garbage collected.
+        """
         self.shutdown()
```