holobench 1.28.1__py3-none-any.whl → 1.30.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- bencher/__init__.py +1 -0
- bencher/bench_cfg.py +9 -9
- bencher/bench_plot_server.py +1 -1
- bencher/bench_runner.py +1 -1
- bencher/bencher.py +23 -11
- bencher/example/benchmark_data.py +1 -1
- bencher/example/example_categorical.py +1 -1
- bencher/example/example_custom_sweep.py +1 -1
- bencher/example/example_custom_sweep2.py +1 -1
- bencher/example/example_dataframe.py +47 -0
- bencher/example/example_image.py +5 -7
- bencher/example/example_image1.py +80 -0
- bencher/example/example_levels.py +1 -1
- bencher/example/example_levels2.py +1 -1
- bencher/example/example_pareto.py +1 -1
- bencher/example/example_sample_cache_context.py +2 -2
- bencher/example/example_simple.py +5 -5
- bencher/example/meta/example_meta.py +1 -1
- bencher/example/shelved/example_kwargs.py +1 -1
- bencher/plotting/plot_filter.py +2 -2
- bencher/plotting/plt_cnt_cfg.py +10 -3
- bencher/results/bench_result.py +3 -1
- bencher/results/bench_result_base.py +58 -9
- bencher/results/composable_container/composable_container_base.py +2 -2
- bencher/results/composable_container/composable_container_dataframe.py +52 -0
- bencher/results/dataset_result.py +227 -0
- bencher/results/optuna_result.py +7 -6
- bencher/variables/inputs.py +5 -5
- bencher/variables/parametrised_sweep.py +2 -2
- bencher/variables/results.py +29 -1
- bencher/variables/sweep_base.py +1 -1
- bencher/variables/time.py +3 -3
- bencher/video_writer.py +1 -1
- {holobench-1.28.1.dist-info → holobench-1.30.2.dist-info}/METADATA +72 -39
- {holobench-1.28.1.dist-info → holobench-1.30.2.dist-info}/RECORD +38 -36
- {holobench-1.28.1.dist-info → holobench-1.30.2.dist-info}/WHEEL +1 -2
- holobench-1.28.1.data/data/share/bencher/package.xml +0 -33
- holobench-1.28.1.dist-info/top_level.txt +0 -1
- {holobench-1.28.1.dist-info → holobench-1.30.2.dist-info/licenses}/LICENSE +0 -0
- {holobench-1.28.1.data/data/share/ament_index/resource_index/packages → resource}/bencher +0 -0
bencher/__init__.py
CHANGED
bencher/bench_cfg.py
CHANGED
@@ -69,7 +69,7 @@ class BenchRunCfg(BenchPlotSrvCfg):
 
     serve_pandas_flat: bool = param.Boolean(
         True,
-        doc="Serve a
+        doc="Serve a flattened pandas summary on the results webpage. If you have a large dataset consider setting this to false if the page loading is slow",
     )
 
     serve_xarray: bool = param.Boolean(
@@ -78,7 +78,7 @@ class BenchRunCfg(BenchPlotSrvCfg):
     )
 
     auto_plot: bool = param.Boolean(
-        True, doc="
+        True, doc=" Automatically dedeuce the best type of plot for the results."
     )
 
     raise_duplicate_exception: bool = param.Boolean(False, doc=" Used to debug unique plot names.")
@@ -131,13 +131,13 @@ class BenchRunCfg(BenchPlotSrvCfg):
 
     render_plotly = param.Boolean(
         True,
-        doc="Plotly and Bokeh don't play nicely together, so by default pre-render plotly figures to a non dynamic version so that bokeh plots correctly. If you want interactive 3D graphs, set this to true but be aware that your 2D interactive graphs will
+        doc="Plotly and Bokeh don't play nicely together, so by default pre-render plotly figures to a non dynamic version so that bokeh plots correctly. If you want interactive 3D graphs, set this to true but be aware that your 2D interactive graphs will probably stop working.",
     )
 
     level = param.Integer(
         default=0,
         bounds=[0, 12],
-        doc="The level parameter is a method of defining the number samples to sweep over in a variable agnostic way, i.e you don't need to
+        doc="The level parameter is a method of defining the number samples to sweep over in a variable agnostic way, i.e you don't need to specify the number of samples for each variable as they are calculated dynamically from the sampling level. See example_level.py for more information.",
     )
 
     run_tag = param.String(
@@ -163,10 +163,10 @@ class BenchRunCfg(BenchPlotSrvCfg):
     plot_size = param.Integer(default=None, doc="Sets the width and height of the plot")
     plot_width = param.Integer(
         default=None,
-        doc="Sets with width of the plots, this will
+        doc="Sets with width of the plots, this will override the plot_size parameter",
     )
     plot_height = param.Integer(
-        default=None, doc="Sets the height of the plot, this will
+        default=None, doc="Sets the height of the plot, this will override the plot_size parameter"
     )
 
     @staticmethod
@@ -291,7 +291,7 @@ class BenchCfg(BenchRunCfg):
 
     tag: str = param.String(
         "",
-        doc="Use tags to group different benchmarks together. By default benchmarks are considered distinct from
+        doc="Use tags to group different benchmarks together. By default benchmarks are considered distinct from each other and are identified by the hash of their name and inputs, constants and results and tag, but you can optionally change the hash value to only depend on the tag. This way you can have multiple unrelated benchmarks share values with each other based only on the tag value.",
     )
 
     hash_value: str = param.String(
@@ -311,10 +311,10 @@ class BenchCfg(BenchRunCfg):
         self.iv_repeat = None
 
     def hash_persistent(self, include_repeats) -> str:
-        """override the default hash function
+        """override the default hash function because the default hash function does not return the same value for the same inputs. It references internal variables that are unique per instance of BenchCfg
 
         Args:
-            include_repeats (bool) : by default include repeats as part of the hash
+            include_repeats (bool) : by default include repeats as part of the hash except with using the sample cache
         """
 
         if include_repeats:
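As a rough usage sketch of the run-configuration options whose documentation changed above (the attribute names come from the BenchRunCfg diff; the values and the fact that they are set attribute-by-attribute are illustrative):

```python
import bencher as bch

# Illustrative values only; attribute names are taken from the BenchRunCfg diff above.
run_cfg = bch.BenchRunCfg()
run_cfg.level = 4                  # sample density applied to every input variable
run_cfg.auto_plot = True           # let bencher deduce the best plot type for the results
run_cfg.plot_width = 800           # overrides plot_size for the width
run_cfg.plot_height = 400          # overrides plot_size for the height
run_cfg.serve_pandas_flat = False  # skip the flattened pandas summary if the page loads slowly
run_cfg.render_plotly = True       # pre-render plotly figures so bokeh plots display correctly
```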
bencher/bench_plot_server.py
CHANGED
@@ -84,7 +84,7 @@ class BenchPlotServer:
         Args:
             bench_cfg (BenchCfg): benchmark results
             plots_instance (List[pn.panel]): list of panel objects to display
-            port (int): use a fixed port to
+            port (int): use a fixed port to launch the server
         """
 
         # suppress verbose tornado and bokeh output
bencher/bench_runner.py
CHANGED
@@ -85,7 +85,7 @@ class BenchRunner:
             run_cfg (BenchRunCfg, optional): benchmark run configuration. Defaults to None.
             publish (bool, optional): Publish the results to git, requires a publish url to be set up. Defaults to False.
             debug (bool, optional): _description_. Defaults to False.
-            show (bool, optional): show the results in the local web
+            show (bool, optional): show the results in the local web browser. Defaults to False.
             save (bool, optional): save the results to disk in index.html. Defaults to False.
             grouped (bool, optional): Produce a single html page with all the benchmarks included. Defaults to True.
             use_cache (bool, optional): Use the sample cache to reused previous results. Defaults to True.
bencher/bencher.py
CHANGED
@@ -30,6 +30,7 @@ from bencher.variables.results import (
     ResultString,
     ResultContainer,
     ResultReference,
+    ResultDataSet,
 )
 from bencher.results.bench_result import BenchResult
 from bencher.variables.parametrised_sweep import ParametrizedSweep
@@ -177,7 +178,7 @@ class Bench(BenchPlotServer):
         self.plot = True
 
     def add_plot_callback(self, callback: Callable[[BenchResult], pn.panel], **kwargs) -> None:
-        """Add a plotting callback that will be called on any result produced when calling a sweep
+        """Add a plotting callback that will be called on any result produced when calling a sweep function. You can pass additional arguments to the plotting function with kwargs. e.g. add_plot_callback(bch.BenchResult.to_video_grid,)
 
         Args:
             callback (Callable[[BenchResult], pn.panel]): _description_
@@ -497,7 +498,7 @@ class Bench(BenchPlotServer):
         """check that a variable is a subclass of param
 
         Args:
-            variable (param.Parameter): the
+            variable (param.Parameter): the variable to check
             var_type (str): a string representation of the variable type for better error messages
 
         Raises:
@@ -559,7 +560,7 @@ class Bench(BenchPlotServer):
         """Load historical data from a cache if over_time=true
 
         Args:
-            ds (xr.Dataset): Freshly
+            ds (xr.Dataset): Freshly calculated data
             bench_cfg_hash (int): Hash of the input variables used to generate the data
             clear_history (bool): Optionally clear the history
 
@@ -592,7 +593,7 @@ class Bench(BenchPlotServer):
             time_src (datetime | str): a representation of the sample time
 
         Returns:
-            tuple[BenchResult, List, List]: bench_result, function
+            tuple[BenchResult, List, List]: bench_result, function inputs, dimension names
         """
 
         if time_src is None:
@@ -610,14 +611,15 @@ class Bench(BenchPlotServer):
         function_inputs = list(
             zip(product(*dims_cfg.dim_ranges_index), product(*dims_cfg.dim_ranges))
         )
-        # xarray stores K N-dimensional arrays of data. Each array is named and in this case we have
+        # xarray stores K N-dimensional arrays of data. Each array is named and in this case we have an ND array for each result variable
         data_vars = {}
+        dataset_list = []
 
         for rv in bench_cfg.result_vars:
             if isinstance(rv, ResultVar):
                 result_data = np.full(dims_cfg.dims_size, np.nan, dtype=float)
                 data_vars[rv.name] = (dims_cfg.dims_name, result_data)
-            if isinstance(rv, ResultReference):
+            if isinstance(rv, (ResultReference, ResultDataSet)):
                 result_data = np.full(dims_cfg.dims_size, -1, dtype=int)
                 data_vars[rv.name] = (dims_cfg.dims_name, result_data)
             if isinstance(
@@ -625,7 +627,8 @@ class Bench(BenchPlotServer):
             ):
                 result_data = np.full(dims_cfg.dims_size, "NAN", dtype=object)
                 data_vars[rv.name] = (dims_cfg.dims_name, result_data)
-
+
+            elif type(rv) is ResultVec:
                 for i in range(rv.size):
                     result_data = np.full(dims_cfg.dims_size, np.nan)
                     data_vars[rv.index_name(i)] = (dims_cfg.dims_name, result_data)
@@ -633,6 +636,7 @@ class Bench(BenchPlotServer):
         bench_res = BenchResult(bench_cfg)
         bench_res.ds = xr.Dataset(data_vars=data_vars, coords=dims_cfg.coords)
         bench_res.ds_dynamic = self.ds_dynamic
+        bench_res.dataset_list = dataset_list
         bench_res.setup_object_index()
 
         return bench_res, function_inputs, dims_cfg.dims_name
@@ -770,6 +774,13 @@ class Bench(BenchPlotServer):
                 ),
             ):
                 set_xarray_multidim(bench_res.ds[rv.name], worker_job.index_tuple, result_value)
+            elif isinstance(rv, ResultDataSet):
+                bench_res.dataset_list.append(result_value)
+                set_xarray_multidim(
+                    bench_res.ds[rv.name],
+                    worker_job.index_tuple,
+                    len(bench_res.dataset_list) - 1,
+                )
             elif isinstance(rv, ResultReference):
                 bench_res.object_index.append(result_value)
                 set_xarray_multidim(
@@ -777,6 +788,7 @@ class Bench(BenchPlotServer):
                     worker_job.index_tuple,
                     len(bench_res.object_index) - 1,
                 )
+
             elif isinstance(rv, ResultVec):
                 if isinstance(result_value, (list, np.ndarray)):
                     if len(result_value) == rv.size:
@@ -818,14 +830,14 @@ class Bench(BenchPlotServer):
 
         Args:
             bench_cfg (BenchCfg):
-            input_var (ParametrizedSweep): The
+            input_var (ParametrizedSweep): The variable to extract metadata from
         """
 
         for rv in bench_res.bench_cfg.result_vars:
-            if type(rv)
+            if type(rv) is ResultVar:
                 bench_res.ds[rv.name].attrs["units"] = rv.units
                 bench_res.ds[rv.name].attrs["long_name"] = rv.name
-            elif type(rv)
+            elif type(rv) is ResultVec:
                 for i in range(rv.size):
                     bench_res.ds[rv.index_name(i)].attrs["units"] = rv.units
                     bench_res.ds[rv.index_name(i)].attrs["long_name"] = rv.name
@@ -840,7 +852,7 @@ class Bench(BenchPlotServer):
             dsvar.attrs["description"] = input_var.__doc__
 
     def report_results(self, bench_cfg: BenchCfg, print_xarray: bool, print_pandas: bool):
-        """Optionally display the
+        """Optionally display the calculated benchmark data as either as pandas, xarray or plot
 
         Args:
             bench_cfg (BenchCfg):
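The new ResultDataSet handling above follows the same indexing pattern already used for ResultReference: the heavyweight per-sample object is appended to a side list and only its integer position is written into the xarray data cube. A minimal self-contained sketch of that pattern (generic names, not the library's internals):

```python
import numpy as np
import xarray as xr

# Side list holding the actual per-sample objects (datasets, references, ...).
dataset_list = []
# Integer cube with one cell per sample point; -1 marks "no result stored yet".
cube = np.full((3, 2), -1, dtype=int)


def store(index_tuple, result_value):
    """Append the object and record its index at the sample's position in the cube."""
    dataset_list.append(result_value)
    cube[index_tuple] = len(dataset_list) - 1


store((0, 1), {"example": "payload"})
ds = xr.Dataset({"result_df": (("x", "y"), cube)})
print(ds["result_df"].values)  # the stored integers index back into dataset_list
```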
bencher/example/benchmark_data.py
CHANGED
@@ -1,6 +1,6 @@
 """This file contains an example of how to define benchmarking parameters sweeps. Categorical values are defined as enums and passed to EnumSweep classes, other types of sweeps are defined by their respective classes.
 
-You can define a subclass which contains an input configuration which can be passed to a function in a type safe way. You can combine the subclass with a higher level class which contains more
+You can define a subclass which contains an input configuration which can be passed to a function in a type safe way. You can combine the subclass with a higher level class which contains more configuration parameters. This is to help manage the complexity of large configuration/parameter spaces.
 """
 
 import math
bencher/example/example_categorical.py
CHANGED
@@ -86,7 +86,7 @@ def example_categorical(
         ],
         title="Categorical 3D Example Over Time",
         result_vars=[ExampleBenchCfgOut.param.out_sin],
-        description="""Lastly, what if you want to track these distributions over time? Set over_time=True and bencher will cache and display historical
+        description="""Lastly, what if you want to track these distributions over time? Set over_time=True and bencher will cache and display historical results alongside the latest result. Use clear_history=True to clear that cache.""",
         post_description="The output shows faceted line plot with confidence intervals for the mean value over time.",
         run_cfg=run_cfg,
     )
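A minimal sketch of the over_time workflow described above (the run configuration mirrors the one used later in example_simple.py; the values and the commented entry-point call are illustrative):

```python
import bencher as bch

# Each run is cached and plotted alongside previous runs; pass clear_history=True
# to discard the accumulated history and start again.
run_cfg = bch.BenchRunCfg(repeats=10, over_time=True, clear_history=False)
# example_categorical(run_cfg).report.show()  # hypothetical call into the example above
```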
bencher/example/example_custom_sweep.py
CHANGED
@@ -23,7 +23,7 @@ class Square(bch.ParametrizedSweep):
 def example_custom_sweep(
     run_cfg: bch.BenchRunCfg = bch.BenchRunCfg(), report: bch.BenchReport = bch.BenchReport()
 ) -> bch.Bench:
-    """This example shows how to define a custom set of value to sample from
+    """This example shows how to define a custom set of value to sample from instead of a uniform sweep
 
     Args:
         run_cfg (BenchRunCfg): configuration of how to perform the param sweep
bencher/example/example_custom_sweep2.py
CHANGED
@@ -17,7 +17,7 @@ class Square(bch.ParametrizedSweep):
 def example_custom_sweep2(
     run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None
 ) -> bch.Bench:
-    """This example shows how to define a custom set of value to sample from
+    """This example shows how to define a custom set of value to sample from instead of a uniform sweep
 
     Args:
         run_cfg (BenchRunCfg): configuration of how to perform the param sweep
bencher/example/example_dataframe.py
ADDED
@@ -0,0 +1,47 @@
+import bencher as bch
+
+import xarray as xr
+import numpy as np
+import holoviews as hv
+
+
+class ExampleMergeDataset(bch.ParametrizedSweep):
+    value = bch.FloatSweep(default=0, bounds=[0, 10])
+    repeats_x = bch.IntSweep(default=2, bounds=[2, 4])
+    # repeats_y = bch.IntSweep(default=2, bounds=[2, 4])
+
+    result_df = bch.ResultDataSet()
+
+    def __call__(self, **kwargs):
+        self.update_params_from_kwargs(**kwargs)
+        # First, create a DataArray from the vector
+        vector = [v + self.value for v in range(1, self.repeats_x)]
+        data_array = xr.DataArray(vector, dims=["index"], coords={"index": np.arange(len(vector))})
+        # Convert the DataArray to a Dataset
+        result_df = xr.Dataset({"result_df": data_array})
+        self.result_df = bch.ResultDataSet(result_df.to_pandas())
+        return super().__call__(**kwargs)
+
+
+def example_dataset(run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None):
+    bench = ExampleMergeDataset().to_bench(run_cfg, report)
+    res = bench.plot_sweep(input_vars=["value"], const_vars=dict(repeats_x=4))
+    # bench.report.append(res.to_panes(target_dimension=1))
+    # bench.report.append(res.to_panes(target_dimension=2))
+    # bench.reprt.append(res.to_video_grid
+    # # bch.BenchResult.to_video_grid,
+    # target_duration=0.06,
+    # compose_method_list=[
+    # bch.ComposeType.right,
+    # bch.ComposeType.right,
+    # bch.ComposeType.sequence,
+    # ],
+    # )
+    # bench.report.append(res.to_panes(container=hv.Bars,target_dimension=1))
+    # bench.report.append(res.to_panes(container=hv.Curve))
+    bench.report.append(res.to_dataset1(container=hv.Curve))
+    return bench
+
+
+if __name__ == "__main__":
+    example_dataset().report.show()
bencher/example/example_image.py
CHANGED
@@ -121,14 +121,12 @@ if __name__ == "__main__":
 
     # res = bench.sweep(input_vars=["sides", "radius"])
 
-    # bench.report.append(res.to_heatmap(target_dimension=3))
-
     bench.plot_sweep(input_vars=["sides"])
     bench.plot_sweep(input_vars=["sides", "color"])
 
-    bench.plot_sweep(input_vars=["sides", "radius"])
-
-
+    res = bench.plot_sweep(input_vars=["sides", "radius"])
+    bench.report.append(res.to_heatmap(target_dimension=3))
+    bench.report.append(res.to_line(target_dimension=1))
 
     return bench
 
@@ -148,8 +146,8 @@ if __name__ == "__main__":
     # ex_run_cfg.debug = True
     # ex_run_cfg.repeats = 2
     ex_run_cfg.level = 4
-    example_image_vid(ex_run_cfg).report.show()
-
+    # example_image_vid(ex_run_cfg).report.show()
+    simple().report.show()
 
     # example_image_vid_sequential(ex_run_cfg).report.show()
     # example_image(ex_run_cfg).report.show()
bencher/example/example_image1.py
ADDED
@@ -0,0 +1,80 @@
+import bencher as bch
+import numpy as np
+import math
+import matplotlib.pyplot as plt
+
+
+def polygon_points(radius: float, sides: int, start_angle: float):
+    points = []
+    for ang in np.linspace(0, 360, sides + 1):
+        angle = math.radians(start_angle + ang)
+        points.append(([math.sin(angle) * radius, math.cos(angle) * radius]))
+    return points
+
+
+class BenchPolygons(bch.ParametrizedSweep):
+    sides = bch.IntSweep(default=3, bounds=(3, 7))
+    radius = bch.FloatSweep(default=1, bounds=(0.2, 1))
+    linewidth = bch.FloatSweep(default=1, bounds=(1, 10))
+    linestyle = bch.StringSweep(["solid", "dashed", "dotted"])
+    color = bch.StringSweep(["red", "green", "blue"])
+    start_angle = bch.FloatSweep(default=0, bounds=[0, 360])
+    polygon = bch.ResultImage()
+    polygon_small = bch.ResultImage()
+
+    area = bch.ResultVar()
+    side_length = bch.ResultVar()
+
+    def __call__(self, **kwargs):
+        self.update_params_from_kwargs(**kwargs)
+        points = polygon_points(self.radius, self.sides, self.start_angle)
+        # self.hmap = hv.Curve(points)
+        self.polygon = self.points_to_polygon_png(points, bch.gen_image_path("polygon"), dpi=30)
+        self.polygon_small = self.points_to_polygon_png(
+            points, bch.gen_image_path("polygon"), dpi=10
+        )
+
+        self.side_length = 2 * self.radius * math.sin(math.pi / self.sides)
+        self.area = (self.sides * self.side_length**2) / (4 * math.tan(math.pi / self.sides))
+        return super().__call__()
+
+    def points_to_polygon_png(self, points: list[float], filename: str, dpi):
+        """Draw a closed polygon and save to png"""
+        fig = plt.figure(frameon=False)
+        ax = plt.Axes(fig, [0.0, 0.0, 1.0, 1.0], frameon=False)
+        ax.set_axis_off()
+        ax.plot(
+            [p[0] for p in points],
+            [p[1] for p in points],
+            linewidth=self.linewidth,
+            linestyle=self.linestyle,
+            color=self.color,
+        )
+        ax.set_xlim(-1, 1)
+        ax.set_ylim(-1, 1)
+
+        ax.set_aspect("equal")
+        fig.add_axes(ax)
+        fig.savefig(filename, dpi=dpi)
+
+        return filename
+
+
+def example_image_vid_sequential1(
+    run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None
+) -> bch.Bench:
+    bench = BenchPolygons().to_bench(run_cfg, report)
+    res = bench.plot_sweep(input_vars=["sides"])
+
+    bench.report.append(res.to_panes(zip_results=True))
+
+    return bench
+
+
+if __name__ == "__main__":
+    ex_run_cfg = bch.BenchRunCfg()
+    ex_run_cfg.use_sample_cache = True
+    ex_run_cfg.overwrite_sample_cache = True
+    ex_run_cfg.level = 3
+
+    example_image_vid_sequential1(ex_run_cfg).report.show()
bencher/example/example_levels.py
CHANGED
@@ -81,7 +81,7 @@ def run_levels_1D(bench: bch.Bench) -> bch.Bench:
     bench.report.append(row)
 
     bench.report.append_markdown(
-        "Level 1 returns a single point at the lower bound of the parameter. Level 2 uses the
+        "Level 1 returns a single point at the lower bound of the parameter. Level 2 uses the upper and lower bounds of the parameter. All subsequent levels are created by adding a sample between each previously calculated sample to ensure that all previous values can be reused while retaining an equal sample spacing. The following plots show the sample points as circles and the corresponding plot of a sin function sampled at that level.",
         width=600,
     )
 
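The sample counts implied by that description can be worked out directly: one point at level 1, the two bounds at level 2, and a midpoint inserted between every pair of neighbouring samples at each later level. The closed form below is inferred from that text rather than taken from the library:

```python
def samples_at_level(level: int) -> int:
    # Level 1: lower bound only. Level 2: both bounds. Each further level inserts a
    # midpoint between every pair of neighbouring samples, so counts go 2 -> 3 -> 5 -> 9 ...
    if level <= 1:
        return 1
    return 2 ** (level - 2) + 1


print([samples_at_level(lvl) for lvl in range(1, 7)])  # [1, 2, 3, 5, 9, 17]
```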
bencher/example/example_levels2.py
CHANGED
@@ -15,7 +15,7 @@ class Square(bch.ParametrizedSweep):
 
 
 def example_levels2(run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None) -> bch.Bench:
-    """This example shows how to define a custom set of value to sample from
+    """This example shows how to define a custom set of value to sample from instead of a uniform sweep
 
     Args:
         run_cfg (BenchRunCfg): configuration of how to perform the param sweep
bencher/example/example_pareto.py
CHANGED
@@ -29,7 +29,7 @@ def example_pareto(
 
     res = bench.plot_sweep(
         title="Pareto Optimisation with Optuna",
-        description="This example shows how to plot the pareto front of the tradeoff between multiple criteria. When multiple result variable are defined, and use_optuna=True a pareto plot and the relative importance of each input variable on the output criteria is plotted. A summary of the points on the pareto front is printed as well. You can use the pareto plot to decide the how to trade off one objective for another. Pareto plots are
+        description="This example shows how to plot the pareto front of the tradeoff between multiple criteria. When multiple result variable are defined, and use_optuna=True a pareto plot and the relative importance of each input variable on the output criteria is plotted. A summary of the points on the pareto front is printed as well. You can use the pareto plot to decide the how to trade off one objective for another. Pareto plots are supported for 2D and 3D. If you have more than 3 result variables the first 3 are selected for the pareto plot. Plotting 4D surfaces is left as an exercise to the reader",
         input_vars=[
             ExampleBenchCfgIn.param.theta,
             ExampleBenchCfgIn.param.offset,
bencher/example/example_sample_cache_context.py
CHANGED
@@ -82,7 +82,7 @@ def example_cache_context() -> bch.Bench:
         tag="example_tag1",
     )
 
-    # these values have not been
+    # these values have not been calculated before so there should be 1 fn call
     assert_call_counts(bencher, run_cfg, wrapper_calls=1, fn_calls=1, cache_calls=0)
 
     # now create a new benchmark that calculates the values of the previous two benchmarks. The tag is the same so those values will be loaded from the cache instead of getting calculated again
@@ -106,7 +106,7 @@ def example_cache_context() -> bch.Bench:
         tag="example_tag2",
     )
 
-    # Both calls are
+    # Both calls are calculated because the tag is different so they don't hit the cache
     assert_call_counts(bencher, run_cfg, wrapper_calls=2, fn_calls=2, cache_calls=0)
 
     return bencher
bencher/example/example_simple.py
CHANGED
@@ -12,7 +12,7 @@ from strenum import StrEnum
 import bencher as bch
 
 
-# define a class with the output variables you want to benchmark. It must inherit from ParametrizedSweep (which inherits from param.Parametrized). Param is a python library that allows you to track metadata about parameters. I would recommend reading at least the intro: https://param.holoviz.org/. I have extended param with some extra metadata such is the units of the variable so that it can
+# define a class with the output variables you want to benchmark. It must inherit from ParametrizedSweep (which inherits from param.Parametrized). Param is a python library that allows you to track metadata about parameters. I would recommend reading at least the intro: https://param.holoviz.org/. I have extended param with some extra metadata such is the units of the variable so that it can automatically be plotted.
 class OutputCfg(bch.ParametrizedSweep):
     """A class for defining what variables the benchmark function returns and metadata on those variables"""
 
@@ -41,7 +41,7 @@ class InputCfg(bch.ParametrizedSweep):
     # The variables must be defined as one of the Sweep types, i.e, FloatSweep, IntSweep, EnumSweep from bencher.bench_vars
     # theta = FloatSweep(default=0, bounds=[0, math.pi], doc="Input angle", units="rad", samples=30)
 
-    # Define sweep variables by passing in an enum class name. The first element of the enum is the default by convention, but you can
+    # Define sweep variables by passing in an enum class name. The first element of the enum is the default by convention, but you can override the default in the constructor
     algo_setting_enum = bch.EnumSweep(AlgoSetting, default=AlgoSetting.poor)
 
     # In this case there are no units so its marked as unitless or ul. You can define how many evenly distributed samples to sample the parameter with
@@ -66,7 +66,7 @@ class InputCfg(bch.ParametrizedSweep):
 
     match cfg.algo_setting_enum:
         case AlgoSetting.noisy:
-            # add some random noise to the output. When your
+            # add some random noise to the output. When your algorithm has noisy output it often is an indication that something is not quite right. The graphs should show that you want to avoid the "noisy" setting in your algorithm
             output.accuracy += random.uniform(-10, 10)
         case AlgoSetting.optimum:
             output.accuracy += 30 # This is the setting with the best performance, and characterising that is is the goal of the benchmarking
@@ -122,9 +122,9 @@ if __name__ == "__main__":
         result_vars=[OutputCfg.param.accuracy],
         const_vars=[(InputCfg.param.algo_setting_float, 1.33)],
         title="Simple example 1D sweep over time",
-        description="""Once you have found the optimal settings for your algorithm you want to make sure that the performance is not lost over time. You can set variables to a constant value and in this case the float value is set to its optimum value. The first time this function is run only the results from sweeping the categorical value is plotted (the same as example 1), but the second time it is run a graph the values over time is shown. [Run the code again if you don't see a graph over time]. If the graphs over time shows long term changes (not just noise), it indicate there is another external factor that is affecting your
+        description="""Once you have found the optimal settings for your algorithm you want to make sure that the performance is not lost over time. You can set variables to a constant value and in this case the float value is set to its optimum value. The first time this function is run only the results from sweeping the categorical value is plotted (the same as example 1), but the second time it is run a graph the values over time is shown. [Run the code again if you don't see a graph over time]. If the graphs over time shows long term changes (not just noise), it indicate there is another external factor that is affecting your performance over time, i.e. dependencies changing, physical degradation of equipment, an unnoticed bug from a pull request etc...
 
-    This shows the basic features of bencher. These examples are purposefully simplified to demonstrate its features in isolation and don't reeally show the real advantages of bencher. If you only have a few inputs and outputs its not that complicated to throw together some plots of performance. The power of bencher is that when you have a system with many moving parts that all interact with
+    This shows the basic features of bencher. These examples are purposefully simplified to demonstrate its features in isolation and don't reeally show the real advantages of bencher. If you only have a few inputs and outputs its not that complicated to throw together some plots of performance. The power of bencher is that when you have a system with many moving parts that all interact with each other, teasing apart those influences becomes much harder because the parameter spaces combine quite quickly into a high dimensional mess. Bencher makes it easier to experiment with different combination of inputs to gain an intuition of the system performance. Bencher can plot up to 6D input natively and you can add custom plots if you have exotic data types or state spaces [WIP].
         """,
         post_description="",
         run_cfg=bch.BenchRunCfg(repeats=10, over_time=True, clear_history=False),
bencher/example/meta/example_meta.py
CHANGED
@@ -149,7 +149,7 @@ def example_meta(
     bench.plot_sweep(
         title="Meta Bench",
         description="""## All Combinations of Variable Sweeps and Resulting Plots
-This uses bencher to display all the
+This uses bencher to display all the combinations of plots bencher is able to produce""",
         input_vars=[
             bch.p("float_vars", [0, 1, 2, 3]),
             BenchMeta.param.categorical_vars,
bencher/example/shelved/example_kwargs.py
CHANGED
@@ -10,7 +10,7 @@
 #     trig_func: str = "sin",
 #     **kwargs, # pylint: disable=unused-argument
 # ) -> dict:
-#     """All the other examples use classes and parameters to define the inputs and outputs to the function. However it makes the code less flexible when integrating with other systems, so this example shows a more basic interface that accepts and returns dictionaries. The classes still need to be defined however because that is how the sweep and plotting settings are
+#     """All the other examples use classes and parameters to define the inputs and outputs to the function. However it makes the code less flexible when integrating with other systems, so this example shows a more basic interface that accepts and returns dictionaries. The classes still need to be defined however because that is how the sweep and plotting settings are calculated"""
 #     output = {}
 
 #     if trig_func == "sin":
bencher/plotting/plot_filter.py
CHANGED
@@ -6,7 +6,7 @@ import panel as pn
 
 
 class VarRange:
-    """A VarRange represents the bounded and unbounded ranges of integers. This class is used to define filters for various variable types. For example by defining cat_var = VarRange(0,0), calling matches(0) will return true, but any other integer will not match. You can also have unbounded ranges for example VarRange(2,None) will match to 2,3,4... up to infinity. for By default the lower and upper bounds are set to -1 so so that no matter what value is
+    """A VarRange represents the bounded and unbounded ranges of integers. This class is used to define filters for various variable types. For example by defining cat_var = VarRange(0,0), calling matches(0) will return true, but any other integer will not match. You can also have unbounded ranges for example VarRange(2,None) will match to 2,3,4... up to infinity. for By default the lower and upper bounds are set to -1 so so that no matter what value is passed to matches() will return false. Matches only takes 0 and positive integers."""
 
     def __init__(self, lower_bound: int = 0, upper_bound: int = -1) -> None:
         """
@@ -71,7 +71,7 @@ class PlotFilter:
 
 # @dataclass
 class PlotMatchesResult:
-    """Stores information about which
+    """Stores information about which properties match the requirements of a particular plotter"""
 
     def __init__(self, plot_filter: PlotFilter, plt_cnt_cfg: PltCntCfg, plot_name: str):
         match_info = []
bencher/plotting/plt_cnt_cfg.py
CHANGED
@@ -3,7 +3,13 @@ import param
 from bencher.bench_cfg import BenchCfg
 from bencher.variables.results import PANEL_TYPES
 
-from bencher.variables.inputs import
+from bencher.variables.inputs import (
+    IntSweep,
+    FloatSweep,
+    BoolSweep,
+    EnumSweep,
+    StringSweep,
+)
 from bencher.variables.time import TimeSnapshot
 
 
@@ -17,12 +23,13 @@ class PltCntCfg(param.Parameterized):
     vector_len = param.Integer(1, doc="The vector length of the return variable , scalars = len 1")
     result_vars = param.Integer(1, doc="The number result variables to plot") # todo remove
     panel_vars = param.List(doc="A list of panel results")
-    panel_cnt = param.Integer(0, doc="Number of results
+    panel_cnt = param.Integer(0, doc="Number of results represent as panel panes")
     repeats = param.Integer(0, doc="The number of repeat samples")
     inputs_cnt = param.Integer(0, doc="The number of repeat samples")
 
     print_debug = param.Boolean(
-        True,
+        True,
+        doc="Print debug information about why a filter matches this config or not",
     )
 
     @staticmethod
bencher/results/bench_result.py
CHANGED
@@ -7,15 +7,17 @@ from bencher.results.video_summary import VideoSummaryResult
 from bencher.results.panel_result import PanelResult
 from bencher.results.plotly_result import PlotlyResult
 from bencher.results.holoview_result import HoloviewResult
+from bencher.results.dataset_result import DataSetResult
 from bencher.utils import listify
 
 
-class BenchResult(PlotlyResult, HoloviewResult, VideoSummaryResult):
+class BenchResult(PlotlyResult, HoloviewResult, VideoSummaryResult, DataSetResult):
     """Contains the results of the benchmark and has methods to cast the results to various datatypes and graphical representations"""
 
     def __init__(self, bench_cfg) -> None:
         PlotlyResult.__init__(self, bench_cfg)
         HoloviewResult.__init__(self, bench_cfg)
+        # DataSetResult.__init__(self.bench_cfg)
 
     @staticmethod
     def default_plot_callbacks():
|