holobench 1.3.6__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- bencher/__init__.py +41 -0
- bencher/bench_cfg.py +462 -0
- bencher/bench_plot_server.py +100 -0
- bencher/bench_report.py +268 -0
- bencher/bench_runner.py +136 -0
- bencher/bencher.py +805 -0
- bencher/caching.py +51 -0
- bencher/example/__init__.py +0 -0
- bencher/example/benchmark_data.py +200 -0
- bencher/example/example_all.py +45 -0
- bencher/example/example_categorical.py +99 -0
- bencher/example/example_custom_sweep.py +59 -0
- bencher/example/example_docs.py +34 -0
- bencher/example/example_float3D.py +101 -0
- bencher/example/example_float_cat.py +98 -0
- bencher/example/example_floats.py +89 -0
- bencher/example/example_floats2D.py +93 -0
- bencher/example/example_holosweep.py +104 -0
- bencher/example/example_holosweep_objects.py +111 -0
- bencher/example/example_holosweep_tap.py +144 -0
- bencher/example/example_image.py +82 -0
- bencher/example/example_levels.py +181 -0
- bencher/example/example_pareto.py +53 -0
- bencher/example/example_sample_cache.py +85 -0
- bencher/example/example_sample_cache_context.py +116 -0
- bencher/example/example_simple.py +134 -0
- bencher/example/example_simple_bool.py +34 -0
- bencher/example/example_simple_cat.py +47 -0
- bencher/example/example_simple_float.py +38 -0
- bencher/example/example_strings.py +46 -0
- bencher/example/example_time_event.py +62 -0
- bencher/example/example_video.py +124 -0
- bencher/example/example_workflow.py +189 -0
- bencher/example/experimental/example_bokeh_plotly.py +38 -0
- bencher/example/experimental/example_hover_ex.py +45 -0
- bencher/example/experimental/example_hvplot_explorer.py +39 -0
- bencher/example/experimental/example_interactive.py +75 -0
- bencher/example/experimental/example_streamnd.py +49 -0
- bencher/example/experimental/example_streams.py +36 -0
- bencher/example/experimental/example_template.py +40 -0
- bencher/example/experimental/example_updates.py +84 -0
- bencher/example/experimental/example_vector.py +84 -0
- bencher/example/meta/example_meta.py +171 -0
- bencher/example/meta/example_meta_cat.py +25 -0
- bencher/example/meta/example_meta_float.py +23 -0
- bencher/example/meta/example_meta_levels.py +26 -0
- bencher/example/optuna/example_optuna.py +78 -0
- bencher/example/shelved/example_float2D_scatter.py +109 -0
- bencher/example/shelved/example_float3D_cone.py +96 -0
- bencher/example/shelved/example_kwargs.py +63 -0
- bencher/job.py +184 -0
- bencher/optuna_conversions.py +168 -0
- bencher/plotting/__init__.py +0 -0
- bencher/plotting/plot_filter.py +110 -0
- bencher/plotting/plt_cnt_cfg.py +74 -0
- bencher/results/__init__.py +0 -0
- bencher/results/bench_result.py +80 -0
- bencher/results/bench_result_base.py +405 -0
- bencher/results/float_formatter.py +44 -0
- bencher/results/holoview_result.py +592 -0
- bencher/results/optuna_result.py +354 -0
- bencher/results/panel_result.py +113 -0
- bencher/results/plotly_result.py +65 -0
- bencher/utils.py +148 -0
- bencher/variables/inputs.py +193 -0
- bencher/variables/parametrised_sweep.py +206 -0
- bencher/variables/results.py +176 -0
- bencher/variables/sweep_base.py +167 -0
- bencher/variables/time.py +74 -0
- bencher/video_writer.py +30 -0
- bencher/worker_job.py +40 -0
- holobench-1.3.6.dist-info/METADATA +85 -0
- holobench-1.3.6.dist-info/RECORD +74 -0
- holobench-1.3.6.dist-info/WHEEL +5 -0
bencher/bench_report.py
ADDED
@@ -0,0 +1,268 @@
|
|
1
|
+
import logging
|
2
|
+
from typing import Callable
|
3
|
+
import os
|
4
|
+
import panel as pn
|
5
|
+
from pathlib import Path
|
6
|
+
import shutil
|
7
|
+
from threading import Thread
|
8
|
+
|
9
|
+
from bencher.results.bench_result import BenchResult
|
10
|
+
from bencher.bench_plot_server import BenchPlotServer
|
11
|
+
from bencher.bench_cfg import BenchRunCfg
|
12
|
+
|
13
|
+
|
14
|
+
class BenchReport(BenchPlotServer):
    """Accumulates benchmark output panes into a left-tabbed panel layout that can be
    saved to html, served locally, or published to a git branch."""

    def __init__(
        self,
        bench_name: str = None,
    ) -> None:
        self.bench_name = bench_name
        self.pane = pn.Tabs(tabs_location="left", name=self.bench_name)

    def append_title(self, title: str, new_tab: bool = True):
        """Add a level-1 markdown heading, either as a brand new tab or appended to the current tab."""
        heading = f"# {title}"
        if not new_tab:
            return self.append_markdown(heading, title)
        return self.append_tab(pn.pane.Markdown(heading, name=title), title)

    def append_markdown(self, markdown: str, name=None, width=800, **kwargs) -> pn.pane.Markdown:
        """Append a markdown pane to the report and return it; the markdown text doubles as the name when none is given."""
        label = markdown if name is None else name
        md = pn.pane.Markdown(markdown, name=label, width=width, **kwargs)
        self.append(md, label)
        return md

    def append(self, pane: pn.panel, name: str = None) -> None:
        """Append a pane to the most recent tab, creating a first tab if the report is empty."""
        if len(self.pane) > 0:
            self.pane[-1].append(pane)
            return
        self.append_tab(pane, pane.name if name is None else name)

    def append_col(self, pane: pn.panel, name: str = None) -> None:
        """Append the pane wrapped in a new Column tab, named after the pane when no name is given."""
        col_name = pane.name if name is None else name
        self.pane.append(pn.Column(pane, name=col_name))

    def append_result(self, bench_res: BenchResult) -> None:
        """Append the auto-generated plots of a benchmark result as a new tab titled after its config."""
        self.append_tab(bench_res.to_auto_plots(), bench_res.bench_cfg.title)

    def append_tab(self, pane: pn.panel, name: str = None) -> None:
        """Append the pane as its own tab; None panes are silently skipped."""
        if pane is None:
            return
        tab_name = pane.name if name is None else name
        self.pane.append(pn.Column(pane, name=tab_name))

    def save_index(self, directory="", filename="index.html") -> Path:
        """Saves the result to index.html in the root folder so that it can be displayed by github pages.

        Returns:
            Path: save path
        """
        return self.save(directory, filename, False)

    def save(
        self,
        directory: str | Path = "cachedir",
        filename: str = None,
        in_html_folder: bool = True,
        **kwargs,
    ) -> Path:
        """Save the result to a html file. Note that dynamic content will not work. by passing save(__file__) the html output will be saved in the same folder as the source code in a html subfolder.

        Args:
            directory (str | Path, optional): base folder to save to. Defaults to "cachedir" which should be ignored by git.
            filename (str, optional): The name of the html file. Defaults to the name of the benchmark
            in_html_folder (bool, optional): Put the saved files in a html subfolder to help keep the results separate from source code. Defaults to True.

        Returns:
            Path: the save path
        """
        out_name = f"{self.bench_name}.html" if filename is None else filename

        out_dir = Path(directory)
        if in_html_folder:
            out_dir = out_dir / "html"

        logging.info(f"creating dir {out_dir.absolute()}")
        os.makedirs(out_dir.absolute(), exist_ok=True)

        out_path = out_dir / out_name

        logging.info(f"saving html output to: {out_path.absolute()}")

        self.pane.save(filename=out_path, progress=True, embed=True, **kwargs)
        return out_path

    def show(self, run_cfg: BenchRunCfg = None) -> Thread:  # pragma: no cover
        """Launches a webserver with plots of the benchmark results, blocking

        Args:
            run_cfg (BenchRunCfg, optional): Options for the webserver such as the port. Defaults to None.

        """
        if run_cfg is None:
            run_cfg = BenchRunCfg()

        return BenchPlotServer().plot_server(self.bench_name, run_cfg, self.pane)

    def publish(
        self, remote_callback: Callable, branch_name: str = None, debug: bool = False
    ) -> str:  # pragma: no cover
        """Publish the results as an html file by committing it to the bench_results branch in the current repo. If you have set up your repo with github pages or equivalent then the html file will be served as a viewable webpage. This is an example of a callable to publish on github pages:

        .. code-block:: python

            def publish_args(branch_name) -> Tuple[str, str]:
                return (
                    "https://github.com/dyson-ai/bencher.git",
                    f"https://github.com/dyson-ai/bencher/blob/{branch_name}")


        Args:
            remote_callback (Callable): A function the returns a tuple of the publishing urls. It must follow the signature def publish_args(branch_name) -> Tuple[str, str]. The first url is the git repo name, the second url needs to match the format for viewable html pages on your git provider. The second url can use the argument branch_name to point to the report on a specified branch.

        Returns:
            str: the url of the published report
        """

        if branch_name is None:
            branch_name = self.bench_name
        branch_name += "_debug" if debug else ""

        remote, publish_url = remote_callback(branch_name)

        directory = "tmpgit"
        report_path = self.save(directory, filename="index.html", in_html_folder=False)
        logging.info(f"created report at: {report_path.absolute()}")

        cd_dir = f"cd {directory} &&"

        # Initialise a throwaway repo in tmpgit and force-push the single
        # index.html to the target branch on the provided remote.
        for cmd in (
            f"{cd_dir} git init",
            f"{cd_dir} git checkout -b {branch_name}",
            f"{cd_dir} git add index.html",
            f'{cd_dir} git commit -m "publish {branch_name}"',
            f"{cd_dir} git remote add origin {remote}",
            f"{cd_dir} git push --set-upstream origin {branch_name} -f",
        ):
            os.system(cmd)

        logging.info("Published report @")
        logging.info(publish_url)

        shutil.rmtree(directory)

        return publish_url
|
159
|
+
|
160
|
+
|
161
|
+
# def append(self,pane):
|
162
|
+
# self.report.append(pane)
|
163
|
+
|
164
|
+
# def __getstate__(self):
|
165
|
+
# state = self.__dict__.copy()
|
166
|
+
# # Don't pickle baz
|
167
|
+
# del state["pane"]
|
168
|
+
# return state
|
169
|
+
|
170
|
+
# def __setstate__(self, state):
|
171
|
+
# self.__dict__.update(state)
|
172
|
+
# # Add baz back since it doesn't exist in the pickle
|
173
|
+
# self.report = []
|
174
|
+
|
175
|
+
# def publish_old(
|
176
|
+
# self,
|
177
|
+
# directory: str = "bench_results",
|
178
|
+
# branch_name: str = "bench_results",
|
179
|
+
# url_postprocess: Callable = None,
|
180
|
+
# **kwargs,
|
181
|
+
# ) -> str:
|
182
|
+
# """Publish the results as an html file by committing it to the bench_results branch in the current repo. If you have set up your repo with github pages or equivalent then the html file will be served as a viewable webpage.
|
183
|
+
|
184
|
+
# Args:
|
185
|
+
# directory (str, optional): Directory to save the results. Defaults to "bench_results".
|
186
|
+
# branch_name (str, optional): Branch to publish on. Defaults to "bench_results".
|
187
|
+
# url_postprocess (Callable, optional): A function that maps the origin url to a github pages url. Pass your own function if you are using another git providers. Defaults to None.
|
188
|
+
|
189
|
+
# Returns:
|
190
|
+
# str: _description_
|
191
|
+
# """
|
192
|
+
|
193
|
+
# def get_output(cmd: str) -> str:
|
194
|
+
# return (
|
195
|
+
# subprocess.run(cmd.split(" "), stdout=subprocess.PIPE, check=False)
|
196
|
+
# .stdout.decode("utf=8")
|
197
|
+
# .strip()
|
198
|
+
# )
|
199
|
+
|
200
|
+
# def postprocess_url(publish_url: str, branch_name: str, report_path: str, **kwargs) -> str:
|
201
|
+
# # import re
|
202
|
+
|
203
|
+
# # return re.sub(
|
204
|
+
# # """((git|ssh|http(s)?)|(git@[\w\.-]+))(:(//)?)([\w\.@\:/\-~]+)(\.git)(/)?""",
|
205
|
+
# # """https://$7/""",
|
206
|
+
# # publish_url,
|
207
|
+
# # )
|
208
|
+
# # git@github.com:user/project.git
|
209
|
+
# # https://github.com/user/project.git
|
210
|
+
# # http://github.com/user/project.git
|
211
|
+
# # git@192.168.101.127:user/project.git
|
212
|
+
# # https://192.168.101.127/user/project.git
|
213
|
+
# # http://192.168.101.127/user/project.git
|
214
|
+
# # ssh://user@host.xz:port/path/to/repo.git/
|
215
|
+
# # ssh://user@host.xz/path/to/repo.git/
|
216
|
+
# # ssh://host.xz:port/path/to/repo.git/
|
217
|
+
# # ssh://host.xz/path/to/repo.git/
|
218
|
+
# # ssh://user@host.xz/path/to/repo.git/
|
219
|
+
# # ssh://host.xz/path/to/repo.git/
|
220
|
+
# # ssh://user@host.xz/~user/path/to/repo.git/
|
221
|
+
# # ssh://host.xz/~user/path/to/repo.git/
|
222
|
+
# # ssh://user@host.xz/~/path/to/repo.git
|
223
|
+
# # ssh://host.xz/~/path/to/repo.git
|
224
|
+
# # git://host.xz/path/to/repo.git/
|
225
|
+
# # git://host.xz/~user/path/to/repo.git/
|
226
|
+
# # http://host.xz/path/to/repo.git/
|
227
|
+
# # https://host.xz/path/to/repo.git/
|
228
|
+
# # https://regex101.com/r/qT7NP0/3
|
229
|
+
|
230
|
+
# return publish_url.replace(".git", f"/blob/{directory}/{report_path}")
|
231
|
+
|
232
|
+
# if url_postprocess is None:
|
233
|
+
# url_postprocess = postprocess_url
|
234
|
+
# current_branch = get_output("git symbolic-ref --short HEAD")
|
235
|
+
# logging.info(f"on branch: {current_branch}")
|
236
|
+
# stash_msg = get_output("git stash")
|
237
|
+
# logging.info(f"stashing current work :{stash_msg}")
|
238
|
+
# checkout_msg = get_output(f"git checkout -b {branch_name}")
|
239
|
+
# checkout_msg = get_output(f"git checkout {branch_name}")
|
240
|
+
# get_output("git pull")
|
241
|
+
|
242
|
+
# logging.info(f"checking out branch: {checkout_msg}")
|
243
|
+
# report_path = self.save(directory, in_html_folder=False)
|
244
|
+
# logging.info(f"created report at: {report_path.absolute()}")
|
245
|
+
# # commit_msg = f""
|
246
|
+
# logging.info("adding report to git")
|
247
|
+
# get_output(f"git add {report_path.absolute()}")
|
248
|
+
# get_output("git status")
|
249
|
+
# logging.info("committing report")
|
250
|
+
# cmd = f'git commit -m "generate_report:{self.bench_name}"'
|
251
|
+
# logging.info(cmd)
|
252
|
+
# get_output(cmd)
|
253
|
+
# logging.info("pushing report to origin")
|
254
|
+
# get_output(f"git push --set-upstream origin {branch_name}")
|
255
|
+
# logging.info("checking out original branch")
|
256
|
+
# get_output(f"git checkout {current_branch}")
|
257
|
+
# if "No local changes" not in stash_msg:
|
258
|
+
# logging.info("restoring work with git stash pop")
|
259
|
+
# get_output("git stash pop")
|
260
|
+
|
261
|
+
# publish_url = get_output("git remote get-url --push origin")
|
262
|
+
# logging.info(f"raw url:{publish_url}")
|
263
|
+
# publish_url = url_postprocess(
|
264
|
+
# publish_url, branch_name=branch_name, report_path=report_path, **kwargs
|
265
|
+
# )
|
266
|
+
# logging.info("Published report @")
|
267
|
+
# logging.info(publish_url)
|
268
|
+
# return publish_url
|
bencher/bench_runner.py
ADDED
@@ -0,0 +1,136 @@
|
|
1
|
+
from typing import Protocol, Callable, List
|
2
|
+
import logging
|
3
|
+
from bencher.bench_cfg import BenchRunCfg, BenchCfg
|
4
|
+
from bencher.variables.parametrised_sweep import ParametrizedSweep
|
5
|
+
from bencher.bencher import Bench
|
6
|
+
from bencher.bench_report import BenchReport
|
7
|
+
from copy import deepcopy
|
8
|
+
|
9
|
+
|
10
|
+
class Benchable(Protocol):
    # Structural (duck-typed) interface: anything with a matching bench()
    # signature can be registered with BenchRunner.add_run.
    def bench(self, run_cfg: BenchRunCfg, report: BenchReport) -> BenchCfg:
        # Run the benchmark described by run_cfg, appending output to report,
        # and return the resulting benchmark configuration.
        raise NotImplementedError
|
13
|
+
|
14
|
+
|
15
|
+
class BenchRunner:
    """A class to manage running multiple benchmarks in groups, or running the same benchmark but at multiple resolutions"""

    def __init__(
        self,
        name: str,
        bench_class=None,
        run_cfg: BenchRunCfg = None,
        publisher: Callable = None,
    ) -> None:
        """Create a runner.

        Args:
            name (str): base name used for grouped reports.
            bench_class (ParametrizedSweep, optional): a sweep instance to register immediately. Defaults to None.
            run_cfg (BenchRunCfg, optional): base run configuration; a fresh default is created when None. Defaults to None.
            publisher (Callable, optional): callback returning (repo_url, view_url) used when publishing. Defaults to None.
        """
        self.name = name
        # setup_run_cfg accepts None and builds a fresh config, avoiding the
        # shared-mutable-default-argument pitfall of `run_cfg=BenchRunCfg()`.
        self.run_cfg = BenchRunner.setup_run_cfg(run_cfg)
        self.bench_fns = []
        self.publisher = publisher
        if bench_class is not None:
            self.add_bench(bench_class)
        self.results = []
        self.servers = []

    @staticmethod
    def setup_run_cfg(
        run_cfg: BenchRunCfg = None, level: int = 2, use_cache: bool = True
    ) -> BenchRunCfg:
        """Return a copy of run_cfg (or a fresh default when None) with caching and level configured.

        Args:
            run_cfg (BenchRunCfg, optional): configuration to copy. Defaults to None (fresh BenchRunCfg).
            level (int, optional): sampling level to set. Defaults to 2.
            use_cache (bool, optional): enable the sample cache and hash-tag-only hashing. Defaults to True.

        Returns:
            BenchRunCfg: the configured copy; the input is never mutated.
        """
        # Fresh instance per call instead of a mutable default argument shared
        # across every invocation.
        run_cfg_out = BenchRunCfg() if run_cfg is None else deepcopy(run_cfg)
        run_cfg_out.use_sample_cache = use_cache
        run_cfg_out.only_hash_tag = use_cache
        run_cfg_out.level = level
        return run_cfg_out

    @staticmethod
    def from_parametrized_sweep(
        class_instance: ParametrizedSweep,
        run_cfg: BenchRunCfg = None,
        report: BenchReport = None,
    ):
        """Construct a Bench for a ParametrizedSweep instance.

        Args:
            class_instance (ParametrizedSweep): the sweep to benchmark.
            run_cfg (BenchRunCfg, optional): run configuration; a fresh one is created when None.
            report (BenchReport, optional): report to append to; a fresh one is created when None.

        Returns:
            Bench: a bench named after the class instance.
        """
        # Defaults were previously `BenchRunCfg()` / `BenchReport()` evaluated
        # once at class-definition time, so every call omitting them shared the
        # SAME report object and results from unrelated runs accumulated into
        # it. Create per-call instances instead.
        if run_cfg is None:
            run_cfg = BenchRunCfg()
        if report is None:
            report = BenchReport()
        return Bench(f"bench_{class_instance.name}", class_instance, run_cfg=run_cfg, report=report)

    def add_run(self, bench_fn: Benchable) -> None:
        """Register a callable conforming to the Benchable protocol to be run by run()."""
        self.bench_fns.append(bench_fn)

    def add_bench(self, class_instance: ParametrizedSweep) -> None:
        """Register a ParametrizedSweep instance; it is wrapped in a callback that plots a sweep when run."""

        def cb(run_cfg: BenchRunCfg, report: BenchReport) -> BenchCfg:
            bench = BenchRunner.from_parametrized_sweep(
                class_instance, run_cfg=run_cfg, report=report
            )
            return bench.plot_sweep(f"bench_{class_instance.name}")

        self.add_run(cb)

    def run(
        self,
        min_level: int = 2,
        max_level: int = 6,
        level: int = None,
        repeats: int = 1,
        run_cfg: BenchRunCfg = None,
        publish: bool = False,
        debug: bool = False,
        show: bool = False,
        save: bool = False,
        grouped: bool = True,
        use_cache: bool = True,
    ) -> List[Bench]:
        """This function controls how a benchmark or a set of benchmarks are run. If you are only running a single benchmark it can be simpler to just run it directly, but if you are running several benchmarks together and want them to be sampled at different levels of fidelity or published together in a single report this function enables that workflow. If you have an expensive function, it can be useful to view low fidelity results as they are computed but also continue to compute higher fidelity results while reusing previously computed values. The parameters min_level and max_level let you specify how to progressivly increase the sampling resolution of the benchmark sweep. By default use_cache=True so that previous values are reused.

        Args:
            min_level (int, optional): The minimum level to start sampling at. Defaults to 2.
            max_level (int, optional): The maximum level to sample up to. Defaults to 6.
            level (int, optional): If this is set, then min_level and max_level are not used and only a single level is sampled. Defaults to None.
            repeats (int, optional): The number of times to run the entire benchmarking procedure. Defaults to 1.
            run_cfg (BenchRunCfg, optional): benchmark run configuration. Defaults to None.
            publish (bool, optional): Publish the results to git, requires a publish url to be set up. Defaults to False.
            debug (bool, optional): _description_. Defaults to False.
            show (bool, optional): show the results in the local web browser. Defaults to False.
            save (bool, optional): save the results to disk in index.html. Defaults to False.
            grouped (bool, optional): Produce a single html page with all the benchmarks included. Defaults to True.
            use_cache (bool, optional): Use the sample cache to reuse previous results. Defaults to True.

        Returns:
            List[BenchCfg]: A list of bencher instances
        """
        if run_cfg is None:
            run_cfg = deepcopy(self.run_cfg)
        run_cfg = BenchRunner.setup_run_cfg(run_cfg, use_cache=use_cache)

        if level is not None:
            min_level = level
            max_level = level
        for r in range(1, repeats + 1):
            for lvl in range(min_level, max_level + 1):
                if grouped:
                    # One shared report per (repeat, level) pass.
                    report_level = BenchReport(self.name)

                for bch_fn in self.bench_fns:
                    run_lvl = deepcopy(run_cfg)
                    run_lvl.level = lvl
                    run_lvl.repeats = r
                    logging.info(f"Running {bch_fn} at level: {lvl} with repeats:{r}")
                    if grouped:
                        res = bch_fn(run_lvl, report_level)
                    else:
                        # Ungrouped: each benchmark gets its own report which
                        # is shown/published immediately.
                        res = bch_fn(run_lvl, BenchReport())
                        self.show_publish(res.report, show, publish, save, debug)
                    self.results.append(res)
                if grouped:
                    self.show_publish(report_level, show, publish, save, debug)
        return self.results

    def show_publish(self, report, show, publish, save, debug):
        """Save, publish, and/or serve a report according to the boolean flags."""
        if save:
            report.save_index()
        if publish and self.publisher is not None:
            report.publish(remote_callback=self.publisher, debug=debug)
        if show:
            self.servers.append(report.show())

    def shutdown(self):
        """Stop every web server started by show()."""
        while self.servers:
            self.servers.pop().stop()

    def __del__(self) -> None:
        self.shutdown()
|