holobench 1.3.6__py2.py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. bencher/__init__.py +41 -0
  2. bencher/bench_cfg.py +462 -0
  3. bencher/bench_plot_server.py +100 -0
  4. bencher/bench_report.py +268 -0
  5. bencher/bench_runner.py +136 -0
  6. bencher/bencher.py +805 -0
  7. bencher/caching.py +51 -0
  8. bencher/example/__init__.py +0 -0
  9. bencher/example/benchmark_data.py +200 -0
  10. bencher/example/example_all.py +45 -0
  11. bencher/example/example_categorical.py +99 -0
  12. bencher/example/example_custom_sweep.py +59 -0
  13. bencher/example/example_docs.py +34 -0
  14. bencher/example/example_float3D.py +101 -0
  15. bencher/example/example_float_cat.py +98 -0
  16. bencher/example/example_floats.py +89 -0
  17. bencher/example/example_floats2D.py +93 -0
  18. bencher/example/example_holosweep.py +104 -0
  19. bencher/example/example_holosweep_objects.py +111 -0
  20. bencher/example/example_holosweep_tap.py +144 -0
  21. bencher/example/example_image.py +82 -0
  22. bencher/example/example_levels.py +181 -0
  23. bencher/example/example_pareto.py +53 -0
  24. bencher/example/example_sample_cache.py +85 -0
  25. bencher/example/example_sample_cache_context.py +116 -0
  26. bencher/example/example_simple.py +134 -0
  27. bencher/example/example_simple_bool.py +34 -0
  28. bencher/example/example_simple_cat.py +47 -0
  29. bencher/example/example_simple_float.py +38 -0
  30. bencher/example/example_strings.py +46 -0
  31. bencher/example/example_time_event.py +62 -0
  32. bencher/example/example_video.py +124 -0
  33. bencher/example/example_workflow.py +189 -0
  34. bencher/example/experimental/example_bokeh_plotly.py +38 -0
  35. bencher/example/experimental/example_hover_ex.py +45 -0
  36. bencher/example/experimental/example_hvplot_explorer.py +39 -0
  37. bencher/example/experimental/example_interactive.py +75 -0
  38. bencher/example/experimental/example_streamnd.py +49 -0
  39. bencher/example/experimental/example_streams.py +36 -0
  40. bencher/example/experimental/example_template.py +40 -0
  41. bencher/example/experimental/example_updates.py +84 -0
  42. bencher/example/experimental/example_vector.py +84 -0
  43. bencher/example/meta/example_meta.py +171 -0
  44. bencher/example/meta/example_meta_cat.py +25 -0
  45. bencher/example/meta/example_meta_float.py +23 -0
  46. bencher/example/meta/example_meta_levels.py +26 -0
  47. bencher/example/optuna/example_optuna.py +78 -0
  48. bencher/example/shelved/example_float2D_scatter.py +109 -0
  49. bencher/example/shelved/example_float3D_cone.py +96 -0
  50. bencher/example/shelved/example_kwargs.py +63 -0
  51. bencher/job.py +184 -0
  52. bencher/optuna_conversions.py +168 -0
  53. bencher/plotting/__init__.py +0 -0
  54. bencher/plotting/plot_filter.py +110 -0
  55. bencher/plotting/plt_cnt_cfg.py +74 -0
  56. bencher/results/__init__.py +0 -0
  57. bencher/results/bench_result.py +80 -0
  58. bencher/results/bench_result_base.py +405 -0
  59. bencher/results/float_formatter.py +44 -0
  60. bencher/results/holoview_result.py +592 -0
  61. bencher/results/optuna_result.py +354 -0
  62. bencher/results/panel_result.py +113 -0
  63. bencher/results/plotly_result.py +65 -0
  64. bencher/utils.py +148 -0
  65. bencher/variables/inputs.py +193 -0
  66. bencher/variables/parametrised_sweep.py +206 -0
  67. bencher/variables/results.py +176 -0
  68. bencher/variables/sweep_base.py +167 -0
  69. bencher/variables/time.py +74 -0
  70. bencher/video_writer.py +30 -0
  71. bencher/worker_job.py +40 -0
  72. holobench-1.3.6.dist-info/METADATA +85 -0
  73. holobench-1.3.6.dist-info/RECORD +74 -0
  74. holobench-1.3.6.dist-info/WHEEL +5 -0
bencher/__init__.py ADDED
@@ -0,0 +1,41 @@
+ from .bencher import Bench, BenchCfg, BenchRunCfg
+ from .bench_runner import BenchRunner
+ from .example.benchmark_data import ExampleBenchCfgIn, ExampleBenchCfgOut, bench_function
+ from .bench_plot_server import BenchPlotServer
+ from .variables.sweep_base import hash_sha1
+ from .variables.inputs import IntSweep, FloatSweep, StringSweep, EnumSweep, BoolSweep, SweepBase
+ from .variables.time import TimeSnapshot
+
+ from .variables.inputs import box
+ from .variables.results import (
+     ResultVar,
+     ResultVec,
+     ResultHmap,
+     ResultVideo,
+     ResultImage,
+     ResultString,
+     ResultContainer,
+     ResultReference,
+     ResultVolume,
+     OptDir,
+     curve,
+ )
+
+ from .plotting.plot_filter import VarRange, PlotFilter
+ from .utils import (
+     hmap_canonical_input,
+     get_nearest_coords,
+     make_namedtuple,
+     gen_path,
+     gen_image_path,
+     gen_video_path,
+     lerp,
+ )
+ from .variables.parametrised_sweep import ParametrizedSweep
+ from .caching import CachedParams
+ from .results.bench_result import BenchResult
+ from .results.panel_result import PanelResult
+ from .results.holoview_result import ReduceType, HoloviewResult
+ from .bench_report import BenchReport
+ from .job import Executors
+ from .video_writer import VideoWriter, add_image
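
For orientation, the flat namespace above is the intended entry point for downstream code. Below is a minimal sketch (not part of the package), assuming the conventional "import bencher as bch" alias; every attribute referenced is one of the re-exports listed in this __init__.py, and the BenchRunCfg fields used are documented in bench_cfg.py below.

    # Orientation sketch, not part of the package.
    import bencher as bch

    run_cfg = bch.BenchRunCfg(repeats=3, use_cache=True)  # fields documented in bench_cfg.py below
    print(list(bch.Executors))             # executor options re-exported from bencher.job
    print(bch.hash_sha1(run_cfg.repeats))  # stable hashing helper from variables.sweep_base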
bencher/bench_cfg.py ADDED
@@ -0,0 +1,462 @@
+ from __future__ import annotations
+
+ import argparse
+ import logging
+
+ from typing import List
+
+ import param
+ from str2bool import str2bool
+ import panel as pn
+
+
+ from bencher.variables.sweep_base import hash_sha1, describe_variable
+ from bencher.variables.time import TimeSnapshot, TimeEvent
+ from bencher.variables.results import OptDir
+ from bencher.job import Executors
+ from datetime import datetime
+
+ # from bencher.results.bench_result import BenchResult
+
+
+ class BenchPlotSrvCfg(param.Parameterized):
+     port: int = param.Integer(None, doc="The port to launch panel with")
+     allow_ws_origin = param.Boolean(
+         False,
+         doc="Add the port to the whitelist (warning: remote access will be disabled if set to true)",
+     )
+     show: bool = param.Boolean(True, doc="Open the served page in a web browser")
+
+
+ class BenchRunCfg(BenchPlotSrvCfg):
+     """A class to store options for how to run a benchmark parameter sweep"""
+
+     repeats: int = param.Integer(1, doc="The number of times to sample the inputs")
+
+     over_time: bool = param.Boolean(
+         False,
+         doc="If true, each time the function is called it will plot a time series of the historical and latest results.",
+     )
+
+     debug: bool = param.Boolean(
+         False, doc="Debug the sampling faster by reducing the dimension sampling resolution"
+     )
+
+     use_optuna: bool = param.Boolean(False, doc="Show optuna plots")
+
+     summarise_constant_inputs = param.Boolean(
+         True, doc="Print the inputs that are kept constant when describing the sweep parameters"
+     )
+
+     print_bench_inputs: bool = param.Boolean(
+         True, doc="Print the inputs to the benchmark function every time it is called"
+     )
+
+     print_bench_results: bool = param.Boolean(
+         True, doc="Print the results of the benchmark function every time it is called"
+     )
+
+     clear_history: bool = param.Boolean(False, doc="Clear historical results")
+
+     print_pandas: bool = param.Boolean(
+         False, doc="Print a pandas summary of the results to the console."
+     )
+
+     print_xarray: bool = param.Boolean(
+         False, doc="Print an xarray summary of the results to the console"
+     )
+
+     serve_pandas: bool = param.Boolean(
+         False,
+         doc="Serve a pandas summary on the results webpage. If you have a large dataset, consider setting this to false if the page loads slowly",
+     )
+
+     serve_pandas_flat: bool = param.Boolean(
+         True,
+         doc="Serve a flattened pandas summary on the results webpage. If you have a large dataset, consider setting this to false if the page loads slowly",
+     )
+
+     serve_xarray: bool = param.Boolean(
+         False,
+         doc="Serve an xarray summary on the results webpage. If you have a large dataset, consider setting this to false if the page loads slowly",
+     )
+
+     auto_plot: bool = param.Boolean(
+         True, doc="Automatically deduce the best type of plot for the results."
+     )
+
+     raise_duplicate_exception: bool = param.Boolean(False, doc="Used to debug unique plot names.")
+
+     use_cache: bool = param.Boolean(
+         False,
+         doc="A benchmark-level cache that stores the results of a fully completed benchmark. Values are added to the cache only when the benchmark completes; if it does not complete, nothing is stored. If you want to cache values during the benchmark, use the use_sample_cache option. Beware that depending on how you change code in the objective function, the cache could return values that are no longer correct.",
+     )
+
+     clear_cache: bool = param.Boolean(
+         False, doc="Clear the cache of saved input->output mappings."
+     )
+
+     use_sample_cache: bool = param.Boolean(
+         False,
+         doc="If true, every time the benchmark function is called, bencher will check if that value has been calculated before and, if so, load the result from the cache. Note that the sample-level cache is different from the benchmark-level cache, which only caches the aggregate of all the results at the end of the benchmark. This cache lets you stop a benchmark halfway through and continue. However, beware that depending on how you change code in the objective function, the cache could return values that are no longer correct.",
+     )
+
+     only_hash_tag: bool = param.Boolean(
+         False,
+         doc="By default, checking whether a sample has been calculated before includes the hash of the greater benchmarking context. This is safer because it means that data generated from one benchmark will not affect data from another benchmark. However, if you are careful, it can be more flexible to ignore which benchmark generated the data and only use the tag hash to check if that data has been calculated before, i.e. you can create two benchmarks that sample a subset of the problem during exploration and give them the same tag, and then afterwards create a larger benchmark that covers the cases you already explored. If this value is true, the combined benchmark will use any data from other benchmarks with the same tag.",
+     )
+
+     clear_sample_cache: bool = param.Boolean(
+         False,
+         doc="Clears the per-sample cache. Use this if you get unexpected behavior. The per-sample cache is tagged by the specific benchmark it was sampled from, so clearing the cache of one benchmark will not clear the cache of other benchmarks.",
+     )
+
+     overwrite_sample_cache: bool = param.Boolean(
+         False,
+         doc="If True, recalculate the value and overwrite the value stored in the sample cache",
+     )
+
+     only_plot: bool = param.Boolean(
+         False, doc="Do not attempt to calculate benchmarks if no results are found in the cache"
+     )
+
+     use_holoview: bool = param.Boolean(False, doc="Use holoview for plotting")
+
+     nightly: bool = param.Boolean(
+         False, doc="Run a more extensive set of tests for a nightly benchmark"
+     )
+
+     time_event: str = param.String(
+         None,
+         doc="A string representation of a sequence over time, e.g. a datetime, pull request number, or run number",
+     )
+
+     headless: bool = param.Boolean(False, doc="Run the benchmarks headlessly")
+
+     render_plotly = param.Boolean(
+         True,
+         doc="Plotly and Bokeh don't play nicely together, so by default plotly figures are pre-rendered to a non-dynamic version so that bokeh plots correctly. If you want interactive 3D graphs, set this to false, but be aware that your 2D interactive graphs will probably stop working.",
+     )
+
+     level = param.Integer(
+         default=0,
+         bounds=[0, 12],
+         doc="The level parameter is a method of defining the number of samples to sweep over in a variable-agnostic way, i.e. you don't need to specify the number of samples for each variable as they are calculated dynamically from the sampling level. See example_levels.py for more information.",
+     )
+
+     run_tag = param.String(
+         default="",
+         doc="Define a tag for a run to isolate the results stored in the cache from other runs",
+     )
+
+     run_date = param.Date(
+         default=datetime.now(),
+         doc="The date the bench run was performed",
+     )
+
+     # parallel = param.Boolean(
+     #     default=False,
+     #     doc="Run the sweep in parallel. Warning! You need to make sure your code is threadsafe before using this option",
+     # )
+
+     executor = param.Selector(
+         objects=list(Executors),
+         doc="The function can be run serially or in parallel with different futures executors",
+     )
+
+     plot_size = param.Integer(default=None, doc="Sets the width and height of the plot")
+     plot_width = param.Integer(
+         default=None,
+         doc="Sets the width of the plots; this will override the plot_size parameter",
+     )
+     plot_height = param.Integer(
+         default=None, doc="Sets the height of the plots; this will override the plot_size parameter"
+     )
+
+     @staticmethod
+     def from_cmd_line() -> BenchRunCfg:  # pragma: no cover
+         """Create a BenchRunCfg by parsing command line arguments
+
+         Returns:
+             BenchRunCfg: the parsed command line configuration
+         """
+
+         parser = argparse.ArgumentParser(description="benchmark")
+
+         parser.add_argument(
+             "--use-cache",
+             action="store_true",
+             help=BenchRunCfg.param.use_cache.doc,
+         )
+
+         parser.add_argument(
+             "--only-plot",
+             action="store_true",
+             help=BenchRunCfg.param.only_plot.doc,
+         )
+
+         parser.add_argument(
+             "--port",
+             type=int,
+             help=BenchRunCfg.param.port.doc,
+         )
+
+         parser.add_argument(
+             "--nightly",
+             type=lambda b: bool(str2bool(b)),
+             nargs="?",
+             const=False,
+             default=False,
+             help="turn on nightly benchmarking",
+         )
+
+         parser.add_argument(
+             "--time_event",
+             type=str,
+             default=BenchRunCfg.param.time_event.default,
+             help=BenchRunCfg.param.time_event.doc,
+         )
+
+         parser.add_argument(
+             "--repeats",
+             type=int,
+             default=BenchRunCfg.param.repeats.default,
+             help=BenchRunCfg.param.repeats.doc,
+         )
+
+         return BenchRunCfg(**vars(parser.parse_args()))
+
+
+ class BenchCfg(BenchRunCfg):
+     """A class for storing the arguments that configure a benchmark protocol. If the input variables are the same, the class should return the same hash and the same filename. This is so that historical data can be referenced, and it ensures that the generated plots are unique per benchmark."""
+
+     input_vars = param.List(
+         default=None,
+         doc="A list of ParameterizedSweep variables to perform a parameter sweep over",
+     )
+     result_vars = param.List(
+         default=None,
+         doc="A list of ParameterizedSweep results to collect and plot.",
+     )
+
+     const_vars = param.List(
+         default=None,
+         doc="Variables to keep constant that differ from their default value",
+     )
+
+     result_hmaps = param.List(default=None, doc="A list of holomap results")
+
+     meta_vars = param.List(
+         default=None,
+         doc="Meta variables such as recording time and repeat id",
+     )
+     all_vars = param.List(
+         default=None,
+         doc="Stores a list of both the input_vars and meta_vars that are used to define a unique hash for the input",
+     )
+     iv_time = param.List(
+         default=[],
+         item_type=TimeSnapshot | TimeEvent,
+         doc="A parameter to represent sampling the same inputs over time as a scalar type",
+     )
+
+     iv_time_event = param.List(
+         default=[],
+         item_type=TimeEvent,
+         doc="A parameter to represent sampling the same inputs over time as a discrete type",
+     )
+
+     over_time = param.Boolean(
+         False, doc="A parameter to control whether the function is sampled over time"
+     )
+     name: str = param.String(None, doc="The name of the BenchCfg")
+     title: str = param.String(None, doc="The title of the benchmark")
+     raise_duplicate_exception: bool = param.Boolean(
+         False, doc="Use this while debugging to check that filename generation is unique"
+     )
+     bench_name: str = param.String(
+         None, doc="The name of the benchmark and the name of the save folder"
+     )
+     description: str = param.String(
+         None,
+         doc="A place to store a longer description of the function of the benchmark",
+     )
+     post_description: str = param.String(None, doc="A place to comment on the output of the graphs")
+
+     has_results: bool = param.Boolean(
+         False,
+         doc="True if this config has results; otherwise the config is only used to store titles and other bench metadata",
+     )
+
+     pass_repeat: bool = param.Boolean(
+         False,
+         doc="By default do not pass the kwarg 'repeat' to the benchmark function. Set to true if you want the benchmark function to be passed the repeat number",
+     )
+
+     tag: str = param.String(
+         "",
+         doc="Use tags to group different benchmarks together. By default benchmarks are considered distinct from each other and are identified by the hash of their name, inputs, constants, results and tag, but you can optionally change the hash value to only depend on the tag. This way you can have multiple unrelated benchmarks share values with each other based only on the tag value.",
+     )
+
+     hash_value: str = param.String(
+         "",
+         doc="Store the hash value of the config to avoid having to hash multiple times",
+     )
+
+     def __init__(self, **params):
+         super().__init__(**params)
+         self.plot_lib = None
+         self.hmap_kdims = None
+         self.iv_repeat = None
+
+     def hash_persistent(self, include_repeats) -> str:
+         """Override the default hash function because the default hash function does not return the same value for the same inputs. It references internal variables that are unique per instance of BenchCfg
+
+         Args:
+             include_repeats (bool): by default include repeats as part of the hash, except when using the sample cache
+         """
+
+         if include_repeats:
+             # needed so that the historical xarray arrays are the same size
+             repeats_hash = hash_sha1(self.repeats)
+         else:
+             repeats_hash = 0
+
+         hash_val = hash_sha1(
+             (
+                 hash_sha1(str(self.bench_name)),
+                 hash_sha1(str(self.title)),
+                 hash_sha1(self.over_time),
+                 repeats_hash,
+                 hash_sha1(self.debug),
+                 hash_sha1(self.tag),
+             )
+         )
+         all_vars = self.input_vars + self.result_vars
+         for v in all_vars:
+             hash_val = hash_sha1((hash_val, v.hash_persistent()))
+
+         for v in self.const_vars:
+             hash_val = hash_sha1((v[0].hash_persistent(), hash_sha1(v[1])))
+
+         return hash_val
+
+     def inputs_as_str(self) -> List[str]:
+         return [i.name for i in self.input_vars]
+
+     def describe_sweep(self, width: int = 800) -> pn.pane.Markdown:
+         """Produce a markdown summary of the sweep settings"""
+         return pn.pane.Markdown(self.describe_benchmark(), width=width)
+
+     def describe_benchmark(self) -> str:
+         """Generate a string summary of the inputs and results from a BenchCfg
+
+         Returns:
+             str: summary of BenchCfg
+         """
+         benchmark_sampling_str = ["```text"]
+         benchmark_sampling_str.append("")
+
+         benchmark_sampling_str.append("Input Variables:")
+         for iv in self.input_vars:
+             benchmark_sampling_str.extend(describe_variable(iv, self.debug, True))
+
+         if self.const_vars and (self.summarise_constant_inputs):
+             benchmark_sampling_str.append("\nConstants:")
+             for cv in self.const_vars:
+                 benchmark_sampling_str.extend(describe_variable(cv[0], False, False, cv[1]))
+
+         benchmark_sampling_str.append("\nResult Variables:")
+         for rv in self.result_vars:
+             benchmark_sampling_str.extend(describe_variable(rv, self.debug, False))
+
+         print_meta = True
+         # if len(self.meta_vars) == 1:
+         #     mv = self.meta_vars[0]
+         #     if mv.name == "repeat" and mv.samples == 1:
+         #         print_meta = False
+
+         if print_meta:
+             benchmark_sampling_str.append("\nMeta Variables:")
+             benchmark_sampling_str.append(f" run date: {self.run_date}")
+             if self.run_tag is not None and len(self.run_tag) > 0:
+                 benchmark_sampling_str.append(f" run tag: {self.run_tag}")
+             if self.level is not None:
+                 benchmark_sampling_str.append(f" bench level: {self.level}")
+             benchmark_sampling_str.append(f" use_cache: {self.use_cache}")
+             benchmark_sampling_str.append(f" use_sample_cache: {self.use_sample_cache}")
+             benchmark_sampling_str.append(f" only_hash_tag: {self.only_hash_tag}")
+             benchmark_sampling_str.append(f" parallel: {self.executor}")
+
+             for mv in self.meta_vars:
+                 benchmark_sampling_str.extend(describe_variable(mv, self.debug, True))
+
+         benchmark_sampling_str.append("```")
+
+         benchmark_sampling_str = "\n".join(benchmark_sampling_str)
+         return benchmark_sampling_str
+
+     def to_title(self, panel_name: str = None) -> pn.pane.Markdown:
+         if panel_name is None:
+             panel_name = self.title
+         return pn.pane.Markdown(f"# {self.title}", name=panel_name)
+
+     def to_description(self, width: int = 800) -> pn.pane.Markdown:
+         return pn.pane.Markdown(f"{self.description}", width=width)
+
+     def to_post_description(self, width: int = 800) -> pn.pane.Markdown:
+         return pn.pane.Markdown(f"{self.post_description}", width=width)
+
+     def to_sweep_summary(
+         self,
+         name=None,
+         description=True,
+         describe_sweep=True,
+         results_suffix=True,
+         title: bool = True,
+     ) -> pn.pane.Markdown:
+         """Produce panel output summarising the title, description and sweep settings"""
+         if name is None:
+             if title:
+                 name = self.title
+             else:
+                 name = "Data Collection Parameters"
+         col = pn.Column(name=name)
+         if title:
+             col.append(self.to_title())
+         if self.description is not None and description:
+             col.append(self.to_description())
+         if describe_sweep:
+             col.append(pn.Accordion(("Data Collection Parameters", self.describe_sweep())))
+         if results_suffix:
+             col.append(pn.pane.Markdown("## Results:"))
+         return col
+
+     def optuna_targets(self, as_var=False) -> List[str]:
+         targets = []
+         for rv in self.result_vars:
+             if hasattr(rv, "direction") and rv.direction != OptDir.none:
+                 if as_var:
+                     targets.append(rv)
+                 else:
+                     targets.append(rv.name)
+         return targets
+
+
+ class DimsCfg:
+     """A class to store data about the sampling and result dimensions"""
+
+     def __init__(self, bench_cfg: BenchCfg) -> None:
+         self.dims_name = [i.name for i in bench_cfg.all_vars]
+
+         self.dim_ranges = []
+         self.dim_ranges = [i.values(bench_cfg.debug) for i in bench_cfg.all_vars]
+         self.dims_size = [len(p) for p in self.dim_ranges]
+         self.dim_ranges_index = [list(range(i)) for i in self.dims_size]
+         self.dim_ranges_str = [f"{s}\n" for s in self.dim_ranges]
+         self.coords = dict(zip(self.dims_name, self.dim_ranges))
+
+         logging.debug(f"dims_name: {self.dims_name}")
+         logging.debug(f"dim_ranges {self.dim_ranges_str}")
+         logging.debug(f"dim_ranges_index {self.dim_ranges_index}")
+         logging.debug(f"coords: {self.coords}")
bencher/bench_plot_server.py ADDED
@@ -0,0 +1,100 @@
+ """A server for displaying plots of benchmark results"""
+ import logging
+ import os
+ from typing import List, Tuple
+ from threading import Thread
+
+ import panel as pn
+ from diskcache import Cache
+
+ from bencher.bench_cfg import BenchCfg, BenchPlotSrvCfg
+
+ logging.basicConfig(level=logging.INFO)
+
+
+ class BenchPlotServer:
+     """A server for displaying plots of benchmark results"""
+
+     def __init__(self) -> None:
+         """Create a new BenchPlotServer object"""
+
+     def plot_server(
+         self, bench_name: str, plot_cfg: BenchPlotSrvCfg = BenchPlotSrvCfg(), plots_instance=None
+     ) -> Thread:
+         """Load previously calculated benchmark data from the database and start a plot server to display it
+
+         Args:
+             bench_name (str): The name of the benchmark and output folder for the figures
+             plot_cfg (BenchPlotSrvCfg, optional): Options for the plot server. Defaults to BenchPlotSrvCfg().
+
+         Raises:
+             FileNotFoundError: No data was found in the database to plot
+         """
+
+         if plots_instance is None:
+             plots_instance = self.load_data_from_cache(bench_name)
+         if plot_cfg.port is not None and plot_cfg.allow_ws_origin:
+             os.environ["BOKEH_ALLOW_WS_ORIGIN"] = f"localhost:{plot_cfg.port}"
+
+         return self.serve(bench_name, plots_instance, port=plot_cfg.port, show=plot_cfg.show)
+
+     def load_data_from_cache(self, bench_name: str) -> Tuple[BenchCfg, List[pn.panel]] | None:
+         """Load previously calculated benchmark data from the database so that it can be plotted
+
+         Args:
+             bench_name (str): The name of the benchmark and output folder for the figures
+
+         Returns:
+             Tuple[BenchCfg, List[pn.panel]] | None: benchmark result data and any additional panels
+
+         Raises:
+             FileNotFoundError: No data was found in the database to plot
+         """
+
+         with Cache("cachedir/benchmark_inputs") as cache:
+             if bench_name in cache:
+                 logging.info(f"loading benchmarks: {bench_name}")
+                 # use the benchmark name to look up the hash of the results
+                 bench_cfg_hashes = cache[bench_name]
+                 plots_instance = None
+                 for bench_cfg_hash in bench_cfg_hashes:
+                     # load the results based on the hash retrieved from the benchmark name
+                     if bench_cfg_hash in cache:
+                         logging.info(f"loading cached results from key: {bench_cfg_hash}")
+                         bench_res = cache[bench_cfg_hash]
+                         logging.info(f"loaded: {bench_res.bench_cfg.title}")
+
+                         plots_instance = bench_res.to_auto_plots()
+                     else:
+                         raise FileNotFoundError(
+                             "The benchmarks have been run and saved, but the specific results you are trying to load do not exist. This should not happen and could be because the cache was cleared."
+                         )
+                 return plots_instance
+             raise FileNotFoundError(
+                 "This benchmark name does not exist in the results cache, so the results could not be loaded for plotting. Make sure to run bencher to generate and save results to the cache."
+             )
+
+     def serve(
+         self, bench_name: str, plots_instance: List[pn.panel], port: int = None, show: bool = True
+     ) -> Thread:
+         """Launch a panel server to view results
+
+         Args:
+             bench_name (str): The name of the benchmark, used as the page title
+             plots_instance (List[pn.panel]): list of panel objects to display
+             port (int): use a fixed port to launch the server
+             show (bool): open the served page in a web browser
+         """
+
+         if port is not None:
+             return pn.serve(
+                 plots_instance,
+                 title=bench_name,
+                 websocket_origin=["*"],
+                 port=port,
+                 threaded=True,
+                 show=show,
+             )
+
+         logging.getLogger().setLevel(logging.WARNING)
+         return pn.serve(plots_instance, title=bench_name, threaded=True, show=show)
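
Finally, a hypothetical launch sketch for the server defined above. It uses only the API shown in this file and assumes a benchmark named "example_benchmark" has already been run with use_cache=True, so its results exist under cachedir/benchmark_inputs; both the benchmark name and the port are illustrative.

    # Hypothetical usage of BenchPlotServer, using only the API defined above.
    from bencher.bench_cfg import BenchPlotSrvCfg
    from bencher.bench_plot_server import BenchPlotServer

    BenchPlotServer().plot_server(
        bench_name="example_benchmark",  # assumption: results for this name are already cached
        plot_cfg=BenchPlotSrvCfg(port=8080, show=True),
    )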