holobench 1.3.4__py3-none-any.whl → 1.22.2__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (77)
  1. bencher/__init__.py +4 -1
  2. bencher/bench_cfg.py +37 -34
  3. bencher/bench_plot_server.py +5 -1
  4. bencher/bench_report.py +14 -14
  5. bencher/bench_runner.py +2 -1
  6. bencher/bencher.py +87 -50
  7. bencher/class_enum.py +52 -0
  8. bencher/job.py +6 -4
  9. bencher/optuna_conversions.py +1 -1
  10. bencher/utils.py +58 -3
  11. bencher/video_writer.py +110 -6
  12. holobench-1.22.2.data/data/share/bencher/package.xml +33 -0
  13. holobench-1.22.2.dist-info/LICENSE +21 -0
  14. {holobench-1.3.4.dist-info → holobench-1.22.2.dist-info}/METADATA +39 -32
  15. holobench-1.22.2.dist-info/RECORD +20 -0
  16. {holobench-1.3.4.dist-info → holobench-1.22.2.dist-info}/WHEEL +2 -1
  17. holobench-1.22.2.dist-info/top_level.txt +1 -0
  18. bencher/example/benchmark_data.py +0 -200
  19. bencher/example/example_all.py +0 -45
  20. bencher/example/example_categorical.py +0 -99
  21. bencher/example/example_custom_sweep.py +0 -59
  22. bencher/example/example_docs.py +0 -34
  23. bencher/example/example_float3D.py +0 -101
  24. bencher/example/example_float_cat.py +0 -98
  25. bencher/example/example_floats.py +0 -89
  26. bencher/example/example_floats2D.py +0 -93
  27. bencher/example/example_holosweep.py +0 -104
  28. bencher/example/example_holosweep_objects.py +0 -111
  29. bencher/example/example_holosweep_tap.py +0 -144
  30. bencher/example/example_image.py +0 -82
  31. bencher/example/example_levels.py +0 -181
  32. bencher/example/example_pareto.py +0 -53
  33. bencher/example/example_sample_cache.py +0 -85
  34. bencher/example/example_sample_cache_context.py +0 -116
  35. bencher/example/example_simple.py +0 -134
  36. bencher/example/example_simple_bool.py +0 -34
  37. bencher/example/example_simple_cat.py +0 -47
  38. bencher/example/example_simple_float.py +0 -38
  39. bencher/example/example_strings.py +0 -46
  40. bencher/example/example_time_event.py +0 -62
  41. bencher/example/example_video.py +0 -98
  42. bencher/example/example_workflow.py +0 -189
  43. bencher/example/experimental/example_bokeh_plotly.py +0 -38
  44. bencher/example/experimental/example_hover_ex.py +0 -45
  45. bencher/example/experimental/example_hvplot_explorer.py +0 -39
  46. bencher/example/experimental/example_interactive.py +0 -75
  47. bencher/example/experimental/example_streamnd.py +0 -49
  48. bencher/example/experimental/example_streams.py +0 -36
  49. bencher/example/experimental/example_template.py +0 -40
  50. bencher/example/experimental/example_updates.py +0 -84
  51. bencher/example/experimental/example_vector.py +0 -84
  52. bencher/example/meta/example_meta.py +0 -171
  53. bencher/example/meta/example_meta_cat.py +0 -25
  54. bencher/example/meta/example_meta_float.py +0 -23
  55. bencher/example/meta/example_meta_levels.py +0 -26
  56. bencher/example/optuna/example_optuna.py +0 -78
  57. bencher/example/shelved/example_float2D_scatter.py +0 -109
  58. bencher/example/shelved/example_float3D_cone.py +0 -96
  59. bencher/example/shelved/example_kwargs.py +0 -63
  60. bencher/plotting/__init__.py +0 -0
  61. bencher/plotting/plot_filter.py +0 -110
  62. bencher/plotting/plt_cnt_cfg.py +0 -74
  63. bencher/results/__init__.py +0 -0
  64. bencher/results/bench_result.py +0 -83
  65. bencher/results/bench_result_base.py +0 -401
  66. bencher/results/float_formatter.py +0 -44
  67. bencher/results/holoview_result.py +0 -535
  68. bencher/results/optuna_result.py +0 -332
  69. bencher/results/panel_result.py +0 -113
  70. bencher/results/plotly_result.py +0 -65
  71. bencher/variables/inputs.py +0 -193
  72. bencher/variables/parametrised_sweep.py +0 -206
  73. bencher/variables/results.py +0 -176
  74. bencher/variables/sweep_base.py +0 -167
  75. bencher/variables/time.py +0 -74
  76. holobench-1.3.4.dist-info/RECORD +0 -74
  77. /bencher/example/__init__.py → /holobench-1.22.2.data/data/share/ament_index/resource_index/packages/bencher +0 -0
bencher/__init__.py CHANGED
@@ -11,6 +11,7 @@ from .variables.results import (
     ResultVar,
     ResultVec,
     ResultHmap,
+    ResultPath,
     ResultVideo,
     ResultImage,
     ResultString,
@@ -30,6 +31,7 @@ from .utils import (
     gen_image_path,
     gen_video_path,
     lerp,
+    tabs_in_markdown,
 )
 from .variables.parametrised_sweep import ParametrizedSweep
 from .caching import CachedParams
@@ -38,4 +40,5 @@ from .results.panel_result import PanelResult
 from .results.holoview_result import ReduceType, HoloviewResult
 from .bench_report import BenchReport
 from .job import Executors
-from .video_writer import VideoWriter
+from .video_writer import VideoWriter, add_image
+from .class_enum import ClassEnum, ExampleEnum
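Note: the new top-level exports can be used directly from the package root. A minimal sketch, assuming the 1.22.2 wheel is installed; only names visible in this diff are referenced, and their call signatures are not shown here:

    import bencher as bch

    bch.ResultPath        # new result type for file-path outputs
    bch.tabs_in_markdown  # new helper re-exported from bencher.utils
    bch.add_image         # now exported alongside VideoWriter
    bch.ClassEnum         # enum-to-class factory base (see class_enum.py below)
    bch.ExampleEnum       # reference subclass of ClassEnum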
bencher/bench_cfg.py CHANGED
@@ -38,10 +38,6 @@ class BenchRunCfg(BenchPlotSrvCfg):
         doc="If true each time the function is called it will plot a timeseries of historical and the latest result.",
     )
 
-    debug: bool = param.Boolean(
-        False, doc="Debug the sampling faster by reducing the dimension sampling resolution"
-    )
-
     use_optuna: bool = param.Boolean(False, doc="show optuna plots")
 
     summarise_constant_inputs = param.Boolean(
@@ -164,7 +160,14 @@ class BenchRunCfg(BenchPlotSrvCfg):
         doc="The function can be run serially or in parallel with different futures executors",
     )
 
-    plot_size = param.Integer(default=None)
+    plot_size = param.Integer(default=None, doc="Sets the width and height of the plot")
+    plot_width = param.Integer(
+        default=None,
+        doc="Sets with width of the plots, this will ovverride the plot_size parameter",
+    )
+    plot_height = param.Integer(
+        default=None, doc="Sets the height of the plot, this will ovverride the plot_size parameter"
+    )
 
     @staticmethod
     def from_cmd_line() -> BenchRunCfg:  # pragma: no cover
@@ -296,6 +299,11 @@ class BenchCfg(BenchRunCfg):
         doc="store the hash value of the config to avoid having to hash multiple times",
     )
 
+    plot_callbacks = param.List(
+        None,
+        doc="A callable that takes a BenchResult and returns panel representation of the results",
+    )
+
     def __init__(self, **params):
         super().__init__(**params)
         self.plot_lib = None
@@ -321,7 +329,6 @@ class BenchCfg(BenchRunCfg):
                 hash_sha1(str(self.title)),
                 hash_sha1(self.over_time),
                 repeats_hash,
-                hash_sha1(self.debug),
                 hash_sha1(self.tag),
             )
         )
@@ -337,9 +344,13 @@ class BenchCfg(BenchRunCfg):
     def inputs_as_str(self) -> List[str]:
         return [i.name for i in self.input_vars]
 
-    def describe_sweep(self, width: int = 800) -> pn.pane.Markdown:
+    def describe_sweep(self, width: int = 800, accordion=True) -> pn.pane.Markdown:
         """Produce a markdown summary of the sweep settings"""
-        return pn.pane.Markdown(self.describe_benchmark(), width=width)
+
+        desc = pn.pane.Markdown(self.describe_benchmark(), width=width)
+        if accordion:
+            return pn.Accordion(("Data Collection Parameters", desc))
+        return desc
 
     def describe_benchmark(self) -> str:
         """Generate a string summary of the inputs and results from a BenchCfg
@@ -352,37 +363,30 @@ class BenchCfg(BenchRunCfg):
 
         benchmark_sampling_str.append("Input Variables:")
         for iv in self.input_vars:
-            benchmark_sampling_str.extend(describe_variable(iv, self.debug, True))
+            benchmark_sampling_str.extend(describe_variable(iv, True))
 
         if self.const_vars and (self.summarise_constant_inputs):
             benchmark_sampling_str.append("\nConstants:")
             for cv in self.const_vars:
-                benchmark_sampling_str.extend(describe_variable(cv[0], False, False, cv[1]))
+                benchmark_sampling_str.extend(describe_variable(cv[0], False, cv[1]))
 
         benchmark_sampling_str.append("\nResult Variables:")
         for rv in self.result_vars:
-            benchmark_sampling_str.extend(describe_variable(rv, self.debug, False))
-
-        print_meta = True
-        # if len(self.meta_vars) == 1:
-        #     mv = self.meta_vars[0]
-        #     if mv.name == "repeat" and mv.samples == 1:
-        #         print_meta = False
-
-        if print_meta:
-            benchmark_sampling_str.append("\nMeta Variables:")
-            benchmark_sampling_str.append(f"    run date: {self.run_date}")
-            if self.run_tag is not None and len(self.run_tag) > 0:
-                benchmark_sampling_str.append(f"    run tag: {self.run_tag}")
-            if self.level is not None:
-                benchmark_sampling_str.append(f"    bench level: {self.level}")
-            benchmark_sampling_str.append(f"    use_cache: {self.use_cache}")
-            benchmark_sampling_str.append(f"    use_sample_cache: {self.use_sample_cache}")
-            benchmark_sampling_str.append(f"    only_hash_tag: {self.only_hash_tag}")
-            benchmark_sampling_str.append(f"    parallel: {self.executor}")
-
-            for mv in self.meta_vars:
-                benchmark_sampling_str.extend(describe_variable(mv, self.debug, True))
+            benchmark_sampling_str.extend(describe_variable(rv, False))
+
+        benchmark_sampling_str.append("\nMeta Variables:")
+        benchmark_sampling_str.append(f"    run date: {self.run_date}")
+        if self.run_tag:
+            benchmark_sampling_str.append(f"    run tag: {self.run_tag}")
+        if self.level is not None:
+            benchmark_sampling_str.append(f"    bench level: {self.level}")
+        benchmark_sampling_str.append(f"    use_cache: {self.use_cache}")
+        benchmark_sampling_str.append(f"    use_sample_cache: {self.use_sample_cache}")
+        benchmark_sampling_str.append(f"    only_hash_tag: {self.only_hash_tag}")
+        benchmark_sampling_str.append(f"    executor: {self.executor}")
+
+        for mv in self.meta_vars:
+            benchmark_sampling_str.extend(describe_variable(mv, True))
 
         benchmark_sampling_str.append("```")
 
@@ -420,7 +424,6 @@ class BenchCfg(BenchRunCfg):
         if self.description is not None and description:
             col.append(self.to_description())
         if describe_sweep:
-            col.append(pn.pane.Markdown("## Data Collection Parameters:"))
             col.append(self.describe_sweep())
         if results_suffix:
             col.append(pn.pane.Markdown("## Results:"))
@@ -444,7 +447,7 @@ class DimsCfg:
         self.dims_name = [i.name for i in bench_cfg.all_vars]
 
         self.dim_ranges = []
-        self.dim_ranges = [i.values(bench_cfg.debug) for i in bench_cfg.all_vars]
+        self.dim_ranges = [i.values() for i in bench_cfg.all_vars]
         self.dims_size = [len(p) for p in self.dim_ranges]
         self.dim_ranges_index = [list(range(i)) for i in self.dims_size]
        self.dim_ranges_str = [f"{s}\n" for s in self.dim_ranges]
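Note: per the doc strings above, plot_width and plot_height each override plot_size when set. A hedged usage sketch, assuming BenchRunCfg is instantiated directly as a regular param.Parameterized class:

    from bencher.bench_cfg import BenchRunCfg

    cfg = BenchRunCfg(plot_size=600)                    # square 600x600 plots
    cfg = BenchRunCfg(plot_size=600, plot_width=800)    # width 800 wins over plot_size
    cfg = BenchRunCfg(plot_width=800, plot_height=400)  # fully explicit sizing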
bencher/bench_plot_server.py CHANGED
@@ -1,4 +1,5 @@
 """A server for display plots of benchmark results"""
+
 import logging
 import os
 from typing import List, Tuple
@@ -86,6 +87,10 @@ class BenchPlotServer:
             port (int): use a fixed port to lauch the server
         """
 
+        # suppress verbose tornado and bokeh output
+        for logger in ["tornado", "bokeh"]:
+            logging.getLogger(logger).setLevel(logging.WARNING)
+
         if port is not None:
             return pn.serve(
                 plots_instance,
@@ -96,5 +101,4 @@ class BenchPlotServer:
                 show=show,
             )
 
-        logging.getLogger().setLevel(logging.WARNING)
         return pn.serve(plots_instance, title=bench_name, threaded=True, show=show)
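Note: the change above suppresses only the two noisy third-party loggers instead of raising the root logger to WARNING, which previously muted bencher's own INFO output too. A self-contained sketch of the difference:

    import logging

    logging.basicConfig(level=logging.INFO)

    # old behaviour (removed): raises the root logger, silencing everything
    # logging.getLogger().setLevel(logging.WARNING)

    # new behaviour: only tornado and bokeh are quieted
    for name in ["tornado", "bokeh"]:
        logging.getLogger(name).setLevel(logging.WARNING)

    logging.info("bencher messages still appear")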
bencher/bench_report.py CHANGED
@@ -3,7 +3,8 @@ from typing import Callable
 import os
 import panel as pn
 from pathlib import Path
-import shutil
+import tempfile
+
 from threading import Thread
 
 from bencher.results.bench_result import BenchResult
@@ -47,7 +48,7 @@ class BenchReport(BenchPlotServer):
         self.pane.append(col)
 
     def append_result(self, bench_res: BenchResult) -> None:
-        self.append_tab(bench_res.to_auto_plots(), bench_res.bench_cfg.title)
+        self.append_tab(bench_res.plot(), bench_res.bench_cfg.title)
 
     def append_tab(self, pane: pn.panel, name: str = None) -> None:
         if pane is not None:
@@ -137,24 +138,23 @@ class BenchReport(BenchPlotServer):
 
         remote, publish_url = remote_callback(branch_name)
 
-        directory = "tmpgit"
-        report_path = self.save(directory, filename="index.html", in_html_folder=False)
-        logging.info(f"created report at: {report_path.absolute()}")
+        with tempfile.TemporaryDirectory() as td:
+            directory = td
+            report_path = self.save(directory, filename="index.html", in_html_folder=False)
+            logging.info(f"created report at: {report_path.absolute()}")
 
-        cd_dir = f"cd {directory} &&"
+            cd_dir = f"cd {directory} &&"
 
-        os.system(f"{cd_dir} git init")
-        os.system(f"{cd_dir} git checkout -b {branch_name}")
-        os.system(f"{cd_dir} git add index.html")
-        os.system(f'{cd_dir} git commit -m "publish {branch_name}"')
-        os.system(f"{cd_dir} git remote add origin {remote}")
-        os.system(f"{cd_dir} git push --set-upstream origin {branch_name} -f")
+            os.system(f"{cd_dir} git init")
+            os.system(f"{cd_dir} git checkout -b {branch_name}")
+            os.system(f"{cd_dir} git add index.html")
+            os.system(f'{cd_dir} git commit -m "publish {branch_name}"')
+            os.system(f"{cd_dir} git remote add origin {remote}")
+            os.system(f"{cd_dir} git push --set-upstream origin {branch_name} -f")
 
         logging.info("Published report @")
         logging.info(publish_url)
 
-        shutil.rmtree(directory)
-
         return publish_url
bencher/bench_runner.py CHANGED
@@ -103,7 +103,7 @@ class BenchRunner:
         for r in range(1, repeats + 1):
             for lvl in range(min_level, max_level + 1):
                 if grouped:
-                    report_level = BenchReport(self.name)
+                    report_level = BenchReport(f"{run_cfg.run_tag}_{self.name}")
 
                 for bch_fn in self.bench_fns:
                     run_lvl = deepcopy(run_cfg)
@@ -114,6 +114,7 @@ class BenchRunner:
                         res = bch_fn(run_lvl, report_level)
                     else:
                         res = bch_fn(run_lvl, BenchReport())
+                        res.report.bench_name = f"{run_cfg.run_tag}_{res.report.bench_name}"
                     self.show_publish(res.report, show, publish, save, debug)
                     self.results.append(res)
             if grouped:
bencher/bencher.py CHANGED
@@ -10,6 +10,7 @@ import xarray as xr
 from diskcache import Cache
 from contextlib import suppress
 from functools import partial
+import panel as pn
 
 from bencher.worker_job import WorkerJob
 
@@ -23,6 +24,7 @@ from bencher.variables.results import (
     ResultVar,
     ResultVec,
     ResultHmap,
+    ResultPath,
     ResultVideo,
     ResultImage,
     ResultString,
@@ -32,6 +34,7 @@ from bencher.variables.results import (
 from bencher.results.bench_result import BenchResult
 from bencher.variables.parametrised_sweep import ParametrizedSweep
 from bencher.job import Job, FutureCache, JobFuture, Executors
+from bencher.utils import params_to_str
 
 # Customize the formatter
 formatter = logging.Formatter("%(levelname)s: %(message)s")
@@ -164,6 +167,23 @@ class Bench(BenchPlotServer):
 
         self.cache_size = int(100e9)  # default to 100gb
 
+        # self.bench_cfg = BenchCfg()
+
+        # Maybe put this in SweepCfg
+        self.input_vars = None
+        self.result_vars = None
+        self.const_vars = None
+        self.plot_callbacks = []
+        self.plot = True
+
+    def add_plot_callback(self, callback: Callable[[BenchResult], pn.panel], **kwargs) -> None:
+        """Add a plotting callback that will be called on any result produced when calling a sweep funciton. You can pass additional arguments to the plotting function with kwargs. e.g. add_plot_callback(bch.BenchResult.to_video_grid,)
+
+        Args:
+            callback (Callable[[BenchResult], pn.panel]): _description_
+        """
+        self.plot_callbacks.append(partial(callback, **kwargs))
+
     def set_worker(self, worker: Callable, worker_input_cfg: ParametrizedSweep = None) -> None:
         """Set the benchmark worker function and optionally the type the worker expects
 
@@ -187,36 +207,9 @@ class Bench(BenchPlotServer):
         logging.info(f"setting worker {worker}")
         self.worker_input_cfg = worker_input_cfg
 
-    def sweep(
-        self,
-        input_vars: List[ParametrizedSweep] = None,
-        result_vars: List[ParametrizedSweep] = None,
-        const_vars: List[ParametrizedSweep] = None,
-        time_src: datetime = None,
-        description: str = None,
-        post_description: str = None,
-        pass_repeat: bool = False,
-        tag: str = "",
-        run_cfg: BenchRunCfg = None,
-        plot: bool = False,
-    ) -> BenchResult:
-        title = "Sweeping " + " vs ".join([i.name for i in input_vars])
-        return self.plot_sweep(
-            title,
-            input_vars=input_vars,
-            result_vars=result_vars,
-            const_vars=const_vars,
-            time_src=time_src,
-            description=description,
-            post_description=post_description,
-            pass_repeat=pass_repeat,
-            tag=tag,
-            run_cfg=run_cfg,
-            plot=plot,
-        )
-
     def sweep_sequential(
         self,
+        title="",
         input_vars: List[ParametrizedSweep] = None,
         result_vars: List[ParametrizedSweep] = None,
         const_vars: List[ParametrizedSweep] = None,
@@ -225,23 +218,25 @@ class Bench(BenchPlotServer):
         group_size: int = 1,
         iterations: int = 1,
         relationship_cb=None,
+        plot_callbacks: List | bool = None,
     ) -> List[BenchResult]:
         results = []
         if relationship_cb is None:
             relationship_cb = combinations
         for it in range(iterations):
             for input_group in relationship_cb(input_vars, group_size):
-                title = "Sweeping " + " vs ".join([i.name for i in input_vars])
+                title_gen = title + "Sweeping " + " vs ".join(params_to_str(input_group))
                 if iterations > 1:
-                    title += f" iteration:{it}"
+                    title_gen += f" iteration:{it}"
                 res = self.plot_sweep(
-                    title=title,
-                    input_vars=input_group,
+                    title=title_gen,
+                    input_vars=list(input_group),
                     result_vars=result_vars,
                     const_vars=const_vars,
                     run_cfg=run_cfg,
-                    plot=True,
+                    plot_callbacks=plot_callbacks,
                 )
+
                 if optimise_var is not None:
                     const_vars = res.get_optimal_inputs(optimise_var, True)
                 results.append(res)
@@ -259,7 +254,7 @@ class Bench(BenchPlotServer):
         pass_repeat: bool = False,
         tag: str = "",
         run_cfg: BenchRunCfg = None,
-        plot: bool = True,
+        plot_callbacks: List | bool = None,
     ) -> BenchResult:
         """The all in 1 function benchmarker and results plotter.
 
@@ -274,7 +269,8 @@ class Bench(BenchPlotServer):
             pass_repeat (bool,optional) By default do not pass the kwarg 'repeat' to the benchmark function. Set to true if
             you want the benchmark function to be passed the repeat number
             tag (str,optional): Use tags to group different benchmarks together.
-            run_cfg: (BenchRunCfg, optional): A config for storing how the benchmarks and run and plotted
+            run_cfg: (BenchRunCfg, optional): A config for storing how the benchmarks and run
+            plot_callbacks: (List | bool) A list of plot callbacks to call on the results. Pass false or an empty list to turn off plotting
         Raises:
             ValueError: If a result variable is not set
 
@@ -287,18 +283,26 @@ class Bench(BenchPlotServer):
             logging.info(
                 "No input variables passed, using all param variables in bench class as inputs"
             )
-            input_vars = self.worker_class_instance.get_inputs_only()
+            if self.input_vars is None:
+                input_vars = self.worker_class_instance.get_inputs_only()
+            else:
+                input_vars = self.input_vars
             for i in input_vars:
                 logging.info(f"input var: {i.name}")
             if result_vars is None:
                 logging.info(
                     "No results variables passed, using all result variables in bench class:"
                 )
-                result_vars = self.worker_class_instance.get_results_only()
-                for r in result_vars:
-                    logging.info(f"result var: {r.name}")
+                if self.result_vars is None:
+                    result_vars = self.worker_class_instance.get_results_only()
+                else:
+                    result_vars = self.result_vars
+
             if const_vars is None:
-                const_vars = self.worker_class_instance.get_input_defaults()
+                if self.const_vars is None:
+                    const_vars = self.worker_class_instance.get_input_defaults()
+                else:
+                    const_vars = self.const_vars
         else:
             if input_vars is None:
                 input_vars = []
@@ -313,6 +317,13 @@ class Bench(BenchPlotServer):
             input_vars[i] = self.convert_vars_to_params(input_vars[i], "input")
         for i in range(len(result_vars)):
             result_vars[i] = self.convert_vars_to_params(result_vars[i], "result")
+
+        for r in result_vars:
+            logging.info(f"result var: {r.name}")
+
+        if isinstance(const_vars, dict):
+            const_vars = list(const_vars.items())
+
         for i in range(len(const_vars)):
             # consts come as tuple pairs
             cv_list = list(const_vars[i])
@@ -338,8 +349,8 @@ class Bench(BenchPlotServer):
         elif len(const_vars) > 0:
             title = "Constant Value"
             if len(const_vars) > 1:
-                title += "es"
-            title += ": " + " ".join([f"{c[0].name}={c[1]}" for c in const_vars])
+                title += "s"
+            title += ": " + ", ".join([f"{c[0].name}={c[1]}" for c in const_vars])
         else:
             raise RuntimeError("you must pass a title, or define inputs or consts")
 
@@ -373,6 +384,14 @@ class Bench(BenchPlotServer):
                 "## Results Description\nPlease set post_description to explain these results"
             )
 
+        if plot_callbacks is None:
+            if self.plot_callbacks is not None and len(self.plot_callbacks) == 0:
+                plot_callbacks = [BenchResult.to_auto_plots]
+            else:
+                plot_callbacks = self.plot_callbacks
+        elif isinstance(plot_callbacks, bool):
+            plot_callbacks = [BenchResult.to_auto_plots] if plot_callbacks else []
+
         bench_cfg = BenchCfg(
             input_vars=input_vars,
             result_vars=result_vars_only,
@@ -384,7 +403,13 @@ class Bench(BenchPlotServer):
             title=title,
             pass_repeat=pass_repeat,
             tag=run_cfg.run_tag + tag,
+            plot_callbacks=plot_callbacks,
         )
+        return self.run_sweep(bench_cfg, run_cfg, time_src)
+
+    def run_sweep(
+        self, bench_cfg: BenchCfg, run_cfg: BenchRunCfg, time_src: datetime
+    ) -> BenchResult:
         print("tag", bench_cfg.tag)
 
         bench_cfg.param.update(run_cfg.param.values())
@@ -439,8 +464,9 @@ class Bench(BenchPlotServer):
 
         bench_res.post_setup()
 
-        if plot and bench_res.bench_cfg.auto_plot:
+        if bench_cfg.auto_plot:
             self.report.append_result(bench_res)
+
         self.results.append(bench_res)
         return bench_res
 
@@ -532,7 +558,7 @@ class Bench(BenchPlotServer):
             time_src (datetime | str): a representation of the sample time
 
         Returns:
-            _type_: _description_
+            tuple[BenchResult, List, List]: bench_result, function intputs, dimension names
         """
 
         if time_src is None:
@@ -540,13 +566,11 @@ class Bench(BenchPlotServer):
         bench_cfg.meta_vars = self.define_extra_vars(bench_cfg, bench_cfg.repeats, time_src)
 
         bench_cfg.all_vars = bench_cfg.input_vars + bench_cfg.meta_vars
-
         # bench_cfg.all_vars = bench_cfg.iv_time + bench_cfg.input_vars +[ bench_cfg.iv_repeat]
-
         # bench_cfg.all_vars = [ bench_cfg.iv_repeat] +bench_cfg.input_vars + bench_cfg.iv_time
 
         for i in bench_cfg.all_vars:
-            logging.info(i.sampling_str(bench_cfg.debug))
+            logging.info(i.sampling_str())
 
         dims_cfg = DimsCfg(bench_cfg)
         function_inputs = list(
@@ -562,7 +586,9 @@ class Bench(BenchPlotServer):
             if isinstance(rv, ResultReference):
                 result_data = np.full(dims_cfg.dims_size, -1, dtype=int)
                 data_vars[rv.name] = (dims_cfg.dims_name, result_data)
-            if isinstance(rv, (ResultVideo, ResultImage, ResultString, ResultContainer)):
+            if isinstance(
+                rv, (ResultPath, ResultVideo, ResultImage, ResultString, ResultContainer)
+            ):
                 result_data = np.full(dims_cfg.dims_size, "NAN", dtype=object)
                 data_vars[rv.name] = (dims_cfg.dims_name, result_data)
             elif type(rv) == ResultVec:
@@ -604,7 +630,6 @@ class Bench(BenchPlotServer):
             default=repeats,
             bounds=[1, repeats],
             samples=repeats,
-            samples_debug=2 if repeats > 2 else 1,
             units="repeats",
             doc="The number of times a sample was measured",
         )
@@ -700,7 +725,15 @@ class Bench(BenchPlotServer):
                 logging.info(f"{rv.name}: {result_value}")
 
                 if isinstance(
-                    rv, (ResultVar, ResultVideo, ResultImage, ResultString, ResultContainer)
+                    rv,
+                    (
+                        ResultVar,
+                        ResultVideo,
+                        ResultImage,
+                        ResultString,
+                        ResultContainer,
+                        ResultPath,
+                    ),
                 ):
                     set_xarray_multidim(bench_res.ds[rv.name], worker_job.index_tuple, result_value)
                 elif isinstance(rv, ResultReference):
@@ -791,3 +824,7 @@ class Bench(BenchPlotServer):
 
     def get_result(self, index: int = -1) -> BenchResult:
         return self.results[index]
+
+    def publish(self, remote_callback: Callable) -> str:
+        branch_name = f"{self.bench_name}_{self.run_cfg.run_tag}"
+        return self.report.publish(remote_callback, branch_name=branch_name)
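Note: the boolean plot flag is replaced by plot_callbacks throughout. The resolution logic added to plot_sweep above, restated as a standalone function for clarity (the function name and parameter names are illustrative, not part of the API):

    def resolve_plot_callbacks(arg, instance_callbacks, default):
        # mirrors the branching added to plot_sweep
        if arg is None:
            if instance_callbacks is not None and len(instance_callbacks) == 0:
                return [default]  # nothing registered: fall back to auto plots
            return instance_callbacks  # callbacks added via add_plot_callback
        if isinstance(arg, bool):
            return [default] if arg else []  # False (or []) disables plotting
        return arg  # an explicit list wins

    assert resolve_plot_callbacks(None, [], "auto") == ["auto"]
    assert resolve_plot_callbacks(False, [], "auto") == []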
bencher/class_enum.py ADDED
@@ -0,0 +1,52 @@
+from __future__ import annotations
+from strenum import StrEnum
+from typing import Any
+import importlib
+from abc import abstractmethod
+from dataclasses import dataclass
+from enum import auto
+
+
+class ClassEnum(StrEnum):
+    """A ClassEnum is a pattern to make it easier to create factory a factory method that converts from an enum to a corresponding class. Subclasses should implement to_class(enum_instance:EnumType) which takes an enum returns the corresponding instance of that class."""
+
+    @classmethod
+    def to_class_generic(cls, module_import: str, class_name: str) -> Any:
+        """Create an instance of the class referred to by this enum
+
+        Returns:
+            Any: instance of the class
+        """
+
+        class_def = getattr(importlib.import_module(module_import), class_name)
+        return class_def()
+
+    @classmethod
+    @abstractmethod
+    def to_class(cls, enum_val: ClassEnum) -> Any:
+        """Subclasses should overrides this method to take an enum returns the corresponding instance of that class."""
+        raise NotImplementedError()
+
+
+@dataclass
+class BaseClass:
+    baseclassname: str = "class0"
+
+
+@dataclass
+class Class1(BaseClass):
+    classname: str = "class1"
+
+
+@dataclass
+class Class2(BaseClass):
+    classname: str = "class2"
+
+
+class ExampleEnum(ClassEnum):
+    Class1 = auto()
+    Class2 = auto()
+
+    @classmethod
+    def to_class(cls, enum_val: ExampleEnum) -> BaseClass:
+        return cls.to_class_generic("bencher.class_enum", enum_val)
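Note: a usage sketch for the new factory pattern, using the ExampleEnum reference implementation above. The strenum package's auto() sets each member's value to its exact name, which to_class_generic then resolves with importlib:

    from bencher.class_enum import Class1, ExampleEnum

    instance = ExampleEnum.to_class(ExampleEnum.Class1)
    assert isinstance(instance, Class1)  # member name maps to the class of the same name
    print(instance.classname)  # "class1"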
bencher/job.py CHANGED
@@ -80,7 +80,8 @@ class FutureCache:
         size_limit: int = int(20e9),  # 20 GB
         use_cache=True,
     ):
-        self.executor = Executors.factory(executor)
+        self.executor_type = executor
+        self.executor = None
        if use_cache:
            self.cache = Cache(f"cachedir/{cache_name}", tag_index=tag_index, size_limit=size_limit)
            logging.info(f"cache dir: {self.cache.directory}")
@@ -110,6 +111,9 @@ class FutureCache:
 
         self.worker_fn_call_count += 1
 
+        if self.executor_type is not Executors.SERIAL:
+            if self.executor is None:
+                self.executor = Executors.factory(self.executor_type)
         if self.executor is not None:
             self.overwrite_msg(job, " starting parallel job...")
             return JobFuture(
@@ -148,9 +152,7 @@ class FutureCache:
             self.cache.close()
         if self.executor:
             self.executor.shutdown()
-
-    # def __del__(self):
-    #     self.close()
+            self.executor = None
 
     def stats(self) -> str:
         logging.info(f"job calls: {self.worker_wrapper_call_count}")
bencher/optuna_conversions.py CHANGED
@@ -30,7 +30,7 @@ def optuna_grid_search(bench_cfg: BenchCfg) -> optuna.Study:
     """
     search_space = {}
     for iv in bench_cfg.all_vars:
-        search_space[iv.name] = iv.values(bench_cfg.debug)
+        search_space[iv.name] = iv.values()
     directions = []
    for rv in bench_cfg.optuna_targets(True):
        directions.append(rv.direction)