holobench-1.3.5-py3-none-any.whl → holobench-1.22.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. bencher/__init__.py +3 -0
  2. bencher/bench_cfg.py +29 -33
  3. bencher/bench_plot_server.py +5 -1
  4. bencher/bench_report.py +14 -14
  5. bencher/bench_runner.py +2 -1
  6. bencher/bencher.py +77 -52
  7. bencher/class_enum.py +52 -0
  8. bencher/job.py +6 -4
  9. bencher/optuna_conversions.py +1 -1
  10. bencher/utils.py +42 -4
  11. bencher/video_writer.py +101 -10
  12. holobench-1.22.2.data/data/share/bencher/package.xml +33 -0
  13. holobench-1.22.2.dist-info/LICENSE +21 -0
  14. {holobench-1.3.5.dist-info → holobench-1.22.2.dist-info}/METADATA +39 -31
  15. holobench-1.22.2.dist-info/RECORD +20 -0
  16. {holobench-1.3.5.dist-info → holobench-1.22.2.dist-info}/WHEEL +2 -1
  17. holobench-1.22.2.dist-info/top_level.txt +1 -0
  18. bencher/example/benchmark_data.py +0 -200
  19. bencher/example/example_all.py +0 -45
  20. bencher/example/example_categorical.py +0 -99
  21. bencher/example/example_custom_sweep.py +0 -59
  22. bencher/example/example_docs.py +0 -34
  23. bencher/example/example_float3D.py +0 -101
  24. bencher/example/example_float_cat.py +0 -98
  25. bencher/example/example_floats.py +0 -89
  26. bencher/example/example_floats2D.py +0 -93
  27. bencher/example/example_holosweep.py +0 -104
  28. bencher/example/example_holosweep_objects.py +0 -111
  29. bencher/example/example_holosweep_tap.py +0 -144
  30. bencher/example/example_image.py +0 -82
  31. bencher/example/example_levels.py +0 -181
  32. bencher/example/example_pareto.py +0 -53
  33. bencher/example/example_sample_cache.py +0 -85
  34. bencher/example/example_sample_cache_context.py +0 -116
  35. bencher/example/example_simple.py +0 -134
  36. bencher/example/example_simple_bool.py +0 -34
  37. bencher/example/example_simple_cat.py +0 -47
  38. bencher/example/example_simple_float.py +0 -38
  39. bencher/example/example_strings.py +0 -46
  40. bencher/example/example_time_event.py +0 -62
  41. bencher/example/example_video.py +0 -124
  42. bencher/example/example_workflow.py +0 -189
  43. bencher/example/experimental/example_bokeh_plotly.py +0 -38
  44. bencher/example/experimental/example_hover_ex.py +0 -45
  45. bencher/example/experimental/example_hvplot_explorer.py +0 -39
  46. bencher/example/experimental/example_interactive.py +0 -75
  47. bencher/example/experimental/example_streamnd.py +0 -49
  48. bencher/example/experimental/example_streams.py +0 -36
  49. bencher/example/experimental/example_template.py +0 -40
  50. bencher/example/experimental/example_updates.py +0 -84
  51. bencher/example/experimental/example_vector.py +0 -84
  52. bencher/example/meta/example_meta.py +0 -171
  53. bencher/example/meta/example_meta_cat.py +0 -25
  54. bencher/example/meta/example_meta_float.py +0 -23
  55. bencher/example/meta/example_meta_levels.py +0 -26
  56. bencher/example/optuna/example_optuna.py +0 -78
  57. bencher/example/shelved/example_float2D_scatter.py +0 -109
  58. bencher/example/shelved/example_float3D_cone.py +0 -96
  59. bencher/example/shelved/example_kwargs.py +0 -63
  60. bencher/plotting/__init__.py +0 -0
  61. bencher/plotting/plot_filter.py +0 -110
  62. bencher/plotting/plt_cnt_cfg.py +0 -74
  63. bencher/results/__init__.py +0 -0
  64. bencher/results/bench_result.py +0 -80
  65. bencher/results/bench_result_base.py +0 -405
  66. bencher/results/float_formatter.py +0 -44
  67. bencher/results/holoview_result.py +0 -592
  68. bencher/results/optuna_result.py +0 -354
  69. bencher/results/panel_result.py +0 -113
  70. bencher/results/plotly_result.py +0 -65
  71. bencher/variables/inputs.py +0 -193
  72. bencher/variables/parametrised_sweep.py +0 -206
  73. bencher/variables/results.py +0 -176
  74. bencher/variables/sweep_base.py +0 -167
  75. bencher/variables/time.py +0 -74
  76. holobench-1.3.5.dist-info/RECORD +0 -74
  77. /bencher/example/__init__.py → /holobench-1.22.2.data/data/share/ament_index/resource_index/packages/bencher +0 -0
bencher/__init__.py CHANGED
@@ -11,6 +11,7 @@ from .variables.results import (
     ResultVar,
     ResultVec,
     ResultHmap,
+    ResultPath,
     ResultVideo,
     ResultImage,
     ResultString,
@@ -30,6 +31,7 @@ from .utils import (
     gen_image_path,
     gen_video_path,
     lerp,
+    tabs_in_markdown,
 )
 from .variables.parametrised_sweep import ParametrizedSweep
 from .caching import CachedParams
@@ -39,3 +41,4 @@ from .results.holoview_result import ReduceType, HoloviewResult
 from .bench_report import BenchReport
 from .job import Executors
 from .video_writer import VideoWriter, add_image
+from .class_enum import ClassEnum, ExampleEnum
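Note: the additions above make `ResultPath`, `tabs_in_markdown`, `ClassEnum`, and `ExampleEnum` importable from the package root in 1.22.2. A minimal sketch (imports only; the new helpers themselves are covered in their own files below):

```python
# Sketch: new top-level exports in holobench 1.22.2
from bencher import ResultPath, tabs_in_markdown, ClassEnum, ExampleEnum
```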
bencher/bench_cfg.py CHANGED
@@ -38,10 +38,6 @@ class BenchRunCfg(BenchPlotSrvCfg):
         doc="If true each time the function is called it will plot a timeseries of historical and the latest result.",
     )
 
-    debug: bool = param.Boolean(
-        False, doc="Debug the sampling faster by reducing the dimension sampling resolution"
-    )
-
     use_optuna: bool = param.Boolean(False, doc="show optuna plots")
 
     summarise_constant_inputs = param.Boolean(
@@ -303,6 +299,11 @@ class BenchCfg(BenchRunCfg):
         doc="store the hash value of the config to avoid having to hash multiple times",
     )
 
+    plot_callbacks = param.List(
+        None,
+        doc="A callable that takes a BenchResult and returns panel representation of the results",
+    )
+
     def __init__(self, **params):
         super().__init__(**params)
         self.plot_lib = None
@@ -328,7 +329,6 @@
                 hash_sha1(str(self.title)),
                 hash_sha1(self.over_time),
                 repeats_hash,
-                hash_sha1(self.debug),
                 hash_sha1(self.tag),
             )
         )
@@ -344,9 +344,13 @@
     def inputs_as_str(self) -> List[str]:
         return [i.name for i in self.input_vars]
 
-    def describe_sweep(self, width: int = 800) -> pn.pane.Markdown:
+    def describe_sweep(self, width: int = 800, accordion=True) -> pn.pane.Markdown:
         """Produce a markdown summary of the sweep settings"""
-        return pn.pane.Markdown(self.describe_benchmark(), width=width)
+
+        desc = pn.pane.Markdown(self.describe_benchmark(), width=width)
+        if accordion:
+            return pn.Accordion(("Data Collection Parameters", desc))
+        return desc
 
     def describe_benchmark(self) -> str:
         """Generate a string summary of the inputs and results from a BenchCfg
@@ -359,37 +363,30 @@
 
         benchmark_sampling_str.append("Input Variables:")
         for iv in self.input_vars:
-            benchmark_sampling_str.extend(describe_variable(iv, self.debug, True))
+            benchmark_sampling_str.extend(describe_variable(iv, True))
 
         if self.const_vars and (self.summarise_constant_inputs):
             benchmark_sampling_str.append("\nConstants:")
             for cv in self.const_vars:
-                benchmark_sampling_str.extend(describe_variable(cv[0], False, False, cv[1]))
+                benchmark_sampling_str.extend(describe_variable(cv[0], False, cv[1]))
 
         benchmark_sampling_str.append("\nResult Variables:")
         for rv in self.result_vars:
-            benchmark_sampling_str.extend(describe_variable(rv, self.debug, False))
-
-        print_meta = True
-        # if len(self.meta_vars) == 1:
-        #     mv = self.meta_vars[0]
-        #     if mv.name == "repeat" and mv.samples == 1:
-        #         print_meta = False
-
-        if print_meta:
-            benchmark_sampling_str.append("\nMeta Variables:")
-            benchmark_sampling_str.append(f" run date: {self.run_date}")
-            if self.run_tag is not None and len(self.run_tag) > 0:
-                benchmark_sampling_str.append(f" run tag: {self.run_tag}")
-            if self.level is not None:
-                benchmark_sampling_str.append(f" bench level: {self.level}")
-            benchmark_sampling_str.append(f" use_cache: {self.use_cache}")
-            benchmark_sampling_str.append(f" use_sample_cache: {self.use_sample_cache}")
-            benchmark_sampling_str.append(f" only_hash_tag: {self.only_hash_tag}")
-            benchmark_sampling_str.append(f" parallel: {self.executor}")
-
-            for mv in self.meta_vars:
-                benchmark_sampling_str.extend(describe_variable(mv, self.debug, True))
+            benchmark_sampling_str.extend(describe_variable(rv, False))
+
+        benchmark_sampling_str.append("\nMeta Variables:")
+        benchmark_sampling_str.append(f" run date: {self.run_date}")
+        if self.run_tag:
+            benchmark_sampling_str.append(f" run tag: {self.run_tag}")
+        if self.level is not None:
+            benchmark_sampling_str.append(f" bench level: {self.level}")
+        benchmark_sampling_str.append(f" use_cache: {self.use_cache}")
+        benchmark_sampling_str.append(f" use_sample_cache: {self.use_sample_cache}")
+        benchmark_sampling_str.append(f" only_hash_tag: {self.only_hash_tag}")
+        benchmark_sampling_str.append(f" executor: {self.executor}")
+
+        for mv in self.meta_vars:
+            benchmark_sampling_str.extend(describe_variable(mv, True))
 
         benchmark_sampling_str.append("```")
 
@@ -427,7 +424,6 @@
         if self.description is not None and description:
             col.append(self.to_description())
         if describe_sweep:
-            col.append(pn.pane.Markdown("## Data Collection Parameters:"))
             col.append(self.describe_sweep())
         if results_suffix:
             col.append(pn.pane.Markdown("## Results:"))
@@ -451,7 +447,7 @@ class DimsCfg:
         self.dims_name = [i.name for i in bench_cfg.all_vars]
 
         self.dim_ranges = []
-        self.dim_ranges = [i.values(bench_cfg.debug) for i in bench_cfg.all_vars]
+        self.dim_ranges = [i.values() for i in bench_cfg.all_vars]
         self.dims_size = [len(p) for p in self.dim_ranges]
         self.dim_ranges_index = [list(range(i)) for i in self.dims_size]
         self.dim_ranges_str = [f"{s}\n" for s in self.dim_ranges]
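Note: `describe_sweep` now wraps the markdown summary in a collapsible `pn.Accordion` titled "Data Collection Parameters" by default, which is why the standalone "## Data Collection Parameters:" heading is dropped in the @@ -427 hunk. A sketch of both call styles, assuming a populated `BenchCfg` instance named `cfg`:

```python
pane = cfg.describe_sweep()                  # pn.Accordion (new default)
plain = cfg.describe_sweep(accordion=False)  # plain pn.pane.Markdown, as in 1.3.5
```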
bencher/bench_plot_server.py CHANGED
@@ -1,4 +1,5 @@
 """A server for display plots of benchmark results"""
+
 import logging
 import os
 from typing import List, Tuple
@@ -86,6 +87,10 @@ class BenchPlotServer:
             port (int): use a fixed port to launch the server
         """
 
+        # suppress verbose tornado and bokeh output
+        for logger in ["tornado", "bokeh"]:
+            logging.getLogger(logger).setLevel(logging.WARNING)
+
         if port is not None:
             return pn.serve(
                 plots_instance,
@@ -96,5 +101,4 @@
                 show=show,
             )
 
-        logging.getLogger().setLevel(logging.WARNING)
         return pn.serve(plots_instance, title=bench_name, threaded=True, show=show)
bencher/bench_report.py CHANGED
@@ -3,7 +3,8 @@ from typing import Callable
 import os
 import panel as pn
 from pathlib import Path
-import shutil
+import tempfile
+
 from threading import Thread
 
 from bencher.results.bench_result import BenchResult
@@ -47,7 +48,7 @@ class BenchReport(BenchPlotServer):
         self.pane.append(col)
 
     def append_result(self, bench_res: BenchResult) -> None:
-        self.append_tab(bench_res.to_auto_plots(), bench_res.bench_cfg.title)
+        self.append_tab(bench_res.plot(), bench_res.bench_cfg.title)
 
     def append_tab(self, pane: pn.panel, name: str = None) -> None:
         if pane is not None:
@@ -137,24 +138,23 @@
 
         remote, publish_url = remote_callback(branch_name)
 
-        directory = "tmpgit"
-        report_path = self.save(directory, filename="index.html", in_html_folder=False)
-        logging.info(f"created report at: {report_path.absolute()}")
+        with tempfile.TemporaryDirectory() as td:
+            directory = td
+            report_path = self.save(directory, filename="index.html", in_html_folder=False)
+            logging.info(f"created report at: {report_path.absolute()}")
 
-        cd_dir = f"cd {directory} &&"
+            cd_dir = f"cd {directory} &&"
 
-        os.system(f"{cd_dir} git init")
-        os.system(f"{cd_dir} git checkout -b {branch_name}")
-        os.system(f"{cd_dir} git add index.html")
-        os.system(f'{cd_dir} git commit -m "publish {branch_name}"')
-        os.system(f"{cd_dir} git remote add origin {remote}")
-        os.system(f"{cd_dir} git push --set-upstream origin {branch_name} -f")
+            os.system(f"{cd_dir} git init")
+            os.system(f"{cd_dir} git checkout -b {branch_name}")
+            os.system(f"{cd_dir} git add index.html")
+            os.system(f'{cd_dir} git commit -m "publish {branch_name}"')
+            os.system(f"{cd_dir} git remote add origin {remote}")
+            os.system(f"{cd_dir} git push --set-upstream origin {branch_name} -f")
 
         logging.info("Published report @")
         logging.info(publish_url)
 
-        shutil.rmtree(directory)
-
         return publish_url
 
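Note: publishing previously wrote the report into a fixed `tmpgit` directory and cleaned up with `shutil.rmtree`, which leaked the directory if any step failed. `tempfile.TemporaryDirectory` removes it even on error. A self-contained sketch of the idiom (the pattern the diff adopts, not the bencher API):

```python
import os
import tempfile

with tempfile.TemporaryDirectory() as directory:
    # write files into `directory`, then run commands inside it
    os.system(f"cd {directory} && git init")
# on leaving the block the directory is deleted, even if an exception was raised
```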
bencher/bench_runner.py CHANGED
@@ -103,7 +103,7 @@ class BenchRunner:
         for r in range(1, repeats + 1):
             for lvl in range(min_level, max_level + 1):
                 if grouped:
-                    report_level = BenchReport(self.name)
+                    report_level = BenchReport(f"{run_cfg.run_tag}_{self.name}")
 
                 for bch_fn in self.bench_fns:
                     run_lvl = deepcopy(run_cfg)
@@ -114,6 +114,7 @@
                         res = bch_fn(run_lvl, report_level)
                     else:
                         res = bch_fn(run_lvl, BenchReport())
+                        res.report.bench_name = f"{run_cfg.run_tag}_{res.report.bench_name}"
                     self.show_publish(res.report, show, publish, save, debug)
                 self.results.append(res)
             if grouped:
bencher/bencher.py CHANGED
@@ -10,6 +10,7 @@ import xarray as xr
 from diskcache import Cache
 from contextlib import suppress
 from functools import partial
+import panel as pn
 
 from bencher.worker_job import WorkerJob
 
@@ -23,6 +24,7 @@ from bencher.variables.results import (
     ResultVar,
     ResultVec,
     ResultHmap,
+    ResultPath,
     ResultVideo,
     ResultImage,
     ResultString,
@@ -32,6 +34,7 @@ from bencher.variables.results import (
 from bencher.results.bench_result import BenchResult
 from bencher.variables.parametrised_sweep import ParametrizedSweep
 from bencher.job import Job, FutureCache, JobFuture, Executors
+from bencher.utils import params_to_str
 
 # Customize the formatter
 formatter = logging.Formatter("%(levelname)s: %(message)s")
@@ -164,6 +167,23 @@ class Bench(BenchPlotServer):
 
         self.cache_size = int(100e9)  # default to 100gb
 
+        # self.bench_cfg = BenchCfg()
+
+        # Maybe put this in SweepCfg
+        self.input_vars = None
+        self.result_vars = None
+        self.const_vars = None
+        self.plot_callbacks = []
+        self.plot = True
+
+    def add_plot_callback(self, callback: Callable[[BenchResult], pn.panel], **kwargs) -> None:
+        """Add a plotting callback that will be called on any result produced when calling a sweep function. You can pass additional arguments to the plotting function with kwargs. e.g. add_plot_callback(bch.BenchResult.to_video_grid)
+
+        Args:
+            callback (Callable[[BenchResult], pn.panel]): _description_
+        """
+        self.plot_callbacks.append(partial(callback, **kwargs))
+
     def set_worker(self, worker: Callable, worker_input_cfg: ParametrizedSweep = None) -> None:
         """Set the benchmark worker function and optionally the type the worker expects
 
@@ -187,34 +207,6 @@
         logging.info(f"setting worker {worker}")
         self.worker_input_cfg = worker_input_cfg
 
-    def sweep(
-        self,
-        input_vars: List[ParametrizedSweep] = None,
-        result_vars: List[ParametrizedSweep] = None,
-        const_vars: List[ParametrizedSweep] = None,
-        time_src: datetime = None,
-        description: str = None,
-        post_description: str = None,
-        pass_repeat: bool = False,
-        tag: str = "",
-        run_cfg: BenchRunCfg = None,
-        plot: bool = False,
-    ) -> BenchResult:
-        title = "Sweeping " + " vs ".join([self.get_name(i) for i in input_vars])
-        return self.plot_sweep(
-            title,
-            input_vars=input_vars,
-            result_vars=result_vars,
-            const_vars=const_vars,
-            time_src=time_src,
-            description=description,
-            post_description=post_description,
-            pass_repeat=pass_repeat,
-            tag=tag,
-            run_cfg=run_cfg,
-            plot=plot,
-        )
-
     def sweep_sequential(
         self,
         title="",
@@ -226,15 +218,14 @@
         group_size: int = 1,
         iterations: int = 1,
         relationship_cb=None,
+        plot_callbacks: List | bool = None,
     ) -> List[BenchResult]:
         results = []
         if relationship_cb is None:
             relationship_cb = combinations
         for it in range(iterations):
             for input_group in relationship_cb(input_vars, group_size):
-                title_gen = (
-                    title + "Sweeping " + " vs ".join([self.get_name(i) for i in input_group])
-                )
+                title_gen = title + "Sweeping " + " vs ".join(params_to_str(input_group))
                 if iterations > 1:
                     title_gen += f" iteration:{it}"
                 res = self.plot_sweep(
@@ -243,8 +234,9 @@
                     result_vars=result_vars,
                     const_vars=const_vars,
                     run_cfg=run_cfg,
-                    plot=True,
+                    plot_callbacks=plot_callbacks,
                 )
+
                 if optimise_var is not None:
                     const_vars = res.get_optimal_inputs(optimise_var, True)
                 results.append(res)
@@ -262,7 +254,7 @@
         pass_repeat: bool = False,
         tag: str = "",
         run_cfg: BenchRunCfg = None,
-        plot: bool = True,
+        plot_callbacks: List | bool = None,
     ) -> BenchResult:
         """The all in 1 function benchmarker and results plotter.
 
@@ -277,7 +269,8 @@
             pass_repeat (bool, optional): By default do not pass the kwarg 'repeat' to the benchmark function. Set to true if
                 you want the benchmark function to be passed the repeat number
             tag (str, optional): Use tags to group different benchmarks together.
-            run_cfg: (BenchRunCfg, optional): A config for storing how the benchmarks and run and plotted
+            run_cfg: (BenchRunCfg, optional): A config for storing how the benchmarks are run
+            plot_callbacks: (List | bool) A list of plot callbacks to call on the results. Pass False or an empty list to turn off plotting
 
         Raises:
             ValueError: If a result variable is not set
@@ -290,18 +283,26 @@
             logging.info(
                 "No input variables passed, using all param variables in bench class as inputs"
             )
-            input_vars = self.worker_class_instance.get_inputs_only()
+            if self.input_vars is None:
+                input_vars = self.worker_class_instance.get_inputs_only()
+            else:
+                input_vars = self.input_vars
             for i in input_vars:
                 logging.info(f"input var: {i.name}")
             if result_vars is None:
                 logging.info(
                     "No results variables passed, using all result variables in bench class:"
                 )
-                result_vars = self.worker_class_instance.get_results_only()
-                for r in result_vars:
-                    logging.info(f"result var: {r.name}")
+                if self.result_vars is None:
+                    result_vars = self.worker_class_instance.get_results_only()
+                else:
+                    result_vars = self.result_vars
+
             if const_vars is None:
-                const_vars = self.worker_class_instance.get_input_defaults()
+                if self.const_vars is None:
+                    const_vars = self.worker_class_instance.get_input_defaults()
+                else:
+                    const_vars = self.const_vars
         else:
             if input_vars is None:
                 input_vars = []
@@ -317,6 +318,9 @@
         for i in range(len(result_vars)):
             result_vars[i] = self.convert_vars_to_params(result_vars[i], "result")
 
+        for r in result_vars:
+            logging.info(f"result var: {r.name}")
+
         if isinstance(const_vars, dict):
             const_vars = list(const_vars.items())
 
@@ -380,6 +384,14 @@
                 "## Results Description\nPlease set post_description to explain these results"
             )
 
+        if plot_callbacks is None:
+            if self.plot_callbacks is not None and len(self.plot_callbacks) == 0:
+                plot_callbacks = [BenchResult.to_auto_plots]
+            else:
+                plot_callbacks = self.plot_callbacks
+        elif isinstance(plot_callbacks, bool):
+            plot_callbacks = [BenchResult.to_auto_plots] if plot_callbacks else []
+
         bench_cfg = BenchCfg(
             input_vars=input_vars,
             result_vars=result_vars_only,
@@ -391,7 +403,13 @@
             title=title,
             pass_repeat=pass_repeat,
             tag=run_cfg.run_tag + tag,
+            plot_callbacks=plot_callbacks,
         )
+        return self.run_sweep(bench_cfg, run_cfg, time_src)
+
+    def run_sweep(
+        self, bench_cfg: BenchCfg, run_cfg: BenchRunCfg, time_src: datetime
+    ) -> BenchResult:
         print("tag", bench_cfg.tag)
 
         bench_cfg.param.update(run_cfg.param.values())
@@ -446,16 +464,12 @@
 
         bench_res.post_setup()
 
-        if plot and bench_res.bench_cfg.auto_plot:
+        if bench_cfg.auto_plot:
             self.report.append_result(bench_res)
+
         self.results.append(bench_res)
         return bench_res
 
-    def get_name(self, var):
-        if isinstance(var, param.Parameter):
-            return var.name
-        return var
-
     def convert_vars_to_params(self, variable: param.Parameter, var_type: str):
         """check that a variable is a subclass of param
 
@@ -544,7 +558,7 @@
             time_src (datetime | str): a representation of the sample time
 
         Returns:
-            _type_: _description_
+            tuple[BenchResult, List, List]: bench_result, function inputs, dimension names
         """
 
         if time_src is None:
@@ -552,13 +566,11 @@
         bench_cfg.meta_vars = self.define_extra_vars(bench_cfg, bench_cfg.repeats, time_src)
 
         bench_cfg.all_vars = bench_cfg.input_vars + bench_cfg.meta_vars
-
         # bench_cfg.all_vars = bench_cfg.iv_time + bench_cfg.input_vars +[ bench_cfg.iv_repeat]
-
         # bench_cfg.all_vars = [ bench_cfg.iv_repeat] +bench_cfg.input_vars + bench_cfg.iv_time
 
         for i in bench_cfg.all_vars:
-            logging.info(i.sampling_str(bench_cfg.debug))
+            logging.info(i.sampling_str())
 
         dims_cfg = DimsCfg(bench_cfg)
         function_inputs = list(
@@ -574,7 +586,9 @@
             if isinstance(rv, ResultReference):
                 result_data = np.full(dims_cfg.dims_size, -1, dtype=int)
                 data_vars[rv.name] = (dims_cfg.dims_name, result_data)
-            if isinstance(rv, (ResultVideo, ResultImage, ResultString, ResultContainer)):
+            if isinstance(
+                rv, (ResultPath, ResultVideo, ResultImage, ResultString, ResultContainer)
+            ):
                 result_data = np.full(dims_cfg.dims_size, "NAN", dtype=object)
                 data_vars[rv.name] = (dims_cfg.dims_name, result_data)
             elif type(rv) == ResultVec:
@@ -616,7 +630,6 @@
             default=repeats,
             bounds=[1, repeats],
             samples=repeats,
-            samples_debug=2 if repeats > 2 else 1,
             units="repeats",
             doc="The number of times a sample was measured",
         )
@@ -712,7 +725,15 @@
                 logging.info(f"{rv.name}: {result_value}")
 
             if isinstance(
-                rv, (ResultVar, ResultVideo, ResultImage, ResultString, ResultContainer)
+                rv,
+                (
+                    ResultVar,
+                    ResultVideo,
+                    ResultImage,
+                    ResultString,
+                    ResultContainer,
+                    ResultPath,
+                ),
             ):
                 set_xarray_multidim(bench_res.ds[rv.name], worker_job.index_tuple, result_value)
             elif isinstance(rv, ResultReference):
@@ -803,3 +824,7 @@
 
     def get_result(self, index: int = -1) -> BenchResult:
         return self.results[index]
+
+    def publish(self, remote_callback: Callable) -> str:
+        branch_name = f"{self.bench_name}_{self.run_cfg.run_tag}"
+        return self.report.publish(remote_callback, branch_name=branch_name)
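Note: the boolean `plot` argument to `plot_sweep` is gone; plotting is now driven by `plot_callbacks`, either passed per call or registered once with the new `add_plot_callback`. A sketch of the new API, assuming `bench` is a configured `Bench` instance (`BenchResult.to_video_grid` is the example the docstring itself names):

```python
import bencher as bch

# register a callback that is applied to every sweep result
bench.add_plot_callback(bch.BenchResult.to_video_grid)

# per-call control: False (or []) disables plotting,
# True restores the to_auto_plots default
res = bench.plot_sweep("sweep title", plot_callbacks=False)
```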
bencher/class_enum.py ADDED
@@ -0,0 +1,52 @@
+from __future__ import annotations
+from strenum import StrEnum
+from typing import Any
+import importlib
+from abc import abstractmethod
+from dataclasses import dataclass
+from enum import auto
+
+
+class ClassEnum(StrEnum):
+    """A ClassEnum is a pattern to make it easier to create a factory method that converts from an enum to a corresponding class. Subclasses should implement to_class(enum_instance: EnumType), which takes an enum and returns the corresponding instance of that class."""
+
+    @classmethod
+    def to_class_generic(cls, module_import: str, class_name: str) -> Any:
+        """Create an instance of the class referred to by this enum
+
+        Returns:
+            Any: instance of the class
+        """
+
+        class_def = getattr(importlib.import_module(module_import), class_name)
+        return class_def()
+
+    @classmethod
+    @abstractmethod
+    def to_class(cls, enum_val: ClassEnum) -> Any:
+        """Subclasses should override this method to take an enum and return the corresponding instance of that class."""
+        raise NotImplementedError()
+
+
+@dataclass
+class BaseClass:
+    baseclassname: str = "class0"
+
+
+@dataclass
+class Class1(BaseClass):
+    classname: str = "class1"
+
+
+@dataclass
+class Class2(BaseClass):
+    classname: str = "class2"
+
+
+class ExampleEnum(ClassEnum):
+    Class1 = auto()
+    Class2 = auto()
+
+    @classmethod
+    def to_class(cls, enum_val: ExampleEnum) -> BaseClass:
+        return cls.to_class_generic("bencher.class_enum", enum_val)
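Note: `ExampleEnum` doubles as a usage demo. With the `strenum` package, `auto()` makes each member's value its own name ("Class1", "Class2"), so `to_class_generic` can look the class up by name in `bencher.class_enum` via `importlib`. A sketch of the round trip (the behaviour follows from the code above, assuming strenum's `auto()` semantics):

```python
from bencher.class_enum import ExampleEnum, Class1

obj = ExampleEnum.to_class(ExampleEnum.Class1)
assert isinstance(obj, Class1)
assert obj.classname == "class1"
```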
bencher/job.py CHANGED
@@ -80,7 +80,8 @@ class FutureCache:
         size_limit: int = int(20e9),  # 20 GB
         use_cache=True,
     ):
-        self.executor = Executors.factory(executor)
+        self.executor_type = executor
+        self.executor = None
         if use_cache:
             self.cache = Cache(f"cachedir/{cache_name}", tag_index=tag_index, size_limit=size_limit)
             logging.info(f"cache dir: {self.cache.directory}")
@@ -110,6 +111,9 @@
 
         self.worker_fn_call_count += 1
 
+        if self.executor_type is not Executors.SERIAL:
+            if self.executor is None:
+                self.executor = Executors.factory(self.executor_type)
         if self.executor is not None:
             self.overwrite_msg(job, " starting parallel job...")
             return JobFuture(
@@ -148,9 +152,7 @@
             self.cache.close()
         if self.executor:
             self.executor.shutdown()
-
-    # def __del__(self):
-    #     self.close()
+        self.executor = None
 
     def stats(self) -> str:
         logging.info(f"job calls: {self.worker_wrapper_call_count}")
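Note: `FutureCache` no longer builds an executor in its constructor; it records the requested `Executors` type and constructs the executor lazily on the first non-serial job, and `close()` resets it to `None` so the cache can be reused. A generic sketch of the lazy-construction idiom (illustrative names, not the bencher API):

```python
from concurrent.futures import ProcessPoolExecutor


class LazyPool:
    def __init__(self) -> None:
        self._executor = None  # no worker processes are spawned yet

    def submit(self, fn, *args):
        # build the pool only when the first job actually needs it
        if self._executor is None:
            self._executor = ProcessPoolExecutor()
        return self._executor.submit(fn, *args)

    def close(self) -> None:
        if self._executor is not None:
            self._executor.shutdown()
            self._executor = None  # safe to reuse after close()
```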
bencher/optuna_conversions.py CHANGED
@@ -30,7 +30,7 @@ def optuna_grid_search(bench_cfg: BenchCfg) -> optuna.Study:
     """
     search_space = {}
    for iv in bench_cfg.all_vars:
-        search_space[iv.name] = iv.values(bench_cfg.debug)
+        search_space[iv.name] = iv.values()
     directions = []
     for rv in bench_cfg.optuna_targets(True):
         directions.append(rv.direction)