holobench 1.34.0.tar.gz → 1.36.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (104)
  1. {holobench-1.34.0 → holobench-1.36.0}/PKG-INFO +1 -1
  2. {holobench-1.34.0 → holobench-1.36.0}/bencher/__init__.py +7 -3
  3. {holobench-1.34.0 → holobench-1.36.0}/bencher/bench_cfg.py +6 -6
  4. {holobench-1.34.0 → holobench-1.36.0}/bencher/bench_runner.py +7 -7
  5. {holobench-1.34.0 → holobench-1.36.0}/bencher/bencher.py +7 -4
  6. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_composable_container.py +1 -1
  7. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_composable_container2.py +2 -2
  8. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_holosweep.py +1 -1
  9. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_image.py +2 -2
  10. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_image1.py +1 -1
  11. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_sample_cache.py +4 -4
  12. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_sample_cache_context.py +1 -1
  13. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_video.py +2 -2
  14. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/experimental/example_hvplot_explorer.py +1 -1
  15. holobench-1.36.0/bencher/example/inputs_0D/example_0D.py +34 -0
  16. holobench-1.36.0/bencher/example/inputs_1D/example_1D.py +62 -0
  17. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/shelved/example_float3D_cone.py +1 -1
  18. {holobench-1.34.0 → holobench-1.36.0}/bencher/job.py +3 -3
  19. {holobench-1.34.0 → holobench-1.36.0}/bencher/plotting/plot_filter.py +16 -5
  20. {holobench-1.34.0 → holobench-1.36.0}/bencher/results/bench_result.py +3 -1
  21. {holobench-1.34.0 → holobench-1.36.0}/bencher/results/bench_result_base.py +45 -12
  22. {holobench-1.34.0 → holobench-1.36.0}/bencher/results/holoview_result.py +21 -9
  23. holobench-1.36.0/bencher/results/hvplot_result.py +54 -0
  24. {holobench-1.34.0 → holobench-1.36.0}/bencher/variables/parametrised_sweep.py +6 -1
  25. {holobench-1.34.0 → holobench-1.36.0}/pyproject.toml +2 -2
  26. {holobench-1.34.0 → holobench-1.36.0}/.gitignore +0 -0
  27. {holobench-1.34.0 → holobench-1.36.0}/LICENSE +0 -0
  28. {holobench-1.34.0 → holobench-1.36.0}/README.md +0 -0
  29. {holobench-1.34.0 → holobench-1.36.0}/bencher/bench_plot_server.py +0 -0
  30. {holobench-1.34.0 → holobench-1.36.0}/bencher/bench_report.py +0 -0
  31. {holobench-1.34.0 → holobench-1.36.0}/bencher/caching.py +0 -0
  32. {holobench-1.34.0 → holobench-1.36.0}/bencher/class_enum.py +0 -0
  33. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/__init__.py +0 -0
  34. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/benchmark_data.py +0 -0
  35. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_all.py +0 -0
  36. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_categorical.py +0 -0
  37. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_consts.py +0 -0
  38. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_custom_sweep.py +0 -0
  39. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_custom_sweep2.py +0 -0
  40. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_dataframe.py +0 -0
  41. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_docs.py +0 -0
  42. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_filepath.py +0 -0
  43. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_float3D.py +0 -0
  44. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_float_cat.py +0 -0
  45. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_floats.py +0 -0
  46. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_floats2D.py +0 -0
  47. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_holosweep_objects.py +0 -0
  48. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_holosweep_tap.py +0 -0
  49. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_levels.py +0 -0
  50. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_levels2.py +0 -0
  51. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_pareto.py +0 -0
  52. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_publish.py +0 -0
  53. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_rerun.py +0 -0
  54. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_rerun2.py +0 -0
  55. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_simple.py +0 -0
  56. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_simple_bool.py +0 -0
  57. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_simple_cat.py +0 -0
  58. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_simple_float.py +0 -0
  59. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_simple_float2d.py +0 -0
  60. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_strings.py +0 -0
  61. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_time_event.py +0 -0
  62. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_workflow.py +0 -0
  63. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/experimental/example_bokeh_plotly.py +0 -0
  64. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/experimental/example_hover_ex.py +0 -0
  65. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/experimental/example_interactive.py +0 -0
  66. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/experimental/example_streamnd.py +0 -0
  67. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/experimental/example_streams.py +0 -0
  68. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/experimental/example_template.py +0 -0
  69. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/experimental/example_updates.py +0 -0
  70. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/experimental/example_vector.py +0 -0
  71. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/meta/example_meta.py +0 -0
  72. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/meta/example_meta_cat.py +0 -0
  73. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/meta/example_meta_float.py +0 -0
  74. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/meta/example_meta_levels.py +0 -0
  75. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/optuna/example_optuna.py +0 -0
  76. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/shelved/example_float2D_scatter.py +0 -0
  77. {holobench-1.34.0 → holobench-1.36.0}/bencher/example/shelved/example_kwargs.py +0 -0
  78. {holobench-1.34.0 → holobench-1.36.0}/bencher/flask_server.py +0 -0
  79. {holobench-1.34.0 → holobench-1.36.0}/bencher/optuna_conversions.py +0 -0
  80. {holobench-1.34.0 → holobench-1.36.0}/bencher/plotting/__init__.py +0 -0
  81. {holobench-1.34.0 → holobench-1.36.0}/bencher/plotting/plt_cnt_cfg.py +0 -0
  82. {holobench-1.34.0 → holobench-1.36.0}/bencher/results/__init__.py +0 -0
  83. {holobench-1.34.0 → holobench-1.36.0}/bencher/results/composable_container/__init__.py +0 -0
  84. {holobench-1.34.0 → holobench-1.36.0}/bencher/results/composable_container/composable_container_base.py +0 -0
  85. {holobench-1.34.0 → holobench-1.36.0}/bencher/results/composable_container/composable_container_dataframe.py +0 -0
  86. {holobench-1.34.0 → holobench-1.36.0}/bencher/results/composable_container/composable_container_panel.py +0 -0
  87. {holobench-1.34.0 → holobench-1.36.0}/bencher/results/composable_container/composable_container_video.py +0 -0
  88. {holobench-1.34.0 → holobench-1.36.0}/bencher/results/dataset_result.py +0 -0
  89. {holobench-1.34.0 → holobench-1.36.0}/bencher/results/float_formatter.py +0 -0
  90. {holobench-1.34.0 → holobench-1.36.0}/bencher/results/optuna_result.py +0 -0
  91. {holobench-1.34.0 → holobench-1.36.0}/bencher/results/panel_result.py +0 -0
  92. {holobench-1.34.0 → holobench-1.36.0}/bencher/results/plotly_result.py +0 -0
  93. {holobench-1.34.0 → holobench-1.36.0}/bencher/results/video_result.py +0 -0
  94. {holobench-1.34.0 → holobench-1.36.0}/bencher/results/video_summary.py +0 -0
  95. {holobench-1.34.0 → holobench-1.36.0}/bencher/utils.py +0 -0
  96. {holobench-1.34.0 → holobench-1.36.0}/bencher/utils_rerun.py +0 -0
  97. {holobench-1.34.0 → holobench-1.36.0}/bencher/variables/__init__.py +0 -0
  98. {holobench-1.34.0 → holobench-1.36.0}/bencher/variables/inputs.py +0 -0
  99. {holobench-1.34.0 → holobench-1.36.0}/bencher/variables/results.py +0 -0
  100. {holobench-1.34.0 → holobench-1.36.0}/bencher/variables/sweep_base.py +0 -0
  101. {holobench-1.34.0 → holobench-1.36.0}/bencher/variables/time.py +0 -0
  102. {holobench-1.34.0 → holobench-1.36.0}/bencher/video_writer.py +0 -0
  103. {holobench-1.34.0 → holobench-1.36.0}/bencher/worker_job.py +0 -0
  104. {holobench-1.34.0 → holobench-1.36.0}/resource/bencher +0 -0

{holobench-1.34.0 → holobench-1.36.0}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: holobench
-Version: 1.34.0
+Version: 1.36.0
 Summary: A package for benchmarking the performance of arbitrary functions
 Project-URL: Repository, https://github.com/dyson-ai/bencher
 Project-URL: Home, https://github.com/dyson-ai/bencher

{holobench-1.34.0 → holobench-1.36.0}/bencher/__init__.py
@@ -56,7 +56,13 @@ from .utils import (
     publish_file,
     github_content,
 )
-from .utils_rerun import publish_and_view_rrd, rrd_to_pane, capture_rerun_window
+
+try:
+    from .utils_rerun import publish_and_view_rrd, rrd_to_pane, capture_rerun_window
+    from .flask_server import run_flask_in_thread
+except ModuleNotFoundError as e:
+    pass
+
 
 from .plotting.plot_filter import VarRange, PlotFilter
 from .variables.parametrised_sweep import ParametrizedSweep
@@ -68,5 +74,3 @@ from .bench_report import BenchReport, GithubPagesCfg
 from .job import Executors
 from .video_writer import VideoWriter, add_image
 from .class_enum import ClassEnum, ExampleEnum
-
-from .flask_server import run_flask_in_thread
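
Because the rerun and flask imports are now wrapped in a try/except, those names are simply absent from the bencher namespace when the optional dependencies are not installed. A minimal sketch of how a consumer can probe for them (the hasattr check is illustrative, not part of the package):

    # Python: probing for optional features after the guarded import above
    import bencher as bch

    if hasattr(bch, "rrd_to_pane"):
        print("rerun support available")
    else:
        print("rerun extras not installed; rrd helpers are unavailable")
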

{holobench-1.34.0 → holobench-1.36.0}/bencher/bench_cfg.py
@@ -83,16 +83,16 @@ class BenchRunCfg(BenchPlotSrvCfg):
 
     raise_duplicate_exception: bool = param.Boolean(False, doc=" Used to debug unique plot names.")
 
-    use_cache: bool = param.Boolean(
+    cache_results: bool = param.Boolean(
         False,
-        doc="This is a benchmark level cache that stores the results of a fully completed benchmark. At the end of a benchmark the values are added to the cache but are not if the benchmark does not complete. If you want to cache values during the benchmark you need to use the use_sample_cache option. Beware that depending on how you change code in the objective function, the cache could provide values that are not correct.",
+        doc="This is a benchmark level cache that stores the results of a fully completed benchmark. At the end of a benchmark the values are added to the cache but are not if the benchmark does not complete. If you want to cache values during the benchmark you need to use the cache_samples option. Beware that depending on how you change code in the objective function, the cache could provide values that are not correct.",
     )
 
     clear_cache: bool = param.Boolean(
         False, doc=" Clear the cache of saved input->output mappings."
     )
 
-    use_sample_cache: bool = param.Boolean(
+    cache_samples: bool = param.Boolean(
         False,
         doc="If true, every time the benchmark function is called, bencher will check if that value has been calculated before and if so load the from the cache. Note that the sample level cache is different from the benchmark level cache which only caches the aggregate of all the results at the end of the benchmark. This cache lets you stop a benchmark halfway through and continue. However, beware that depending on how you change code in the objective function, the cache could provide values that are not correct.",
     )
@@ -182,7 +182,7 @@ class BenchRunCfg(BenchPlotSrvCfg):
         parser.add_argument(
             "--use-cache",
             action="store_true",
-            help=BenchRunCfg.param.use_cache.doc,
+            help=BenchRunCfg.param.cache_results.doc,
         )
 
         parser.add_argument(
@@ -380,8 +380,8 @@ class BenchCfg(BenchRunCfg):
         benchmark_sampling_str.append(f" run tag: {self.run_tag}")
         if self.level is not None:
             benchmark_sampling_str.append(f" bench level: {self.level}")
-        benchmark_sampling_str.append(f" use_cache: {self.use_cache}")
-        benchmark_sampling_str.append(f" use_sample_cache: {self.use_sample_cache}")
+        benchmark_sampling_str.append(f" cache_results: {self.cache_results}")
+        benchmark_sampling_str.append(f" cache_samples {self.cache_samples}")
         benchmark_sampling_str.append(f" only_hash_tag: {self.only_hash_tag}")
         benchmark_sampling_str.append(f" executor: {self.executor}")
 
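
Taken together, the renames above replace use_cache with cache_results and use_sample_cache with cache_samples. A minimal sketch of a run configuration using the new names, based only on the fields shown in this diff:

    # Python: configuring both cache levels with the renamed flags
    import bencher as bch

    run_cfg = bch.BenchRunCfg()
    run_cfg.cache_results = True  # benchmark-level cache, written only when a run completes
    run_cfg.cache_samples = True  # per-call cache, lets an interrupted run resume
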

{holobench-1.34.0 → holobench-1.36.0}/bencher/bench_runner.py
@@ -33,11 +33,11 @@ class BenchRunner:
 
     @staticmethod
     def setup_run_cfg(
-        run_cfg: BenchRunCfg = BenchRunCfg(), level: int = 2, use_cache=True
+        run_cfg: BenchRunCfg = BenchRunCfg(), level: int = 2, cache_results=True
     ) -> BenchRunCfg:
         run_cfg_out = deepcopy(run_cfg)
-        run_cfg_out.use_sample_cache = use_cache
-        run_cfg_out.only_hash_tag = use_cache
+        run_cfg_out.cache_samples = cache_results
+        run_cfg_out.only_hash_tag = cache_results
         run_cfg_out.level = level
         return run_cfg_out
 
@@ -78,9 +78,9 @@ class BenchRunner:
         show: bool = False,
         save: bool = False,
         grouped: bool = True,
-        use_cache: bool = True,
+        cache_results: bool = True,
     ) -> List[Bench]:
-        """This function controls how a benchmark or a set of benchmarks are run. If you are only running a single benchmark it can be simpler to just run it directly, but if you are running several benchmarks together and want them to be sampled at different levels of fidelity or published together in a single report this function enables that workflow. If you have an expensive function, it can be useful to view low fidelity results as they are computed but also continue to compute higher fidelity results while reusing previously computed values. The parameters min_level and max_level let you specify how to progressivly increase the sampling resolution of the benchmark sweep. By default use_cache=True so that previous values are reused.
+        """This function controls how a benchmark or a set of benchmarks are run. If you are only running a single benchmark it can be simpler to just run it directly, but if you are running several benchmarks together and want them to be sampled at different levels of fidelity or published together in a single report this function enables that workflow. If you have an expensive function, it can be useful to view low fidelity results as they are computed but also continue to compute higher fidelity results while reusing previously computed values. The parameters min_level and max_level let you specify how to progressivly increase the sampling resolution of the benchmark sweep. By default cache_results=True so that previous values are reused.
 
         Args:
             min_level (int, optional): The minimum level to start sampling at. Defaults to 2.
@@ -93,14 +93,14 @@ class BenchRunner:
             show (bool, optional): show the results in the local web browser. Defaults to False.
             save (bool, optional): save the results to disk in index.html. Defaults to False.
             grouped (bool, optional): Produce a single html page with all the benchmarks included. Defaults to True.
-            use_cache (bool, optional): Use the sample cache to reused previous results. Defaults to True.
+            cache_results (bool, optional): Use the sample cache to reused previous results. Defaults to True.
 
         Returns:
             List[BenchCfg]: A list of bencher instances
         """
         if run_cfg is None:
             run_cfg = deepcopy(self.run_cfg)
-        run_cfg = BenchRunner.setup_run_cfg(run_cfg, use_cache=use_cache)
+        run_cfg = BenchRunner.setup_run_cfg(run_cfg, cache_results=cache_results)
 
         if level is not None:
             min_level = level
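
The same rename flows through BenchRunner: setup_run_cfg and run now take cache_results, which internally sets cache_samples and only_hash_tag. A hedged usage sketch, reusing example_holosweep from the examples changed in this diff:

    # Python: running benchmarks at increasing fidelity while reusing cached samples
    import bencher as bch
    from bencher.example.example_holosweep import example_holosweep

    bench_runner = bch.BenchRunner("bench_runner_test")
    bench_runner.add_run(example_holosweep)
    bench_runner.run(min_level=2, max_level=4, show=True, cache_results=True)
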

{holobench-1.34.0 → holobench-1.36.0}/bencher/bencher.py
@@ -327,7 +327,7 @@ class Bench(BenchPlotServer):
             logging.info("Copy run cfg from bench class")
 
         if run_cfg.only_plot:
-            run_cfg.use_cache = True
+            run_cfg.cache_results = True
 
         self.last_run_cfg = run_cfg
 
@@ -371,7 +371,7 @@ class Bench(BenchPlotServer):
                 title += "s"
             title += ": " + ", ".join([f"{c[0].name}={c[1]}" for c in const_vars_in])
         else:
-            raise RuntimeError("you must pass a title, or define inputs or consts")
+            title = "Recording: " + ", ".join([i.name for i in result_vars_in])
 
         if run_cfg.level > 0:
             inputs = []
@@ -448,7 +448,7 @@ class Bench(BenchPlotServer):
             if run_cfg.clear_cache:
                 c.delete(bench_cfg_hash)
                 logging.info("cleared cache")
-            elif run_cfg.use_cache:
+            elif run_cfg.cache_results:
                 logging.info(
                     f"checking for previously calculated results with key: {bench_cfg_hash}"
                 )
@@ -813,7 +813,7 @@ class Bench(BenchPlotServer):
             cache_name="sample_cache",
             tag_index=True,
             size_limit=self.cache_size,
-            use_cache=run_cfg.use_sample_cache,
+            cache_results=run_cfg.cache_samples,
         )
 
     def clear_tag_from_sample_cache(self, tag: str, run_cfg):
@@ -871,6 +871,9 @@ class Bench(BenchPlotServer):
     def get_result(self, index: int = -1) -> BenchResult:
         return self.results[index]
 
+    def get_ds(self, index: int = -1) -> xr.Dataset:
+        return self.get_result(index).to_xarray()
+
     def publish(self, remote_callback: Callable) -> str:
         branch_name = f"{self.bench_name}_{self.run_cfg.run_tag}"
         return self.report.publish(remote_callback, branch_name=branch_name)
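
The new get_ds helper is shorthand for pulling the most recent result as an xarray dataset. Assuming bench is a populated bch.Bench instance, the two calls below are equivalent per the hunk above:

    # Python: fetching the latest result as an xr.Dataset
    ds = bench.get_ds()                    # new shorthand
    ds = bench.get_result().to_xarray()    # equivalent long form
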

{holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_composable_container.py
@@ -98,7 +98,7 @@ def example_composable_container_video(
 
 if __name__ == "__main__":
     ex_run_cfg = bch.BenchRunCfg()
-    ex_run_cfg.use_sample_cache = False
+    ex_run_cfg.cache_samples = False
     # ex_run_cfg.level = 2
     ex_report = bch.BenchReport()
     example_composable_container_image(ex_run_cfg, report=ex_report)

{holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_composable_container2.py
@@ -144,7 +144,7 @@ def example_composable_container_image(
 
 # if __name__ == "__main__":
 #     ex_run_cfg = bch.BenchRunCfg()
-#     ex_run_cfg.use_sample_cache = False
+#     ex_run_cfg.cache_samples = False
 #     # ex_run_cfg.level = 2
 #     ex_report = bch.BenchReport()
 #     example_composable_container_image(ex_run_cfg, report=ex_report)
@@ -157,4 +157,4 @@ if __name__ == "__main__":
     # bench_runner.add_run(bench_image)
     bench_runner.add_run(example_composable_container_image)
 
-    bench_runner.run(level=6, show=True, use_cache=False)
+    bench_runner.run(level=6, show=True, cache_results=False)
@@ -95,4 +95,4 @@ if __name__ == "__main__":
95
95
  PlotFunctions().to_gui()
96
96
  bench_run = bch.BenchRunner("bench_runner_test")
97
97
  bench_run.add_run(example_holosweep)
98
- bench_run.run(level=6, show=True, use_cache=False)
98
+ bench_run.run(level=6, show=True, cache_results=False)

{holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_image.py
@@ -58,7 +58,7 @@ class BenchPolygons(bch.ParametrizedSweep):
 def example_image(
     run_cfg: bch.BenchRunCfg = bch.BenchRunCfg(), report: bch.BenchReport = bch.BenchReport()
 ) -> bch.Bench:
-    run_cfg.use_cache = False
+    run_cfg.cache_results = False
     bench = bch.Bench("polygons", BenchPolygons(), run_cfg=run_cfg, report=report)
     bench.result_vars = ["polygon", "area"]
 
@@ -142,7 +142,7 @@ if __name__ == "__main__":
     # def example_image_pairs()
 
     ex_run_cfg = bch.BenchRunCfg()
-    ex_run_cfg.use_sample_cache = True
+    ex_run_cfg.cache_samples = True
     # ex_run_cfg.debug = True
     # ex_run_cfg.repeats = 2
     ex_run_cfg.level = 4

{holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_image1.py
@@ -73,7 +73,7 @@ def example_image_vid_sequential1(
 
 if __name__ == "__main__":
     ex_run_cfg = bch.BenchRunCfg()
-    ex_run_cfg.use_sample_cache = True
+    ex_run_cfg.cache_samples = True
     ex_run_cfg.overwrite_sample_cache = True
     ex_run_cfg.level = 3
 

{holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_sample_cache.py
@@ -2,7 +2,7 @@ import bencher as bch
 
 
 class UnreliableClass(bch.ParametrizedSweep):
-    """This class helps demonstrate benchmarking a function that sometimes crashes during sampling. By using BenchRunCfg.use_sample_cache you can store the results of every call to the benchmark function so data is not lost in the event of a crash. However, because cache invalidation is hard (https://martinfowler.com/bliki/TwoHardThings.html) you need to be mindful of how you could get bad results due to incorrect cache data. For example if you change your benchmark function and use the sample cache you will not get correct values; you will need to use BenchRunCfg.clear_sample_cache to purge any out of date results."""
+    """This class helps demonstrate benchmarking a function that sometimes crashes during sampling. By using BenchRunCfg.cache_samples you can store the results of every call to the benchmark function so data is not lost in the event of a crash. However, because cache invalidation is hard (https://martinfowler.com/bliki/TwoHardThings.html) you need to be mindful of how you could get bad results due to incorrect cache data. For example if you change your benchmark function and use the sample cache you will not get correct values; you will need to use BenchRunCfg.clear_sample_cache to purge any out of date results."""
 
     input_val = bch.IntSweep(
         default=0,
@@ -31,7 +31,7 @@ def example_sample_cache(
     report: bch.BenchReport = bch.BenchReport(),
     trigger_crash: bool = False,
 ) -> bch.Bench:
-    """This example shows how to use the use_sample_cache option to deal with unreliable functions and to continue benchmarking using previously calculated results even if the code crashed during the run
+    """This example shows how to use the cache_samples option to deal with unreliable functions and to continue benchmarking using previously calculated results even if the code crashed during the run
 
     Args:
         run_cfg (BenchRunCfg): configuration of how to perform the param sweep
@@ -50,7 +50,7 @@ def example_sample_cache(
         title="Example Crashy Function with the sample_cache",
         input_vars=[UnreliableClass.param.input_val],
         result_vars=[UnreliableClass.param.return_value, UnreliableClass.param.trigger_crash],
-        description="""This example shows how to use the use_sample_cache option to deal with unreliable functions and to continue benchmarking using previously calculated results even if the code crashed during the run""",
+        description="""This example shows how to use the cache_samples option to deal with unreliable functions and to continue benchmarking using previously calculated results even if the code crashed during the run""",
         run_cfg=run_cfg,
         post_description="The input_val vs return value graph is a straight line as expected and there is no record of the fact the benchmark crashed halfway through. The second graph shows that for values >1 the trigger_crash value had to be 0 in order to proceed",
     )
@@ -63,7 +63,7 @@ if __name__ == "__main__":
        ex_run_cfg.executor = bch.Executors.SCOOP
 
    # this will store the result of of every call to crashy_fn
-    ex_run_cfg.use_sample_cache = True
+    ex_run_cfg.cache_samples = True
     ex_run_cfg.clear_sample_cache = True
 
     try:

{holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_sample_cache_context.py
@@ -51,7 +51,7 @@ def assert_call_counts(bencher, run_cfg, wrapper_calls=-1, fn_calls=-1, cache_ca
 
 def example_cache_context() -> bch.Bench:
     run_cfg = bch.BenchRunCfg()
-    run_cfg.use_sample_cache = True
+    run_cfg.cache_samples = True
     run_cfg.only_hash_tag = True
     run_cfg.repeats = 2
     run_cfg.parallel = False

{holobench-1.34.0 → holobench-1.36.0}/bencher/example/example_video.py
@@ -79,7 +79,7 @@ def example_video(
     run_cfg: bch.BenchRunCfg = bch.BenchRunCfg(), report: bch.BenchReport = bch.BenchReport()
 ) -> bch.Bench:
     # run_cfg.auto_plot = False
-    # run_cfg.use_sample_cache = True
+    # run_cfg.cache_samples = True
     bench = bch.Bench("example_video", TuringPattern(), run_cfg=run_cfg, report=report)
 
     bench.plot_sweep(
@@ -111,7 +111,7 @@ def example_video_tap(
 if __name__ == "__main__":
     run_cfg_ex = bch.BenchRunCfg()
     run_cfg_ex.level = 2
-    run_cfg_ex.use_sample_cache = True
+    run_cfg_ex.cache_samples = True
     run_cfg_ex.only_hash_tag = True
 
     # example_video(run_cfg_ex).report.show()
@@ -30,7 +30,7 @@ if __name__ == "__main__":
30
30
  post_description="Here you can see the output plot of sin theta between 0 and pi. In the tabs at the top you can also view 3 tabular representations of the data",
31
31
  run_cfg=bch.BenchRunCfg(
32
32
  auto_plot=True,
33
- use_cache=False,
33
+ cache_results=False,
34
34
  repeats=2,
35
35
  ),
36
36
  )

holobench-1.36.0/bencher/example/inputs_0D/example_0D.py (new file)
@@ -0,0 +1,34 @@
+"""This file has some examples for how to perform basic benchmarking parameter sweeps"""
+
+import bencher as bch
+import random
+
+
+class SimpleFloat0D(bch.ParametrizedSweep):
+    """This class has 0 input dimensions and 1 output dimensions. It samples from a gaussian distribution"""
+
+    # This defines a variable that we want to plot
+    output = bch.ResultVar(units="ul", doc="a sample from a gaussian distribution")
+
+    def __call__(self, **kwargs) -> dict:
+        """Generate a sample from a uniform distribution
+
+        Returns:
+            dict: a dictionary with all the result variables in the ParametrisedSweep class as named key value pairs.
+        """
+
+        self.output = random.gauss(mu=0.0, sigma=1.0)
+        return super().__call__(**kwargs)
+
+
+def example_0D(run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None) -> bch.Bench:
+    """This example shows how to sample a 1 dimensional float variable and plot the result of passing that parameter sweep to the benchmarking function"""
+
+    bench = SimpleFloat0D().to_bench(run_cfg, report)
+    bench.plot_sweep()
+    return bench
+
+
+if __name__ == "__main__":
+    run_config = bch.BenchRunCfg(repeats=100)
+    example_0D(run_config).report.show()

holobench-1.36.0/bencher/example/inputs_1D/example_1D.py (new file)
@@ -0,0 +1,62 @@
+"""This file has some examples for how to perform basic benchmarking parameter sweeps"""
+
+import bencher as bch
+
+
+class DataSource:
+    def __init__(self):
+        self.data = [
+            [0, 0, 0, 0],
+            [1, 1, 1, 1],
+            [1, 1, 1, 1],
+            [2, 1, 1, 0],
+            [2, 2, 0, 0],
+            [2, 2, 1, 1],
+        ]
+
+    def call(self, index, repeat):
+        return self.data[index][repeat - 1]
+
+
+class Example1D(bch.ParametrizedSweep):
+    index = bch.IntSweep(default=0, bounds=[0, 5], doc="Input angle", units="rad", samples=30)
+    output = bch.ResultVar(units="v", doc="sin of theta")
+
+    def __call__(self, **kwargs):
+        self.update_params_from_kwargs(**kwargs)
+        self.output = DataSource().call(self.index, kwargs["repeat"])
+        return super().__call__(**kwargs)
+
+
+def example_1D_float_repeats(
+    run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None
+) -> bch.Bench:
+    """This example shows how to sample a 1 dimensional float variable and plot the result of passing that parameter sweep to the benchmarking function"""
+
+    bench = Example1D().to_bench(run_cfg, report)
+    # bench.plot_sweep(pass_repeat=True,plot_callbacks=False)
+
+    # res = bench.get_result()
+    bench.run_cfg = bch.BenchRunCfg(repeats=4)
+    # bench.plot_sweep(pass_repeat=True, plot_callbacks=False)
+    bench.plot_sweep(pass_repeat=True)
+
+    res = bench.get_result()
+    bench.report.append(res.to_curve())
+    # bench.report.append(hv.Table(res.to_hv_dataset(bch.ReduceType.MINMAX)))
+    # bench.report.append(res.to_curve() + res.to_scatter_jitter(override=True))
+    # bench.report.append(res.to_line())
+    bench.report.append(res.to_scatter_jitter(override=True))
+    # bench.report.append(res.to_error_bar())
+    # bench.report.append(res.to_explorer())
+    # bench.report.append(res.to_error_bar()
+
+    # bench.report.append(res.to_dataset())
+    # bench.report.append(res.to_xarray().hvplot.plot(kind="andrews_curves"))
+    # print(res.to_xarray())
+    # bench.report.append()
+    return bench
+
+
+if __name__ == "__main__":
+    example_1D_float_repeats().report.show()

{holobench-1.34.0 → holobench-1.36.0}/bencher/example/shelved/example_float3D_cone.py
@@ -92,5 +92,5 @@
 
 # if __name__ == "__main__":
 #     ex_run_cfg = bch.BenchRunCfg()
-#     ex_run_cfg.use_cache = True
+#     ex_run_cfg.cache_results = True
 #     example_cone(ex_run_cfg).report.show()

{holobench-1.34.0 → holobench-1.36.0}/bencher/job.py
@@ -59,7 +59,7 @@ class Executors(StrEnum):
     # THREADS=auto() #not that useful as most bench code is cpu bound
 
     @staticmethod
-    def factory(provider: Executors) -> Future():
+    def factory(provider: Executors) -> Future:
         providers = {
             Executors.SERIAL: None,
             Executors.MULTIPROCESSING: ProcessPoolExecutor(),
@@ -78,11 +78,11 @@ class FutureCache:
         cache_name: str = "fcache",
         tag_index: bool = True,
         size_limit: int = int(20e9),  # 20 GB
-        use_cache=True,
+        cache_results=True,
     ):
         self.executor_type = executor
         self.executor = None
-        if use_cache:
+        if cache_results:
             self.cache = Cache(f"cachedir/{cache_name}", tag_index=tag_index, size_limit=size_limit)
             logging.info(f"cache dir: {self.cache.directory}")
         else:
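
The first hunk fixes the return annotation of Executors.factory, which previously called the Future class (`-> Future():`) instead of naming it. Executors.factory maps each enum member to a concurrent.futures executor (or None for serial execution); users select one through the run configuration, as example_sample_cache above does:

    # Python: choosing an execution backend for the sweep
    import bencher as bch

    run_cfg = bch.BenchRunCfg()
    run_cfg.executor = bch.Executors.MULTIPROCESSING  # or Executors.SERIAL / Executors.SCOOP
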

{holobench-1.34.0 → holobench-1.36.0}/bencher/plotting/plot_filter.py
@@ -64,16 +64,24 @@ class PlotFilter:
     repeats_range: VarRange = VarRange(1, None)
     input_range: VarRange = VarRange(1, None)
 
-    def matches_result(self, plt_cnt_cfg: PltCntCfg, plot_name: str) -> PlotMatchesResult:
+    def matches_result(
+        self, plt_cnt_cfg: PltCntCfg, plot_name: str, override: bool = False
+    ) -> PlotMatchesResult:
         """Checks if the result data signature matches the type of data the plot is able to display."""
-        return PlotMatchesResult(self, plt_cnt_cfg, plot_name)
+        return PlotMatchesResult(self, plt_cnt_cfg, plot_name, override)
 
 
 # @dataclass
 class PlotMatchesResult:
     """Stores information about which properties match the requirements of a particular plotter"""
 
-    def __init__(self, plot_filter: PlotFilter, plt_cnt_cfg: PltCntCfg, plot_name: str):
+    def __init__(
+        self,
+        plot_filter: PlotFilter,
+        plt_cnt_cfg: PltCntCfg,
+        plot_name: str,
+        override: bool = False,
+    ):
         match_info = []
         matches = []
 
@@ -92,8 +100,11 @@ class PlotMatchesResult:
             matches.append(match)
             if not match:
                 match_info.append(info)
-
-        self.overall = all(matches)
+        if override:
+            match_info.append(f"override: {override}")
+            self.overall = True
+        else:
+            self.overall = all(matches)
 
         match_info.insert(0, f"plot {plot_name} matches: {self.overall}")
         self.matches_info = "\n".join(match_info).strip()
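
The new override flag makes PlotMatchesResult report an overall match even when the range checks fail, so a caller can force a plot that the data-shape filter would normally suppress. At the user level this surfaces as an override argument on the plotting methods, as example_1D above uses:

    # Python: forcing a plot past the PlotFilter range checks
    # (res is assumed to be a BenchResult from bench.get_result())
    res.to_scatter_jitter(override=True)
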

{holobench-1.34.0 → holobench-1.36.0}/bencher/results/bench_result.py
@@ -7,11 +7,12 @@ from bencher.results.video_summary import VideoSummaryResult
 from bencher.results.panel_result import PanelResult
 from bencher.results.plotly_result import PlotlyResult
 from bencher.results.holoview_result import HoloviewResult
+from bencher.results.hvplot_result import HvplotResult
 from bencher.results.dataset_result import DataSetResult
 from bencher.utils import listify
 
 
-class BenchResult(PlotlyResult, HoloviewResult, VideoSummaryResult, DataSetResult):
+class BenchResult(PlotlyResult, HoloviewResult, HvplotResult, VideoSummaryResult, DataSetResult):  # noqa pylint: disable=too-many-ancestors
     """Contains the results of the benchmark and has methods to cast the results to various datatypes and graphical representations"""
 
     def __init__(self, bench_cfg) -> None:
@@ -28,6 +29,7 @@ class BenchResult(PlotlyResult, HoloviewResult, VideoSummaryResult, DataSetResul
             HoloviewResult.to_curve,
             HoloviewResult.to_line,
             HoloviewResult.to_heatmap,
+            HvplotResult.to_histogram,
             PlotlyResult.to_volume,
             # PanelResult.to_video,
             PanelResult.to_panes,

{holobench-1.34.0 → holobench-1.36.0}/bencher/results/bench_result_base.py
@@ -21,7 +21,9 @@ from bencher.utils import listify
 
 from bencher.variables.results import ResultReference, ResultDataSet
 
-from bencher.results.composable_container.composable_container_panel import ComposableContainerPanel
+from bencher.results.composable_container.composable_container_panel import (
+    ComposableContainerPanel,
+)
 
 # todo add plugins
 # https://gist.github.com/dorneanu/cce1cd6711969d581873a88e0257e312
@@ -31,7 +33,8 @@ from bencher.results.composable_container.composable_container_panel import Comp
 class ReduceType(Enum):
     AUTO = auto()  # automatically determine the best way to reduce the dataset
     SQUEEZE = auto()  # remove any dimensions of length 1
-    REDUCE = auto()  # get the mean and std dev of the the "repeat" dimension
+    REDUCE = auto()  # get the mean and std dev of the data along the "repeat" dimension
+    MINMAX = auto()  # get the minimum and maximum of data along the "repeat" dimension
    NONE = auto()  # don't reduce
 
 
@@ -55,7 +58,10 @@ class BenchResultBase(OptunaResult):
         return self.ds.count()
 
     def to_hv_dataset(
-        self, reduce: ReduceType = ReduceType.AUTO, result_var: ResultVar = None, level: int = None
+        self,
+        reduce: ReduceType = ReduceType.AUTO,
+        result_var: ResultVar = None,
+        level: int = None,
     ) -> hv.Dataset:
         """Generate a holoviews dataset from the xarray dataset.
 
@@ -72,7 +78,10 @@
         return hv.Dataset(self.to_dataset(reduce, result_var, level))
 
     def to_dataset(
-        self, reduce: ReduceType = ReduceType.AUTO, result_var: ResultVar = None, level: int = None
+        self,
+        reduce: ReduceType = ReduceType.AUTO,
+        result_var: ResultVar = None,
+        level: int = None,
     ) -> xr.Dataset:
         """Generate a summarised xarray dataset.
 
@@ -85,16 +94,35 @@
         if reduce == ReduceType.AUTO:
             reduce = ReduceType.REDUCE if self.bench_cfg.repeats > 1 else ReduceType.SQUEEZE
 
-        ds_out = self.ds if result_var is None else self.ds[result_var.name]
+        ds_out = self.ds.copy()
+
+        if result_var is not None:
+            ds_out = ds_out[result_var.name]
+
+        def rename_ds(dataset: xr.Dataset, suffix: str):
+            # var_name =
+            rename_dict = {var: f"{var}_{suffix}" for var in dataset.data_vars}
+            ds = dataset.rename_vars(rename_dict)
+            return ds
 
         match reduce:
             case ReduceType.REDUCE:
                 ds_reduce_mean = ds_out.mean(dim="repeat", keep_attrs=True)
-                ds_reduce_std = ds_out.std(dim="repeat", keep_attrs=True)
-
-                for v in ds_reduce_mean.data_vars:
-                    ds_reduce_mean[f"{v}_std"] = ds_reduce_std[v]
-                ds_out = ds_reduce_mean
+                ds_reduce_std = ds_out.std(dim="repeat", keep_attrs=False)
+                ds_reduce_std = rename_ds(ds_reduce_std, "std")
+                ds_out = xr.merge([ds_reduce_mean, ds_reduce_std])
+                ds_out = xr.merge(
+                    [
+                        ds_reduce_mean,
+                        ds_reduce_std,
+                    ]
+                )
+            case ReduceType.MINMAX:  # TODO, need to pass mean, center of minmax, and minmax
+                ds_reduce_mean = ds_out.mean(dim="repeat", keep_attrs=True)
+                ds_reduce_min = ds_out.min(dim="repeat")
+                ds_reduce_max = ds_out.max(dim="repeat")
+                ds_reduce_range = rename_ds(ds_reduce_max - ds_reduce_min, "range")
+                ds_out = xr.merge([ds_reduce_mean, ds_reduce_range])
             case ReduceType.SQUEEZE:
                 ds_out = ds_out.squeeze(drop=True)
         if level is not None:
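
With these changes REDUCE keeps the per-variable mean under the original name and merges in a renamed standard deviation, while the new MINMAX branch merges the mean with a renamed range. For a result variable called output, the summarised dataset therefore gains suffixed columns (a sketch; res is assumed to be a BenchResult):

    # Python: the suffix convention produced by rename_ds above
    ds = res.to_dataset(bch.ReduceType.REDUCE)   # data_vars: output, output_std
    mm = res.to_dataset(bch.ReduceType.MINMAX)   # data_vars: output, output_range (max - min)
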
@@ -331,6 +359,7 @@
         result_var: ResultVar = None,
         result_types=None,
         pane_collection: pn.pane = None,
+        override=False,
         **kwargs,
     ):
         plot_filter = PlotFilter(
@@ -342,7 +371,9 @@
             repeats_range=repeats_range,
             input_range=input_range,
         )
-        matches_res = plot_filter.matches_result(self.plt_cnt_cfg, callable_name(plot_callback))
+        matches_res = plot_filter.matches_result(
+            self.plt_cnt_cfg, callable_name(plot_callback), override
+        )
         if matches_res.overall:
             return self.map_plot_panes(
                 plot_callback=plot_callback,
@@ -400,7 +431,9 @@
         dim_color = color_tuple_to_css(int_to_col(num_dims - 2, 0.05, 1.0))
 
         outer_container = ComposableContainerPanel(
-            name=" vs ".join(dims), background_col=dim_color, horizontal=not horizontal
+            name=" vs ".join(dims),
+            background_col=dim_color,
+            horizontal=not horizontal,
         )
         max_len = 0
         for i in range(dataset.sizes[selected_dim]):

{holobench-1.34.0 → holobench-1.36.0}/bencher/results/holoview_result.py
@@ -6,6 +6,7 @@ import holoviews as hv
 from param import Parameter
 from functools import partial
 import hvplot.xarray  # noqa pylint: disable=duplicate-code,unused-import
+import hvplot.pandas  # noqa pylint: disable=duplicate-code,unused-import
 import xarray as xr
 
 from bencher.utils import (
@@ -86,7 +87,9 @@
 
         # return time_widget_args
 
-    def to_bar(self, result_var: Parameter = None, **kwargs) -> Optional[pn.panel]:
+    def to_bar(
+        self, result_var: Parameter = None, override: bool = False, **kwargs
+    ) -> Optional[pn.panel]:
         return self.filter(
             self.to_bar_ds,
             float_range=VarRange(0, 0),
@@ -97,6 +100,7 @@
             target_dimension=2,
             result_var=result_var,
             result_types=(ResultVar),
+            override=override,
             **kwargs,
         )
 
@@ -186,6 +190,7 @@
             cat_range=VarRange(0, None),
             repeats_range=VarRange(2, None),
             reduce=ReduceType.REDUCE,
+            # reduce=ReduceType.MINMAX,
             target_dimension=2,
             result_var=result_var,
             result_types=(ResultVar),
@@ -196,7 +201,6 @@
         self, dataset: xr.Dataset, result_var: Parameter, **kwargs
     ) -> Optional[hv.Curve]:
         hvds = hv.Dataset(dataset)
-        # result_var = self.get_results_var_list(result_var)[0]
         title = self.title_from_ds(dataset, result_var, **kwargs)
         pt = hvds.to(hv.Curve).opts(title=title, **kwargs)
         pt *= hvds.to(hv.Spread).opts(alpha=0.2)
@@ -541,17 +545,22 @@
     def to_scatter_jitter(
         self,
         result_var: Parameter = None,
+        override: bool = False,
         **kwargs,  # pylint: disable=unused-argument
     ) -> List[hv.Scatter]:
-        return self.overlay_plots(partial(self.to_scatter_jitter_single, **kwargs))
+        return self.overlay_plots(
+            partial(self.to_scatter_jitter_single, override=override, **kwargs)
+        )
 
-    def to_scatter_jitter_single(self, result_var: Parameter, **kwargs) -> Optional[hv.Scatter]:
+    def to_scatter_jitter_single(
+        self, result_var: Parameter, override: bool = True, **kwargs
+    ) -> Optional[hv.Scatter]:
         matches = PlotFilter(
             float_range=VarRange(0, 0),
             cat_range=VarRange(0, None),
             repeats_range=VarRange(2, None),
             input_range=VarRange(1, None),
-        ).matches_result(self.plt_cnt_cfg, "to_scatter_jitter")
+        ).matches_result(self.plt_cnt_cfg, "to_scatter_jitter", override)
         if matches.overall:
             ds = self.to_hv_dataset(ReduceType.NONE)
             pt = (
@@ -640,9 +649,6 @@
 
         return hv.DynamicMap(cb, kdims=kdims)
 
-    def to_explorer(self):
-        return self.to_xarray().hvplot.explorer()
-
     def to_grid(self, inputs=None):
         if inputs is None:
             inputs = self.bench_cfg.inputs_as_str()
@@ -653,7 +659,13 @@
     def to_table(self):
         return self.to(hv.Table, ReduceType.SQUEEZE)
 
-    def to_surface(self, result_var: Parameter = None, **kwargs) -> Optional[pn.panel]:
+    def to_tabulator(self, **kwargs):
+        """Passes the data to the panel Tabulator type to display an interactive table
+        see https://panel.holoviz.org/reference/widgets/Tabulator.html for extra options
+        """
+        return pn.widgets.Tabulator(self.to_pandas(), **kwargs)
+
+    def to_surface(self, result_var: Parameter = None, **kwargs) -> Optional[pn.pane.Pane]:
         return self.filter(
             self.to_surface_ds,
             float_range=VarRange(2, None),
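
The new to_tabulator method simply forwards its kwargs to panel's Tabulator widget, so the widget's usual options apply. A sketch, assuming a populated result and standard Tabulator options such as pagination and page_size:

    # Python: rendering results as an interactive, paginated table
    table = res.to_tabulator(pagination="remote", page_size=20)
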

holobench-1.36.0/bencher/results/hvplot_result.py (new file)
@@ -0,0 +1,54 @@
+from __future__ import annotations
+from typing import Optional
+import panel as pn
+from param import Parameter
+import hvplot.xarray  # noqa pylint: disable=duplicate-code,unused-import
+import hvplot.pandas  # noqa pylint: disable=duplicate-code,unused-import
+import xarray as xr
+
+from bencher.results.panel_result import PanelResult
+from bencher.results.bench_result_base import ReduceType
+
+from bencher.plotting.plot_filter import VarRange
+from bencher.variables.results import ResultVar
+
+
+class HvplotResult(PanelResult):
+    def to_explorer(self) -> pn.pane.Pane:
+        """Produces a hvplot explorer instance to explore the generated dataset
+        see: https://hvplot.holoviz.org/getting_started/explorer.html
+
+        Returns:
+            pn.pane.Pane: A dynamic pane for exploring a dataset
+        """
+
+        if len(self.bench_cfg.input_vars) > 0:
+            return self.to_xarray().hvplot.explorer()
+
+        # For some reason hvplot doesn't like 1D datasets in xarray, so convert to pandas which it has no problem with
+        # TODO look into why this is, its probably due to how I am setting up the indexing in xarray.
+        return self.to_pandas().hvplot.explorer()
+
+    def to_histogram(self, result_var: Parameter = None, **kwargs) -> Optional[pn.pane.Pane]:
+        return self.filter(
+            self.to_histogram_ds,
+            float_range=VarRange(0, 0),
+            cat_range=VarRange(0, None),
+            input_range=VarRange(0, 0),
+            reduce=ReduceType.NONE,
+            target_dimension=2,
+            result_var=result_var,
+            result_types=(ResultVar),
+            **kwargs,
+        )
+
+    def to_histogram_ds(self, dataset: xr.Dataset, result_var: Parameter, **kwargs):
+        return dataset.hvplot(
+            kind="hist",
+            y=[result_var.name],
+            ylabel="count",
+            legend="bottom_right",
+            widget_location="bottom",
+            title=f"{result_var.name} vs Count",
+            **kwargs,
+        )

{holobench-1.34.0 → holobench-1.36.0}/bencher/variables/parametrised_sweep.py
@@ -192,7 +192,12 @@ class ParametrizedSweep(Parameterized):
             )
         )
 
-    def __call__(self, **kwargs):
+    def __call__(self, **kwargs) -> dict:
+        """This is the function that is called to record data samples in the benchmarking function. It should be overridden with your custom logic and then call the parent method "return super().__call__(**kwargs)"
+
+        Returns:
+            dict: a dictionary with all the result variables in the ParametrisedSweep class as named key value pairs.
+        """
         return self.get_results_values_as_dict()
 
     def plot_hmap(self, **kwargs):
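
The newly documented contract: override __call__, set the result fields, then delegate to the parent so it can collect them into a dict. A minimal sketch following the pattern of the example classes added above:

    # Python: the documented ParametrizedSweep.__call__ override pattern
    import random
    import bencher as bch

    class MySweep(bch.ParametrizedSweep):
        value = bch.ResultVar(units="ul", doc="a random sample")

        def __call__(self, **kwargs) -> dict:
            self.update_params_from_kwargs(**kwargs)  # apply swept inputs, as in Example1D
            self.value = random.uniform(0.0, 1.0)
            return super().__call__(**kwargs)  # gathers result variables into a dict
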

{holobench-1.34.0 → holobench-1.36.0}/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "holobench"
-version = "1.34.0"
+version = "1.36.0"
 
 authors = [{ name = "Austin Gregg-Smith", email = "blooop@gmail.com" }]
 description = "A package for benchmarking the performance of arbitrary functions"
@@ -97,7 +97,7 @@ coverage-report = "coverage report -m"
 update-lock = "pixi update && git commit -a -m'update pixi.lock' || true"
 push = "git push"
 update-lock-push = { depends-on = ["update-lock", "push"] }
-fix = { depends-on = ["update-lock", "format", "ruff-lint"] }
+fix = { depends-on = ["update-lock", "format", "ruff-lint", "pre-commit"] }
 fix-commit-push = { depends-on = ["fix", "commit-format", "update-lock-push"] }
 ci-no-cover = { depends-on = ["style", "test"] }
 ci = { depends-on = [