holobench 1.35.0__tar.gz → 1.36.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (104)
  1. {holobench-1.35.0 → holobench-1.36.0}/PKG-INFO +1 -1
  2. {holobench-1.35.0 → holobench-1.36.0}/bencher/bench_cfg.py +6 -6
  3. {holobench-1.35.0 → holobench-1.36.0}/bencher/bench_runner.py +7 -7
  4. {holobench-1.35.0 → holobench-1.36.0}/bencher/bencher.py +4 -4
  5. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_composable_container.py +1 -1
  6. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_composable_container2.py +2 -2
  7. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_holosweep.py +1 -1
  8. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_image.py +2 -2
  9. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_image1.py +1 -1
  10. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_sample_cache.py +4 -4
  11. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_sample_cache_context.py +1 -1
  12. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_video.py +2 -2
  13. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/experimental/example_hvplot_explorer.py +1 -1
  14. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/inputs_1D/example_1D.py +11 -2
  15. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/shelved/example_float3D_cone.py +1 -1
  16. {holobench-1.35.0 → holobench-1.36.0}/bencher/job.py +3 -3
  17. {holobench-1.35.0 → holobench-1.36.0}/bencher/results/bench_result_base.py +27 -7
  18. {holobench-1.35.0 → holobench-1.36.0}/bencher/results/holoview_result.py +1 -1
  19. {holobench-1.35.0 → holobench-1.36.0}/pyproject.toml +1 -1
  20. {holobench-1.35.0 → holobench-1.36.0}/.gitignore +0 -0
  21. {holobench-1.35.0 → holobench-1.36.0}/LICENSE +0 -0
  22. {holobench-1.35.0 → holobench-1.36.0}/README.md +0 -0
  23. {holobench-1.35.0 → holobench-1.36.0}/bencher/__init__.py +0 -0
  24. {holobench-1.35.0 → holobench-1.36.0}/bencher/bench_plot_server.py +0 -0
  25. {holobench-1.35.0 → holobench-1.36.0}/bencher/bench_report.py +0 -0
  26. {holobench-1.35.0 → holobench-1.36.0}/bencher/caching.py +0 -0
  27. {holobench-1.35.0 → holobench-1.36.0}/bencher/class_enum.py +0 -0
  28. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/__init__.py +0 -0
  29. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/benchmark_data.py +0 -0
  30. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_all.py +0 -0
  31. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_categorical.py +0 -0
  32. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_consts.py +0 -0
  33. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_custom_sweep.py +0 -0
  34. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_custom_sweep2.py +0 -0
  35. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_dataframe.py +0 -0
  36. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_docs.py +0 -0
  37. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_filepath.py +0 -0
  38. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_float3D.py +0 -0
  39. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_float_cat.py +0 -0
  40. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_floats.py +0 -0
  41. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_floats2D.py +0 -0
  42. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_holosweep_objects.py +0 -0
  43. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_holosweep_tap.py +0 -0
  44. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_levels.py +0 -0
  45. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_levels2.py +0 -0
  46. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_pareto.py +0 -0
  47. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_publish.py +0 -0
  48. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_rerun.py +0 -0
  49. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_rerun2.py +0 -0
  50. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_simple.py +0 -0
  51. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_simple_bool.py +0 -0
  52. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_simple_cat.py +0 -0
  53. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_simple_float.py +0 -0
  54. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_simple_float2d.py +0 -0
  55. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_strings.py +0 -0
  56. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_time_event.py +0 -0
  57. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_workflow.py +0 -0
  58. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/experimental/example_bokeh_plotly.py +0 -0
  59. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/experimental/example_hover_ex.py +0 -0
  60. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/experimental/example_interactive.py +0 -0
  61. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/experimental/example_streamnd.py +0 -0
  62. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/experimental/example_streams.py +0 -0
  63. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/experimental/example_template.py +0 -0
  64. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/experimental/example_updates.py +0 -0
  65. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/experimental/example_vector.py +0 -0
  66. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/inputs_0D/example_0D.py +0 -0
  67. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/meta/example_meta.py +0 -0
  68. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/meta/example_meta_cat.py +0 -0
  69. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/meta/example_meta_float.py +0 -0
  70. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/meta/example_meta_levels.py +0 -0
  71. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/optuna/example_optuna.py +0 -0
  72. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/shelved/example_float2D_scatter.py +0 -0
  73. {holobench-1.35.0 → holobench-1.36.0}/bencher/example/shelved/example_kwargs.py +0 -0
  74. {holobench-1.35.0 → holobench-1.36.0}/bencher/flask_server.py +0 -0
  75. {holobench-1.35.0 → holobench-1.36.0}/bencher/optuna_conversions.py +0 -0
  76. {holobench-1.35.0 → holobench-1.36.0}/bencher/plotting/__init__.py +0 -0
  77. {holobench-1.35.0 → holobench-1.36.0}/bencher/plotting/plot_filter.py +0 -0
  78. {holobench-1.35.0 → holobench-1.36.0}/bencher/plotting/plt_cnt_cfg.py +0 -0
  79. {holobench-1.35.0 → holobench-1.36.0}/bencher/results/__init__.py +0 -0
  80. {holobench-1.35.0 → holobench-1.36.0}/bencher/results/bench_result.py +0 -0
  81. {holobench-1.35.0 → holobench-1.36.0}/bencher/results/composable_container/__init__.py +0 -0
  82. {holobench-1.35.0 → holobench-1.36.0}/bencher/results/composable_container/composable_container_base.py +0 -0
  83. {holobench-1.35.0 → holobench-1.36.0}/bencher/results/composable_container/composable_container_dataframe.py +0 -0
  84. {holobench-1.35.0 → holobench-1.36.0}/bencher/results/composable_container/composable_container_panel.py +0 -0
  85. {holobench-1.35.0 → holobench-1.36.0}/bencher/results/composable_container/composable_container_video.py +0 -0
  86. {holobench-1.35.0 → holobench-1.36.0}/bencher/results/dataset_result.py +0 -0
  87. {holobench-1.35.0 → holobench-1.36.0}/bencher/results/float_formatter.py +0 -0
  88. {holobench-1.35.0 → holobench-1.36.0}/bencher/results/hvplot_result.py +0 -0
  89. {holobench-1.35.0 → holobench-1.36.0}/bencher/results/optuna_result.py +0 -0
  90. {holobench-1.35.0 → holobench-1.36.0}/bencher/results/panel_result.py +0 -0
  91. {holobench-1.35.0 → holobench-1.36.0}/bencher/results/plotly_result.py +0 -0
  92. {holobench-1.35.0 → holobench-1.36.0}/bencher/results/video_result.py +0 -0
  93. {holobench-1.35.0 → holobench-1.36.0}/bencher/results/video_summary.py +0 -0
  94. {holobench-1.35.0 → holobench-1.36.0}/bencher/utils.py +0 -0
  95. {holobench-1.35.0 → holobench-1.36.0}/bencher/utils_rerun.py +0 -0
  96. {holobench-1.35.0 → holobench-1.36.0}/bencher/variables/__init__.py +0 -0
  97. {holobench-1.35.0 → holobench-1.36.0}/bencher/variables/inputs.py +0 -0
  98. {holobench-1.35.0 → holobench-1.36.0}/bencher/variables/parametrised_sweep.py +0 -0
  99. {holobench-1.35.0 → holobench-1.36.0}/bencher/variables/results.py +0 -0
  100. {holobench-1.35.0 → holobench-1.36.0}/bencher/variables/sweep_base.py +0 -0
  101. {holobench-1.35.0 → holobench-1.36.0}/bencher/variables/time.py +0 -0
  102. {holobench-1.35.0 → holobench-1.36.0}/bencher/video_writer.py +0 -0
  103. {holobench-1.35.0 → holobench-1.36.0}/bencher/worker_job.py +0 -0
  104. {holobench-1.35.0 → holobench-1.36.0}/resource/bencher +0 -0
{holobench-1.35.0 → holobench-1.36.0}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: holobench
-Version: 1.35.0
+Version: 1.36.0
 Summary: A package for benchmarking the performance of arbitrary functions
 Project-URL: Repository, https://github.com/dyson-ai/bencher
 Project-URL: Home, https://github.com/dyson-ai/bencher
{holobench-1.35.0 → holobench-1.36.0}/bencher/bench_cfg.py
@@ -83,16 +83,16 @@ class BenchRunCfg(BenchPlotSrvCfg):
 
     raise_duplicate_exception: bool = param.Boolean(False, doc=" Used to debug unique plot names.")
 
-    use_cache: bool = param.Boolean(
+    cache_results: bool = param.Boolean(
        False,
-        doc="This is a benchmark level cache that stores the results of a fully completed benchmark. At the end of a benchmark the values are added to the cache but are not if the benchmark does not complete. If you want to cache values during the benchmark you need to use the use_sample_cache option. Beware that depending on how you change code in the objective function, the cache could provide values that are not correct.",
+        doc="This is a benchmark level cache that stores the results of a fully completed benchmark. At the end of a benchmark the values are added to the cache but are not if the benchmark does not complete. If you want to cache values during the benchmark you need to use the cache_samples option. Beware that depending on how you change code in the objective function, the cache could provide values that are not correct.",
     )
 
     clear_cache: bool = param.Boolean(
         False, doc=" Clear the cache of saved input->output mappings."
     )
 
-    use_sample_cache: bool = param.Boolean(
+    cache_samples: bool = param.Boolean(
         False,
         doc="If true, every time the benchmark function is called, bencher will check if that value has been calculated before and if so load the from the cache. Note that the sample level cache is different from the benchmark level cache which only caches the aggregate of all the results at the end of the benchmark. This cache lets you stop a benchmark halfway through and continue. However, beware that depending on how you change code in the objective function, the cache could provide values that are not correct.",
     )
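The rename is mechanical: both flags keep their 1.35.0 semantics. A minimal sketch of the new spelling, using attribute assignment as in the examples further down this diff:

    import bencher as bch

    # Renamed in 1.36.0:
    #   use_cache        -> cache_results (benchmark-level cache; written only
    #                       when a benchmark runs to completion)
    #   use_sample_cache -> cache_samples (per-call cache; lets a crashed run
    #                       resume from previously computed samples)
    run_cfg = bch.BenchRunCfg()
    run_cfg.cache_results = True
    run_cfg.cache_samples = True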
@@ -182,7 +182,7 @@ class BenchRunCfg(BenchPlotSrvCfg):
         parser.add_argument(
             "--use-cache",
             action="store_true",
-            help=BenchRunCfg.param.use_cache.doc,
+            help=BenchRunCfg.param.cache_results.doc,
         )
 
         parser.add_argument(
@@ -380,8 +380,8 @@ class BenchCfg(BenchRunCfg):
         benchmark_sampling_str.append(f" run tag: {self.run_tag}")
         if self.level is not None:
             benchmark_sampling_str.append(f" bench level: {self.level}")
-        benchmark_sampling_str.append(f" use_cache: {self.use_cache}")
-        benchmark_sampling_str.append(f" use_sample_cache: {self.use_sample_cache}")
+        benchmark_sampling_str.append(f" cache_results: {self.cache_results}")
+        benchmark_sampling_str.append(f" cache_samples {self.cache_samples}")
         benchmark_sampling_str.append(f" only_hash_tag: {self.only_hash_tag}")
         benchmark_sampling_str.append(f" executor: {self.executor}")
 
{holobench-1.35.0 → holobench-1.36.0}/bencher/bench_runner.py
@@ -33,11 +33,11 @@ class BenchRunner:
 
     @staticmethod
     def setup_run_cfg(
-        run_cfg: BenchRunCfg = BenchRunCfg(), level: int = 2, use_cache=True
+        run_cfg: BenchRunCfg = BenchRunCfg(), level: int = 2, cache_results=True
     ) -> BenchRunCfg:
         run_cfg_out = deepcopy(run_cfg)
-        run_cfg_out.use_sample_cache = use_cache
-        run_cfg_out.only_hash_tag = use_cache
+        run_cfg_out.cache_samples = cache_results
+        run_cfg_out.only_hash_tag = cache_results
         run_cfg_out.level = level
         return run_cfg_out
 
@@ -78,9 +78,9 @@ class BenchRunner:
         show: bool = False,
         save: bool = False,
         grouped: bool = True,
-        use_cache: bool = True,
+        cache_results: bool = True,
     ) -> List[Bench]:
-        """This function controls how a benchmark or a set of benchmarks are run. If you are only running a single benchmark it can be simpler to just run it directly, but if you are running several benchmarks together and want them to be sampled at different levels of fidelity or published together in a single report this function enables that workflow. If you have an expensive function, it can be useful to view low fidelity results as they are computed but also continue to compute higher fidelity results while reusing previously computed values. The parameters min_level and max_level let you specify how to progressivly increase the sampling resolution of the benchmark sweep. By default use_cache=True so that previous values are reused.
+        """This function controls how a benchmark or a set of benchmarks are run. If you are only running a single benchmark it can be simpler to just run it directly, but if you are running several benchmarks together and want them to be sampled at different levels of fidelity or published together in a single report this function enables that workflow. If you have an expensive function, it can be useful to view low fidelity results as they are computed but also continue to compute higher fidelity results while reusing previously computed values. The parameters min_level and max_level let you specify how to progressivly increase the sampling resolution of the benchmark sweep. By default cache_results=True so that previous values are reused.
 
         Args:
             min_level (int, optional): The minimum level to start sampling at. Defaults to 2.
@@ -93,14 +93,14 @@ class BenchRunner:
             show (bool, optional): show the results in the local web browser. Defaults to False.
             save (bool, optional): save the results to disk in index.html. Defaults to False.
             grouped (bool, optional): Produce a single html page with all the benchmarks included. Defaults to True.
-            use_cache (bool, optional): Use the sample cache to reused previous results. Defaults to True.
+            cache_results (bool, optional): Use the sample cache to reused previous results. Defaults to True.
 
         Returns:
             List[BenchCfg]: A list of bencher instances
         """
         if run_cfg is None:
             run_cfg = deepcopy(self.run_cfg)
-        run_cfg = BenchRunner.setup_run_cfg(run_cfg, use_cache=use_cache)
+        run_cfg = BenchRunner.setup_run_cfg(run_cfg, cache_results=cache_results)
 
         if level is not None:
             min_level = level
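For BenchRunner callers the keyword change propagates through setup_run_cfg and run. A hedged sketch assembled from the calls visible elsewhere in this diff; bench_fn stands in for any benchmark function accepted by add_run:

    import bencher as bch

    bench_runner = bch.BenchRunner("bench_runner_test")
    bench_runner.add_run(bench_fn)  # bench_fn: placeholder benchmark function

    # 1.35.0 spelling: bench_runner.run(level=6, show=True, use_cache=False)
    bench_runner.run(level=6, show=True, cache_results=False)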
{holobench-1.35.0 → holobench-1.36.0}/bencher/bencher.py
@@ -327,7 +327,7 @@ class Bench(BenchPlotServer):
             logging.info("Copy run cfg from bench class")
 
         if run_cfg.only_plot:
-            run_cfg.use_cache = True
+            run_cfg.cache_results = True
 
         self.last_run_cfg = run_cfg
 
@@ -371,7 +371,7 @@
                 title += "s"
                 title += ": " + ", ".join([f"{c[0].name}={c[1]}" for c in const_vars_in])
             else:
-                title = " ".join([i.name for i in result_vars_in])
+                title = "Recording: " + ", ".join([i.name for i in result_vars_in])
 
         if run_cfg.level > 0:
             inputs = []
@@ -448,7 +448,7 @@
         if run_cfg.clear_cache:
             c.delete(bench_cfg_hash)
             logging.info("cleared cache")
-        elif run_cfg.use_cache:
+        elif run_cfg.cache_results:
             logging.info(
                 f"checking for previously calculated results with key: {bench_cfg_hash}"
             )
@@ -813,7 +813,7 @@
             cache_name="sample_cache",
             tag_index=True,
             size_limit=self.cache_size,
-            use_cache=run_cfg.use_sample_cache,
+            cache_results=run_cfg.cache_samples,
         )
 
     def clear_tag_from_sample_cache(self, tag: str, run_cfg):
{holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_composable_container.py
@@ -98,7 +98,7 @@ def example_composable_container_video(
 
 if __name__ == "__main__":
     ex_run_cfg = bch.BenchRunCfg()
-    ex_run_cfg.use_sample_cache = False
+    ex_run_cfg.cache_samples = False
     # ex_run_cfg.level = 2
     ex_report = bch.BenchReport()
     example_composable_container_image(ex_run_cfg, report=ex_report)
{holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_composable_container2.py
@@ -144,7 +144,7 @@ def example_composable_container_image(
 
 # if __name__ == "__main__":
 #     ex_run_cfg = bch.BenchRunCfg()
-#     ex_run_cfg.use_sample_cache = False
+#     ex_run_cfg.cache_samples = False
 #     # ex_run_cfg.level = 2
 #     ex_report = bch.BenchReport()
 #     example_composable_container_image(ex_run_cfg, report=ex_report)
@@ -157,4 +157,4 @@ if __name__ == "__main__":
     # bench_runner.add_run(bench_image)
     bench_runner.add_run(example_composable_container_image)
 
-    bench_runner.run(level=6, show=True, use_cache=False)
+    bench_runner.run(level=6, show=True, cache_results=False)
{holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_holosweep.py
@@ -95,4 +95,4 @@ if __name__ == "__main__":
     PlotFunctions().to_gui()
     bench_run = bch.BenchRunner("bench_runner_test")
     bench_run.add_run(example_holosweep)
-    bench_run.run(level=6, show=True, use_cache=False)
+    bench_run.run(level=6, show=True, cache_results=False)
{holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_image.py
@@ -58,7 +58,7 @@ class BenchPolygons(bch.ParametrizedSweep):
 def example_image(
     run_cfg: bch.BenchRunCfg = bch.BenchRunCfg(), report: bch.BenchReport = bch.BenchReport()
 ) -> bch.Bench:
-    run_cfg.use_cache = False
+    run_cfg.cache_results = False
     bench = bch.Bench("polygons", BenchPolygons(), run_cfg=run_cfg, report=report)
 
     bench.result_vars = ["polygon", "area"]
@@ -142,7 +142,7 @@ if __name__ == "__main__":
     # def example_image_pairs()
 
     ex_run_cfg = bch.BenchRunCfg()
-    ex_run_cfg.use_sample_cache = True
+    ex_run_cfg.cache_samples = True
     # ex_run_cfg.debug = True
     # ex_run_cfg.repeats = 2
     ex_run_cfg.level = 4
{holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_image1.py
@@ -73,7 +73,7 @@ def example_image_vid_sequential1(
 
 if __name__ == "__main__":
     ex_run_cfg = bch.BenchRunCfg()
-    ex_run_cfg.use_sample_cache = True
+    ex_run_cfg.cache_samples = True
     ex_run_cfg.overwrite_sample_cache = True
     ex_run_cfg.level = 3
 
{holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_sample_cache.py
@@ -2,7 +2,7 @@ import bencher as bch
 
 
 class UnreliableClass(bch.ParametrizedSweep):
-    """This class helps demonstrate benchmarking a function that sometimes crashes during sampling. By using BenchRunCfg.use_sample_cache you can store the results of every call to the benchmark function so data is not lost in the event of a crash. However, because cache invalidation is hard (https://martinfowler.com/bliki/TwoHardThings.html) you need to be mindful of how you could get bad results due to incorrect cache data. For example if you change your benchmark function and use the sample cache you will not get correct values; you will need to use BenchRunCfg.clear_sample_cache to purge any out of date results."""
+    """This class helps demonstrate benchmarking a function that sometimes crashes during sampling. By using BenchRunCfg.cache_samples you can store the results of every call to the benchmark function so data is not lost in the event of a crash. However, because cache invalidation is hard (https://martinfowler.com/bliki/TwoHardThings.html) you need to be mindful of how you could get bad results due to incorrect cache data. For example if you change your benchmark function and use the sample cache you will not get correct values; you will need to use BenchRunCfg.clear_sample_cache to purge any out of date results."""
 
     input_val = bch.IntSweep(
         default=0,
@@ -31,7 +31,7 @@ def example_sample_cache(
     report: bch.BenchReport = bch.BenchReport(),
     trigger_crash: bool = False,
 ) -> bch.Bench:
-    """This example shows how to use the use_sample_cache option to deal with unreliable functions and to continue benchmarking using previously calculated results even if the code crashed during the run
+    """This example shows how to use the cache_samples option to deal with unreliable functions and to continue benchmarking using previously calculated results even if the code crashed during the run
 
     Args:
         run_cfg (BenchRunCfg): configuration of how to perform the param sweep
@@ -50,7 +50,7 @@
         title="Example Crashy Function with the sample_cache",
         input_vars=[UnreliableClass.param.input_val],
         result_vars=[UnreliableClass.param.return_value, UnreliableClass.param.trigger_crash],
-        description="""This example shows how to use the use_sample_cache option to deal with unreliable functions and to continue benchmarking using previously calculated results even if the code crashed during the run""",
+        description="""This example shows how to use the cache_samples option to deal with unreliable functions and to continue benchmarking using previously calculated results even if the code crashed during the run""",
         run_cfg=run_cfg,
         post_description="The input_val vs return value graph is a straight line as expected and there is no record of the fact the benchmark crashed halfway through. The second graph shows that for values >1 the trigger_crash value had to be 0 in order to proceed",
     )
@@ -63,7 +63,7 @@ if __name__ == "__main__":
         ex_run_cfg.executor = bch.Executors.SCOOP
 
     # this will store the result of of every call to crashy_fn
-    ex_run_cfg.use_sample_cache = True
+    ex_run_cfg.cache_samples = True
    ex_run_cfg.clear_sample_cache = True
 
    try:
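A sketch of the crash-and-resume workflow this example demonstrates, under the new flag name; the broad except is illustrative, standing in for whatever the crashy worker raises when trigger_crash is set:

    import bencher as bch
    from bencher.example.example_sample_cache import example_sample_cache

    run_cfg = bch.BenchRunCfg()
    run_cfg.cache_samples = True       # was use_sample_cache in 1.35.0
    run_cfg.clear_sample_cache = True  # start from a clean cache

    try:
        # first run crashes partway; completed samples are already cached
        example_sample_cache(run_cfg, trigger_crash=True)
    except Exception:
        pass

    run_cfg.clear_sample_cache = False
    # second run reloads cached samples and computes only the remainder
    example_sample_cache(run_cfg, trigger_crash=False)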
{holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_sample_cache_context.py
@@ -51,7 +51,7 @@ def assert_call_counts(bencher, run_cfg, wrapper_calls=-1, fn_calls=-1, cache_ca
 
 def example_cache_context() -> bch.Bench:
     run_cfg = bch.BenchRunCfg()
-    run_cfg.use_sample_cache = True
+    run_cfg.cache_samples = True
     run_cfg.only_hash_tag = True
     run_cfg.repeats = 2
     run_cfg.parallel = False
{holobench-1.35.0 → holobench-1.36.0}/bencher/example/example_video.py
@@ -79,7 +79,7 @@ def example_video(
     run_cfg: bch.BenchRunCfg = bch.BenchRunCfg(), report: bch.BenchReport = bch.BenchReport()
 ) -> bch.Bench:
     # run_cfg.auto_plot = False
-    # run_cfg.use_sample_cache = True
+    # run_cfg.cache_samples = True
     bench = bch.Bench("example_video", TuringPattern(), run_cfg=run_cfg, report=report)
 
     bench.plot_sweep(
@@ -111,7 +111,7 @@ def example_video_tap(
 if __name__ == "__main__":
     run_cfg_ex = bch.BenchRunCfg()
     run_cfg_ex.level = 2
-    run_cfg_ex.use_sample_cache = True
+    run_cfg_ex.cache_samples = True
     run_cfg_ex.only_hash_tag = True
 
     # example_video(run_cfg_ex).report.show()
{holobench-1.35.0 → holobench-1.36.0}/bencher/example/experimental/example_hvplot_explorer.py
@@ -30,7 +30,7 @@ if __name__ == "__main__":
         post_description="Here you can see the output plot of sin theta between 0 and pi. In the tabs at the top you can also view 3 tabular representations of the data",
         run_cfg=bch.BenchRunCfg(
             auto_plot=True,
-            use_cache=False,
+            cache_results=False,
             repeats=2,
         ),
     )
{holobench-1.35.0 → holobench-1.36.0}/bencher/example/inputs_1D/example_1D.py
@@ -38,13 +38,22 @@ def example_1D_float_repeats(
 
     # res = bench.get_result()
     bench.run_cfg = bch.BenchRunCfg(repeats=4)
+    # bench.plot_sweep(pass_repeat=True, plot_callbacks=False)
     bench.plot_sweep(pass_repeat=True)
 
     res = bench.get_result()
-    bench.report.append(res.to_auto())
-    bench.report.append(res.to_scatter())
+    bench.report.append(res.to_curve())
+    # bench.report.append(hv.Table(res.to_hv_dataset(bch.ReduceType.MINMAX)))
+    # bench.report.append(res.to_curve() + res.to_scatter_jitter(override=True))
+    # bench.report.append(res.to_line())
     bench.report.append(res.to_scatter_jitter(override=True))
+    # bench.report.append(res.to_error_bar())
+    # bench.report.append(res.to_explorer())
+    # bench.report.append(res.to_error_bar()
 
+    # bench.report.append(res.to_dataset())
+    # bench.report.append(res.to_xarray().hvplot.plot(kind="andrews_curves"))
+    # print(res.to_xarray())
     # bench.report.append()
     return bench
 
{holobench-1.35.0 → holobench-1.36.0}/bencher/example/shelved/example_float3D_cone.py
@@ -92,5 +92,5 @@
 
 # if __name__ == "__main__":
 #     ex_run_cfg = bch.BenchRunCfg()
-#     ex_run_cfg.use_cache = True
+#     ex_run_cfg.cache_results = True
 #     example_cone(ex_run_cfg).report.show()
{holobench-1.35.0 → holobench-1.36.0}/bencher/job.py
@@ -59,7 +59,7 @@ class Executors(StrEnum):
     # THREADS=auto() #not that useful as most bench code is cpu bound
 
     @staticmethod
-    def factory(provider: Executors) -> Future():
+    def factory(provider: Executors) -> Future:
         providers = {
             Executors.SERIAL: None,
             Executors.MULTIPROCESSING: ProcessPoolExecutor(),
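The annotation fix above is worth a note: `-> Future()` instantiated a Future at class-definition time just to use it as an annotation, while `-> Future` refers to the type itself. Usage is unchanged; a sketch, assuming the bencher.job import path:

    from bencher.job import Executors

    # factory maps an Executors value to a concurrent.futures executor
    # (None for SERIAL, a ProcessPoolExecutor for MULTIPROCESSING, ...)
    executor = Executors.factory(Executors.MULTIPROCESSING)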
@@ -78,11 +78,11 @@ class FutureCache:
         cache_name: str = "fcache",
         tag_index: bool = True,
         size_limit: int = int(20e9),  # 20 GB
-        use_cache=True,
+        cache_results=True,
     ):
         self.executor_type = executor
         self.executor = None
-        if use_cache:
+        if cache_results:
             self.cache = Cache(f"cachedir/{cache_name}", tag_index=tag_index, size_limit=size_limit)
             logging.info(f"cache dir: {self.cache.directory}")
         else:
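The same rename reaches FutureCache's constructor, where the flag decides whether a diskcache backing store is created at all. A sketch with the argument names visible above; the remaining constructor arguments (such as the executor) are assumed to have defaults:

    from bencher.job import FutureCache

    fcache = FutureCache(
        cache_name="sample_cache",  # stored under cachedir/sample_cache
        tag_index=True,
        size_limit=int(20e9),       # 20 GB, the documented default
        cache_results=True,         # was use_cache in 1.35.0
    )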
{holobench-1.35.0 → holobench-1.36.0}/bencher/results/bench_result_base.py
@@ -33,7 +33,8 @@ from bencher.results.composable_container.composable_container_panel import (
 class ReduceType(Enum):
     AUTO = auto()  # automatically determine the best way to reduce the dataset
     SQUEEZE = auto()  # remove any dimensions of length 1
-    REDUCE = auto()  # get the mean and std dev of the the "repeat" dimension
+    REDUCE = auto()  # get the mean and std dev of the data along the "repeat" dimension
+    MINMAX = auto()  # get the minimum and maximum of data along the "repeat" dimension
     NONE = auto()  # don't reduce
 
 
@@ -93,16 +94,35 @@ class BenchResultBase(OptunaResult):
         if reduce == ReduceType.AUTO:
             reduce = ReduceType.REDUCE if self.bench_cfg.repeats > 1 else ReduceType.SQUEEZE
 
-        ds_out = self.ds if result_var is None else self.ds[result_var.name]
+        ds_out = self.ds.copy()
+
+        if result_var is not None:
+            ds_out = ds_out[result_var.name]
+
+        def rename_ds(dataset: xr.Dataset, suffix: str):
+            # var_name =
+            rename_dict = {var: f"{var}_{suffix}" for var in dataset.data_vars}
+            ds = dataset.rename_vars(rename_dict)
+            return ds
 
         match reduce:
             case ReduceType.REDUCE:
                 ds_reduce_mean = ds_out.mean(dim="repeat", keep_attrs=True)
-                ds_reduce_std = ds_out.std(dim="repeat", keep_attrs=True)
-
-                for v in ds_reduce_mean.data_vars:
-                    ds_reduce_mean[f"{v}_std"] = ds_reduce_std[v]
-                ds_out = ds_reduce_mean
+                ds_reduce_std = ds_out.std(dim="repeat", keep_attrs=False)
+                ds_reduce_std = rename_ds(ds_reduce_std, "std")
+                ds_out = xr.merge([ds_reduce_mean, ds_reduce_std])
+                ds_out = xr.merge(
+                    [
+                        ds_reduce_mean,
+                        ds_reduce_std,
+                    ]
+                )
+            case ReduceType.MINMAX:  # TODO, need to pass mean, center of minmax, and minmax
+                ds_reduce_mean = ds_out.mean(dim="repeat", keep_attrs=True)
+                ds_reduce_min = ds_out.min(dim="repeat")
+                ds_reduce_max = ds_out.max(dim="repeat")
+                ds_reduce_range = rename_ds(ds_reduce_max - ds_reduce_min, "range")
+                ds_out = xr.merge([ds_reduce_mean, ds_reduce_range])
             case ReduceType.SQUEEZE:
                 ds_out = ds_out.squeeze(drop=True)
         if level is not None:
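Both reductions rename the derived variables: REDUCE now merges the mean with a <var>_std variable, and the new MINMAX merges the mean with a <var>_range variable (max minus min along the "repeat" dimension). A hedged sketch, assuming res is the result of a sweep with repeats > 1, as in example_1D.py above:

    import bencher as bch

    res = bench.get_result()  # bench: a Bench that has completed a plot_sweep

    hvds_mean_std = res.to_hv_dataset(bch.ReduceType.REDUCE)    # <var>, <var>_std
    hvds_mean_range = res.to_hv_dataset(bch.ReduceType.MINMAX)  # <var>, <var>_range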
{holobench-1.35.0 → holobench-1.36.0}/bencher/results/holoview_result.py
@@ -190,6 +190,7 @@ class HoloviewResult(PanelResult):
             cat_range=VarRange(0, None),
             repeats_range=VarRange(2, None),
             reduce=ReduceType.REDUCE,
+            # reduce=ReduceType.MINMAX,
             target_dimension=2,
             result_var=result_var,
             result_types=(ResultVar),
@@ -200,7 +201,6 @@
         self, dataset: xr.Dataset, result_var: Parameter, **kwargs
     ) -> Optional[hv.Curve]:
         hvds = hv.Dataset(dataset)
-        # result_var = self.get_results_var_list(result_var)[0]
         title = self.title_from_ds(dataset, result_var, **kwargs)
         pt = hvds.to(hv.Curve).opts(title=title, **kwargs)
         pt *= hvds.to(hv.Spread).opts(alpha=0.2)
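The overlay pattern in to_curve is the standard HoloViews one: a Curve of the mean composed (via *=) with a translucent Spread of the deviation band. A minimal sketch, assuming dataset is an xarray Dataset holding the <var> and <var>_std variables produced by ReduceType.REDUCE:

    import holoviews as hv

    hvds = hv.Dataset(dataset)
    pt = hvds.to(hv.Curve).opts(title="mean")
    pt *= hvds.to(hv.Spread).opts(alpha=0.2)  # std band at 20% opacity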
{holobench-1.35.0 → holobench-1.36.0}/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "holobench"
-version = "1.35.0"
+version = "1.36.0"
 
 authors = [{ name = "Austin Gregg-Smith", email = "blooop@gmail.com" }]
 description = "A package for benchmarking the performance of arbitrary functions"