holobench 1.35.0__tar.gz → 1.36.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (104)
  1. {holobench-1.35.0 → holobench-1.36.1}/.gitignore +4 -0
  2. {holobench-1.35.0 → holobench-1.36.1}/PKG-INFO +1 -1
  3. {holobench-1.35.0 → holobench-1.36.1}/bencher/bench_cfg.py +6 -6
  4. {holobench-1.35.0 → holobench-1.36.1}/bencher/bench_runner.py +8 -8
  5. {holobench-1.35.0 → holobench-1.36.1}/bencher/bencher.py +4 -4
  6. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_composable_container.py +1 -1
  7. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_composable_container2.py +2 -2
  8. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_holosweep.py +1 -1
  9. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_image.py +2 -2
  10. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_image1.py +1 -1
  11. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_sample_cache.py +4 -4
  12. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_sample_cache_context.py +1 -1
  13. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_video.py +13 -13
  14. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/experimental/example_hvplot_explorer.py +1 -1
  15. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/inputs_1D/example_1D.py +0 -9
  16. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/shelved/example_float3D_cone.py +1 -1
  17. {holobench-1.35.0 → holobench-1.36.1}/bencher/job.py +7 -4
  18. {holobench-1.35.0 → holobench-1.36.1}/bencher/results/bench_result_base.py +27 -7
  19. {holobench-1.35.0 → holobench-1.36.1}/bencher/results/composable_container/composable_container_video.py +3 -3
  20. {holobench-1.35.0 → holobench-1.36.1}/bencher/results/holoview_result.py +1 -1
  21. {holobench-1.35.0 → holobench-1.36.1}/bencher/results/video_summary.py +5 -0
  22. {holobench-1.35.0 → holobench-1.36.1}/pyproject.toml +28 -2
  23. {holobench-1.35.0 → holobench-1.36.1}/LICENSE +0 -0
  24. {holobench-1.35.0 → holobench-1.36.1}/README.md +0 -0
  25. {holobench-1.35.0 → holobench-1.36.1}/bencher/__init__.py +0 -0
  26. {holobench-1.35.0 → holobench-1.36.1}/bencher/bench_plot_server.py +0 -0
  27. {holobench-1.35.0 → holobench-1.36.1}/bencher/bench_report.py +0 -0
  28. {holobench-1.35.0 → holobench-1.36.1}/bencher/caching.py +0 -0
  29. {holobench-1.35.0 → holobench-1.36.1}/bencher/class_enum.py +0 -0
  30. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/__init__.py +0 -0
  31. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/benchmark_data.py +0 -0
  32. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_all.py +0 -0
  33. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_categorical.py +0 -0
  34. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_consts.py +0 -0
  35. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_custom_sweep.py +0 -0
  36. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_custom_sweep2.py +0 -0
  37. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_dataframe.py +0 -0
  38. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_docs.py +0 -0
  39. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_filepath.py +0 -0
  40. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_float3D.py +0 -0
  41. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_float_cat.py +0 -0
  42. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_floats.py +0 -0
  43. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_floats2D.py +0 -0
  44. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_holosweep_objects.py +0 -0
  45. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_holosweep_tap.py +0 -0
  46. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_levels.py +0 -0
  47. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_levels2.py +0 -0
  48. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_pareto.py +0 -0
  49. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_publish.py +0 -0
  50. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_rerun.py +0 -0
  51. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_rerun2.py +0 -0
  52. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_simple.py +0 -0
  53. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_simple_bool.py +0 -0
  54. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_simple_cat.py +0 -0
  55. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_simple_float.py +0 -0
  56. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_simple_float2d.py +0 -0
  57. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_strings.py +0 -0
  58. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_time_event.py +0 -0
  59. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/example_workflow.py +0 -0
  60. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/experimental/example_bokeh_plotly.py +0 -0
  61. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/experimental/example_hover_ex.py +0 -0
  62. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/experimental/example_interactive.py +0 -0
  63. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/experimental/example_streamnd.py +0 -0
  64. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/experimental/example_streams.py +0 -0
  65. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/experimental/example_template.py +0 -0
  66. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/experimental/example_updates.py +0 -0
  67. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/experimental/example_vector.py +0 -0
  68. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/inputs_0D/example_0D.py +0 -0
  69. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/meta/example_meta.py +0 -0
  70. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/meta/example_meta_cat.py +0 -0
  71. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/meta/example_meta_float.py +0 -0
  72. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/meta/example_meta_levels.py +0 -0
  73. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/optuna/example_optuna.py +0 -0
  74. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/shelved/example_float2D_scatter.py +0 -0
  75. {holobench-1.35.0 → holobench-1.36.1}/bencher/example/shelved/example_kwargs.py +0 -0
  76. {holobench-1.35.0 → holobench-1.36.1}/bencher/flask_server.py +0 -0
  77. {holobench-1.35.0 → holobench-1.36.1}/bencher/optuna_conversions.py +0 -0
  78. {holobench-1.35.0 → holobench-1.36.1}/bencher/plotting/__init__.py +0 -0
  79. {holobench-1.35.0 → holobench-1.36.1}/bencher/plotting/plot_filter.py +0 -0
  80. {holobench-1.35.0 → holobench-1.36.1}/bencher/plotting/plt_cnt_cfg.py +0 -0
  81. {holobench-1.35.0 → holobench-1.36.1}/bencher/results/__init__.py +0 -0
  82. {holobench-1.35.0 → holobench-1.36.1}/bencher/results/bench_result.py +0 -0
  83. {holobench-1.35.0 → holobench-1.36.1}/bencher/results/composable_container/__init__.py +0 -0
  84. {holobench-1.35.0 → holobench-1.36.1}/bencher/results/composable_container/composable_container_base.py +0 -0
  85. {holobench-1.35.0 → holobench-1.36.1}/bencher/results/composable_container/composable_container_dataframe.py +0 -0
  86. {holobench-1.35.0 → holobench-1.36.1}/bencher/results/composable_container/composable_container_panel.py +0 -0
  87. {holobench-1.35.0 → holobench-1.36.1}/bencher/results/dataset_result.py +0 -0
  88. {holobench-1.35.0 → holobench-1.36.1}/bencher/results/float_formatter.py +0 -0
  89. {holobench-1.35.0 → holobench-1.36.1}/bencher/results/hvplot_result.py +0 -0
  90. {holobench-1.35.0 → holobench-1.36.1}/bencher/results/optuna_result.py +0 -0
  91. {holobench-1.35.0 → holobench-1.36.1}/bencher/results/panel_result.py +0 -0
  92. {holobench-1.35.0 → holobench-1.36.1}/bencher/results/plotly_result.py +0 -0
  93. {holobench-1.35.0 → holobench-1.36.1}/bencher/results/video_result.py +0 -0
  94. {holobench-1.35.0 → holobench-1.36.1}/bencher/utils.py +0 -0
  95. {holobench-1.35.0 → holobench-1.36.1}/bencher/utils_rerun.py +0 -0
  96. {holobench-1.35.0 → holobench-1.36.1}/bencher/variables/__init__.py +0 -0
  97. {holobench-1.35.0 → holobench-1.36.1}/bencher/variables/inputs.py +0 -0
  98. {holobench-1.35.0 → holobench-1.36.1}/bencher/variables/parametrised_sweep.py +0 -0
  99. {holobench-1.35.0 → holobench-1.36.1}/bencher/variables/results.py +0 -0
  100. {holobench-1.35.0 → holobench-1.36.1}/bencher/variables/sweep_base.py +0 -0
  101. {holobench-1.35.0 → holobench-1.36.1}/bencher/variables/time.py +0 -0
  102. {holobench-1.35.0 → holobench-1.36.1}/bencher/video_writer.py +0 -0
  103. {holobench-1.35.0 → holobench-1.36.1}/bencher/worker_job.py +0 -0
  104. {holobench-1.35.0 → holobench-1.36.1}/resource/bencher +0 -0
@@ -179,3 +179,7 @@ log/**
179
179
  managed_context/metadata.json
180
180
  test_suite_analysis/metadata.json
181
181
  *.rrd
182
+ docs/builtdocs*
183
+ docs/jupyter_execute*
184
+ thumbnails/
185
+ docs/autoapi/*
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: holobench
3
- Version: 1.35.0
3
+ Version: 1.36.1
4
4
  Summary: A package for benchmarking the performance of arbitrary functions
5
5
  Project-URL: Repository, https://github.com/dyson-ai/bencher
6
6
  Project-URL: Home, https://github.com/dyson-ai/bencher
@@ -83,16 +83,16 @@ class BenchRunCfg(BenchPlotSrvCfg):
83
83
 
84
84
  raise_duplicate_exception: bool = param.Boolean(False, doc=" Used to debug unique plot names.")
85
85
 
86
- use_cache: bool = param.Boolean(
86
+ cache_results: bool = param.Boolean(
87
87
  False,
88
- doc="This is a benchmark level cache that stores the results of a fully completed benchmark. At the end of a benchmark the values are added to the cache but are not if the benchmark does not complete. If you want to cache values during the benchmark you need to use the use_sample_cache option. Beware that depending on how you change code in the objective function, the cache could provide values that are not correct.",
88
+ doc="This is a benchmark level cache that stores the results of a fully completed benchmark. At the end of a benchmark the values are added to the cache but are not if the benchmark does not complete. If you want to cache values during the benchmark you need to use the cache_samples option. Beware that depending on how you change code in the objective function, the cache could provide values that are not correct.",
89
89
  )
90
90
 
91
91
  clear_cache: bool = param.Boolean(
92
92
  False, doc=" Clear the cache of saved input->output mappings."
93
93
  )
94
94
 
95
- use_sample_cache: bool = param.Boolean(
95
+ cache_samples: bool = param.Boolean(
96
96
  False,
97
97
  doc="If true, every time the benchmark function is called, bencher will check if that value has been calculated before and if so load the from the cache. Note that the sample level cache is different from the benchmark level cache which only caches the aggregate of all the results at the end of the benchmark. This cache lets you stop a benchmark halfway through and continue. However, beware that depending on how you change code in the objective function, the cache could provide values that are not correct.",
98
98
  )
@@ -182,7 +182,7 @@ class BenchRunCfg(BenchPlotSrvCfg):
182
182
  parser.add_argument(
183
183
  "--use-cache",
184
184
  action="store_true",
185
- help=BenchRunCfg.param.use_cache.doc,
185
+ help=BenchRunCfg.param.cache_results.doc,
186
186
  )
187
187
 
188
188
  parser.add_argument(
@@ -380,8 +380,8 @@ class BenchCfg(BenchRunCfg):
380
380
  benchmark_sampling_str.append(f" run tag: {self.run_tag}")
381
381
  if self.level is not None:
382
382
  benchmark_sampling_str.append(f" bench level: {self.level}")
383
- benchmark_sampling_str.append(f" use_cache: {self.use_cache}")
384
- benchmark_sampling_str.append(f" use_sample_cache: {self.use_sample_cache}")
383
+ benchmark_sampling_str.append(f" cache_results: {self.cache_results}")
384
+ benchmark_sampling_str.append(f" cache_samples {self.cache_samples}")
385
385
  benchmark_sampling_str.append(f" only_hash_tag: {self.only_hash_tag}")
386
386
  benchmark_sampling_str.append(f" executor: {self.executor}")
387
387
 
@@ -33,11 +33,11 @@ class BenchRunner:
33
33
 
34
34
  @staticmethod
35
35
  def setup_run_cfg(
36
- run_cfg: BenchRunCfg = BenchRunCfg(), level: int = 2, use_cache=True
36
+ run_cfg: BenchRunCfg = BenchRunCfg(), level: int = 2, cache_results=True
37
37
  ) -> BenchRunCfg:
38
38
  run_cfg_out = deepcopy(run_cfg)
39
- run_cfg_out.use_sample_cache = use_cache
40
- run_cfg_out.only_hash_tag = use_cache
39
+ run_cfg_out.cache_samples = cache_results
40
+ run_cfg_out.only_hash_tag = cache_results
41
41
  run_cfg_out.level = level
42
42
  return run_cfg_out
43
43
 
@@ -78,9 +78,9 @@ class BenchRunner:
78
78
  show: bool = False,
79
79
  save: bool = False,
80
80
  grouped: bool = True,
81
- use_cache: bool = True,
81
+ cache_results: bool = True,
82
82
  ) -> List[Bench]:
83
- """This function controls how a benchmark or a set of benchmarks are run. If you are only running a single benchmark it can be simpler to just run it directly, but if you are running several benchmarks together and want them to be sampled at different levels of fidelity or published together in a single report this function enables that workflow. If you have an expensive function, it can be useful to view low fidelity results as they are computed but also continue to compute higher fidelity results while reusing previously computed values. The parameters min_level and max_level let you specify how to progressivly increase the sampling resolution of the benchmark sweep. By default use_cache=True so that previous values are reused.
83
+ """This function controls how a benchmark or a set of benchmarks are run. If you are only running a single benchmark it can be simpler to just run it directly, but if you are running several benchmarks together and want them to be sampled at different levels of fidelity or published together in a single report this function enables that workflow. If you have an expensive function, it can be useful to view low fidelity results as they are computed but also continue to compute higher fidelity results while reusing previously computed values. The parameters min_level and max_level let you specify how to progressivly increase the sampling resolution of the benchmark sweep. By default cache_results=True so that previous values are reused.
84
84
 
85
85
  Args:
86
86
  min_level (int, optional): The minimum level to start sampling at. Defaults to 2.
@@ -93,14 +93,14 @@ class BenchRunner:
93
93
  show (bool, optional): show the results in the local web browser. Defaults to False.
94
94
  save (bool, optional): save the results to disk in index.html. Defaults to False.
95
95
  grouped (bool, optional): Produce a single html page with all the benchmarks included. Defaults to True.
96
- use_cache (bool, optional): Use the sample cache to reused previous results. Defaults to True.
96
+ cache_results (bool, optional): Use the sample cache to reused previous results. Defaults to True.
97
97
 
98
98
  Returns:
99
99
  List[BenchCfg]: A list of bencher instances
100
100
  """
101
101
  if run_cfg is None:
102
102
  run_cfg = deepcopy(self.run_cfg)
103
- run_cfg = BenchRunner.setup_run_cfg(run_cfg, use_cache=use_cache)
103
+ run_cfg = BenchRunner.setup_run_cfg(run_cfg, cache_results=cache_results)
104
104
 
105
105
  if level is not None:
106
106
  min_level = level
@@ -136,7 +136,7 @@ class BenchRunner:
136
136
  else:
137
137
  report.publish(remote_callback=self.publisher, debug=debug)
138
138
  if show:
139
- self.servers.append(report.show())
139
+ self.servers.append(report.show(self.run_cfg))
140
140
 
141
141
  def shutdown(self):
142
142
  while self.servers:
@@ -327,7 +327,7 @@ class Bench(BenchPlotServer):
327
327
  logging.info("Copy run cfg from bench class")
328
328
 
329
329
  if run_cfg.only_plot:
330
- run_cfg.use_cache = True
330
+ run_cfg.cache_results = True
331
331
 
332
332
  self.last_run_cfg = run_cfg
333
333
 
@@ -371,7 +371,7 @@ class Bench(BenchPlotServer):
371
371
  title += "s"
372
372
  title += ": " + ", ".join([f"{c[0].name}={c[1]}" for c in const_vars_in])
373
373
  else:
374
- title = " ".join([i.name for i in result_vars_in])
374
+ title = "Recording: " + ", ".join([i.name for i in result_vars_in])
375
375
 
376
376
  if run_cfg.level > 0:
377
377
  inputs = []
@@ -448,7 +448,7 @@ class Bench(BenchPlotServer):
448
448
  if run_cfg.clear_cache:
449
449
  c.delete(bench_cfg_hash)
450
450
  logging.info("cleared cache")
451
- elif run_cfg.use_cache:
451
+ elif run_cfg.cache_results:
452
452
  logging.info(
453
453
  f"checking for previously calculated results with key: {bench_cfg_hash}"
454
454
  )
@@ -813,7 +813,7 @@ class Bench(BenchPlotServer):
813
813
  cache_name="sample_cache",
814
814
  tag_index=True,
815
815
  size_limit=self.cache_size,
816
- use_cache=run_cfg.use_sample_cache,
816
+ cache_results=run_cfg.cache_samples,
817
817
  )
818
818
 
819
819
  def clear_tag_from_sample_cache(self, tag: str, run_cfg):
@@ -98,7 +98,7 @@ def example_composable_container_video(
98
98
 
99
99
  if __name__ == "__main__":
100
100
  ex_run_cfg = bch.BenchRunCfg()
101
- ex_run_cfg.use_sample_cache = False
101
+ ex_run_cfg.cache_samples = False
102
102
  # ex_run_cfg.level = 2
103
103
  ex_report = bch.BenchReport()
104
104
  example_composable_container_image(ex_run_cfg, report=ex_report)
@@ -144,7 +144,7 @@ def example_composable_container_image(
144
144
 
145
145
  # if __name__ == "__main__":
146
146
  # ex_run_cfg = bch.BenchRunCfg()
147
- # ex_run_cfg.use_sample_cache = False
147
+ # ex_run_cfg.cache_samples = False
148
148
  # # ex_run_cfg.level = 2
149
149
  # ex_report = bch.BenchReport()
150
150
  # example_composable_container_image(ex_run_cfg, report=ex_report)
@@ -157,4 +157,4 @@ if __name__ == "__main__":
157
157
  # bench_runner.add_run(bench_image)
158
158
  bench_runner.add_run(example_composable_container_image)
159
159
 
160
- bench_runner.run(level=6, show=True, use_cache=False)
160
+ bench_runner.run(level=6, show=True, cache_results=False)
@@ -95,4 +95,4 @@ if __name__ == "__main__":
95
95
  PlotFunctions().to_gui()
96
96
  bench_run = bch.BenchRunner("bench_runner_test")
97
97
  bench_run.add_run(example_holosweep)
98
- bench_run.run(level=6, show=True, use_cache=False)
98
+ bench_run.run(level=6, show=True, cache_results=False)
@@ -58,7 +58,7 @@ class BenchPolygons(bch.ParametrizedSweep):
58
58
  def example_image(
59
59
  run_cfg: bch.BenchRunCfg = bch.BenchRunCfg(), report: bch.BenchReport = bch.BenchReport()
60
60
  ) -> bch.Bench:
61
- run_cfg.use_cache = False
61
+ run_cfg.cache_results = False
62
62
  bench = bch.Bench("polygons", BenchPolygons(), run_cfg=run_cfg, report=report)
63
63
 
64
64
  bench.result_vars = ["polygon", "area"]
@@ -142,7 +142,7 @@ if __name__ == "__main__":
142
142
  # def example_image_pairs()
143
143
 
144
144
  ex_run_cfg = bch.BenchRunCfg()
145
- ex_run_cfg.use_sample_cache = True
145
+ ex_run_cfg.cache_samples = True
146
146
  # ex_run_cfg.debug = True
147
147
  # ex_run_cfg.repeats = 2
148
148
  ex_run_cfg.level = 4
@@ -73,7 +73,7 @@ def example_image_vid_sequential1(
73
73
 
74
74
  if __name__ == "__main__":
75
75
  ex_run_cfg = bch.BenchRunCfg()
76
- ex_run_cfg.use_sample_cache = True
76
+ ex_run_cfg.cache_samples = True
77
77
  ex_run_cfg.overwrite_sample_cache = True
78
78
  ex_run_cfg.level = 3
79
79
 
@@ -2,7 +2,7 @@ import bencher as bch
2
2
 
3
3
 
4
4
  class UnreliableClass(bch.ParametrizedSweep):
5
- """This class helps demonstrate benchmarking a function that sometimes crashes during sampling. By using BenchRunCfg.use_sample_cache you can store the results of every call to the benchmark function so data is not lost in the event of a crash. However, because cache invalidation is hard (https://martinfowler.com/bliki/TwoHardThings.html) you need to be mindful of how you could get bad results due to incorrect cache data. For example if you change your benchmark function and use the sample cache you will not get correct values; you will need to use BenchRunCfg.clear_sample_cache to purge any out of date results."""
5
+ """This class helps demonstrate benchmarking a function that sometimes crashes during sampling. By using BenchRunCfg.cache_samples you can store the results of every call to the benchmark function so data is not lost in the event of a crash. However, because cache invalidation is hard (https://martinfowler.com/bliki/TwoHardThings.html) you need to be mindful of how you could get bad results due to incorrect cache data. For example if you change your benchmark function and use the sample cache you will not get correct values; you will need to use BenchRunCfg.clear_sample_cache to purge any out of date results."""
6
6
 
7
7
  input_val = bch.IntSweep(
8
8
  default=0,
@@ -31,7 +31,7 @@ def example_sample_cache(
31
31
  report: bch.BenchReport = bch.BenchReport(),
32
32
  trigger_crash: bool = False,
33
33
  ) -> bch.Bench:
34
- """This example shows how to use the use_sample_cache option to deal with unreliable functions and to continue benchmarking using previously calculated results even if the code crashed during the run
34
+ """This example shows how to use the cache_samples option to deal with unreliable functions and to continue benchmarking using previously calculated results even if the code crashed during the run
35
35
 
36
36
  Args:
37
37
  run_cfg (BenchRunCfg): configuration of how to perform the param sweep
@@ -50,7 +50,7 @@ def example_sample_cache(
50
50
  title="Example Crashy Function with the sample_cache",
51
51
  input_vars=[UnreliableClass.param.input_val],
52
52
  result_vars=[UnreliableClass.param.return_value, UnreliableClass.param.trigger_crash],
53
- description="""This example shows how to use the use_sample_cache option to deal with unreliable functions and to continue benchmarking using previously calculated results even if the code crashed during the run""",
53
+ description="""This example shows how to use the cache_samples option to deal with unreliable functions and to continue benchmarking using previously calculated results even if the code crashed during the run""",
54
54
  run_cfg=run_cfg,
55
55
  post_description="The input_val vs return value graph is a straight line as expected and there is no record of the fact the benchmark crashed halfway through. The second graph shows that for values >1 the trigger_crash value had to be 0 in order to proceed",
56
56
  )
@@ -63,7 +63,7 @@ if __name__ == "__main__":
63
63
  ex_run_cfg.executor = bch.Executors.SCOOP
64
64
 
65
65
  # this will store the result of of every call to crashy_fn
66
- ex_run_cfg.use_sample_cache = True
66
+ ex_run_cfg.cache_samples = True
67
67
  ex_run_cfg.clear_sample_cache = True
68
68
 
69
69
  try:
@@ -51,7 +51,7 @@ def assert_call_counts(bencher, run_cfg, wrapper_calls=-1, fn_calls=-1, cache_ca
51
51
 
52
52
  def example_cache_context() -> bch.Bench:
53
53
  run_cfg = bch.BenchRunCfg()
54
- run_cfg.use_sample_cache = True
54
+ run_cfg.cache_samples = True
55
55
  run_cfg.only_hash_tag = True
56
56
  run_cfg.repeats = 2
57
57
  run_cfg.parallel = False
@@ -78,14 +78,11 @@ class TuringPattern(bch.ParametrizedSweep):
78
78
  def example_video(
79
79
  run_cfg: bch.BenchRunCfg = bch.BenchRunCfg(), report: bch.BenchReport = bch.BenchReport()
80
80
  ) -> bch.Bench:
81
- # run_cfg.auto_plot = False
82
- # run_cfg.use_sample_cache = True
83
- bench = bch.Bench("example_video", TuringPattern(), run_cfg=run_cfg, report=report)
81
+ bench = TuringPattern().to_bench(run_cfg, report)
84
82
 
85
83
  bench.plot_sweep(
86
84
  "Turing patterns with different parameters",
87
- input_vars=[TuringPattern.param.alpha, TuringPattern.param.beta],
88
- # input_vars=[TuringPattern.param.alpha],
85
+ input_vars=["alpha", "beta"],
89
86
  result_vars=[TuringPattern.param.video],
90
87
  )
91
88
 
@@ -96,14 +93,17 @@ def example_video_tap(
96
93
  run_cfg: bch.BenchRunCfg = bch.BenchRunCfg(), report: bch.BenchReport = bch.BenchReport()
97
94
  ) -> bch.Bench: # pragma: no cover
98
95
  bench = TuringPattern().to_bench(run_cfg=run_cfg, report=report)
99
- res = bench.plot_sweep(
100
- input_vars=["alpha", "beta"],
101
- # result_vars=["video","score"],
102
- # result_vars=["score"],
103
- run_cfg=run_cfg,
104
- )
96
+ res = bench.plot_sweep(input_vars=["alpha", "beta"])
105
97
 
106
- bench.report.append(res.to_video_grid())
98
+ bench.report.append(res.to_video_grid(result_types=(bch.ResultVideo)))
99
+
100
+ res = bench.plot_sweep(input_vars=["alpha"])
101
+ bench.report.append(
102
+ res.to_video_grid(
103
+ result_types=(bch.ResultVideo),
104
+ compose_method_list=[bch.ComposeType.right],
105
+ )
106
+ )
107
107
 
108
108
  return bench
109
109
 
@@ -111,7 +111,7 @@ def example_video_tap(
111
111
  if __name__ == "__main__":
112
112
  run_cfg_ex = bch.BenchRunCfg()
113
113
  run_cfg_ex.level = 2
114
- run_cfg_ex.use_sample_cache = True
114
+ run_cfg_ex.cache_samples = True
115
115
  run_cfg_ex.only_hash_tag = True
116
116
 
117
117
  # example_video(run_cfg_ex).report.show()
@@ -30,7 +30,7 @@ if __name__ == "__main__":
30
30
  post_description="Here you can see the output plot of sin theta between 0 and pi. In the tabs at the top you can also view 3 tabular representations of the data",
31
31
  run_cfg=bch.BenchRunCfg(
32
32
  auto_plot=True,
33
- use_cache=False,
33
+ cache_results=False,
34
34
  repeats=2,
35
35
  ),
36
36
  )
@@ -34,18 +34,9 @@ def example_1D_float_repeats(
34
34
  """This example shows how to sample a 1 dimensional float variable and plot the result of passing that parameter sweep to the benchmarking function"""
35
35
 
36
36
  bench = Example1D().to_bench(run_cfg, report)
37
- # bench.plot_sweep(pass_repeat=True,plot_callbacks=False)
38
-
39
- # res = bench.get_result()
40
37
  bench.run_cfg = bch.BenchRunCfg(repeats=4)
41
38
  bench.plot_sweep(pass_repeat=True)
42
39
 
43
- res = bench.get_result()
44
- bench.report.append(res.to_auto())
45
- bench.report.append(res.to_scatter())
46
- bench.report.append(res.to_scatter_jitter(override=True))
47
-
48
- # bench.report.append()
49
40
  return bench
50
41
 
51
42
 
@@ -92,5 +92,5 @@
92
92
 
93
93
  # if __name__ == "__main__":
94
94
  # ex_run_cfg = bch.BenchRunCfg()
95
- # ex_run_cfg.use_cache = True
95
+ # ex_run_cfg.cache_results = True
96
96
  # example_cone(ex_run_cfg).report.show()
@@ -36,7 +36,10 @@ class JobFuture:
36
36
  self.res = res
37
37
  self.future = future
38
38
  # either a result or a future needs to be passed
39
- assert self.res is not None or self.future is not None
39
+ assert (
40
+ self.res is not None or self.future is not None
41
+ ), "make sure you are returning a dict or super().__call__(**kwargs) from your __call__ function"
42
+
40
43
  self.cache = cache
41
44
 
42
45
  def result(self):
@@ -59,7 +62,7 @@ class Executors(StrEnum):
59
62
  # THREADS=auto() #not that useful as most bench code is cpu bound
60
63
 
61
64
  @staticmethod
62
- def factory(provider: Executors) -> Future():
65
+ def factory(provider: Executors) -> Future:
63
66
  providers = {
64
67
  Executors.SERIAL: None,
65
68
  Executors.MULTIPROCESSING: ProcessPoolExecutor(),
@@ -78,11 +81,11 @@ class FutureCache:
78
81
  cache_name: str = "fcache",
79
82
  tag_index: bool = True,
80
83
  size_limit: int = int(20e9), # 20 GB
81
- use_cache=True,
84
+ cache_results=True,
82
85
  ):
83
86
  self.executor_type = executor
84
87
  self.executor = None
85
- if use_cache:
88
+ if cache_results:
86
89
  self.cache = Cache(f"cachedir/{cache_name}", tag_index=tag_index, size_limit=size_limit)
87
90
  logging.info(f"cache dir: {self.cache.directory}")
88
91
  else:
@@ -33,7 +33,8 @@ from bencher.results.composable_container.composable_container_panel import (
33
33
  class ReduceType(Enum):
34
34
  AUTO = auto() # automatically determine the best way to reduce the dataset
35
35
  SQUEEZE = auto() # remove any dimensions of length 1
36
- REDUCE = auto() # get the mean and std dev of the the "repeat" dimension
36
+ REDUCE = auto() # get the mean and std dev of the data along the "repeat" dimension
37
+ MINMAX = auto() # get the minimum and maximum of data along the "repeat" dimension
37
38
  NONE = auto() # don't reduce
38
39
 
39
40
 
@@ -93,16 +94,35 @@ class BenchResultBase(OptunaResult):
93
94
  if reduce == ReduceType.AUTO:
94
95
  reduce = ReduceType.REDUCE if self.bench_cfg.repeats > 1 else ReduceType.SQUEEZE
95
96
 
96
- ds_out = self.ds if result_var is None else self.ds[result_var.name]
97
+ ds_out = self.ds.copy()
98
+
99
+ if result_var is not None:
100
+ ds_out = ds_out[result_var.name]
101
+
102
+ def rename_ds(dataset: xr.Dataset, suffix: str):
103
+ # var_name =
104
+ rename_dict = {var: f"{var}_{suffix}" for var in dataset.data_vars}
105
+ ds = dataset.rename_vars(rename_dict)
106
+ return ds
97
107
 
98
108
  match reduce:
99
109
  case ReduceType.REDUCE:
100
110
  ds_reduce_mean = ds_out.mean(dim="repeat", keep_attrs=True)
101
- ds_reduce_std = ds_out.std(dim="repeat", keep_attrs=True)
102
-
103
- for v in ds_reduce_mean.data_vars:
104
- ds_reduce_mean[f"{v}_std"] = ds_reduce_std[v]
105
- ds_out = ds_reduce_mean
111
+ ds_reduce_std = ds_out.std(dim="repeat", keep_attrs=False)
112
+ ds_reduce_std = rename_ds(ds_reduce_std, "std")
113
+ ds_out = xr.merge([ds_reduce_mean, ds_reduce_std])
114
+ ds_out = xr.merge(
115
+ [
116
+ ds_reduce_mean,
117
+ ds_reduce_std,
118
+ ]
119
+ )
120
+ case ReduceType.MINMAX: # TODO, need to pass mean, center of minmax, and minmax
121
+ ds_reduce_mean = ds_out.mean(dim="repeat", keep_attrs=True)
122
+ ds_reduce_min = ds_out.min(dim="repeat")
123
+ ds_reduce_max = ds_out.max(dim="repeat")
124
+ ds_reduce_range = rename_ds(ds_reduce_max - ds_reduce_min, "range")
125
+ ds_out = xr.merge([ds_reduce_mean, ds_reduce_range])
106
126
  case ReduceType.SQUEEZE:
107
127
  ds_out = ds_out.squeeze(drop=True)
108
128
  if level is not None:
@@ -100,7 +100,7 @@ class ComposableContainerVideo(ComposableContainerBase):
100
100
  print("rc", render_cfg)
101
101
  _, frame_duration = self.calculate_duration(float(len(self.container)), render_cfg)
102
102
  out = None
103
- print(f"using compose type{render_cfg.compose_method}")
103
+ print(f"using compose type: {render_cfg.compose_method}")
104
104
  max_duration = 0.0
105
105
 
106
106
  for i in range(len(self.container)):
@@ -129,8 +129,8 @@ class ComposableContainerVideo(ComposableContainerBase):
129
129
  # case ComposeType.overlay:
130
130
  # for i in range(len(self.container)):
131
131
  # self.container[i].alpha = 1./len(self.container)
132
- # out = CompositeVideoClip(self.container, bg_color=render_args.background_col)
133
- # out.duration = fps
132
+ # out = CompositeVideoClip(self.container, bg_color=render_cfg.background_col)
133
+ # # out.duration = fps
134
134
  case _:
135
135
  raise RuntimeError("This compose type is not supported")
136
136
 
@@ -190,6 +190,7 @@ class HoloviewResult(PanelResult):
190
190
  cat_range=VarRange(0, None),
191
191
  repeats_range=VarRange(2, None),
192
192
  reduce=ReduceType.REDUCE,
193
+ # reduce=ReduceType.MINMAX,
193
194
  target_dimension=2,
194
195
  result_var=result_var,
195
196
  result_types=(ResultVar),
@@ -200,7 +201,6 @@ class HoloviewResult(PanelResult):
200
201
  self, dataset: xr.Dataset, result_var: Parameter, **kwargs
201
202
  ) -> Optional[hv.Curve]:
202
203
  hvds = hv.Dataset(dataset)
203
- # result_var = self.get_results_var_list(result_var)[0]
204
204
  title = self.title_from_ds(dataset, result_var, **kwargs)
205
205
  pt = hvds.to(hv.Curve).opts(title=title, **kwargs)
206
206
  pt *= hvds.to(hv.Spread).opts(alpha=0.2)
@@ -39,6 +39,7 @@ class VideoSummaryResult(BenchResultBase):
39
39
  pane_collection: pn.pane = None,
40
40
  time_sequence_dimension=0,
41
41
  target_duration: float = None,
42
+ compose_method_list: List = None,
42
43
  **kwargs,
43
44
  ) -> Optional[pn.panel]:
44
45
  """Returns the results compiled into a video
@@ -47,6 +48,7 @@ class VideoSummaryResult(BenchResultBase):
47
48
  result_var (Parameter, optional): The result var to plot. Defaults to None.
48
49
  result_types (tuple, optional): The types of result var to convert to video. Defaults to (ResultImage,).
49
50
  collection (pn.pane, optional): If there are multiple results, use this collection to stack them. Defaults to pn.Row().
51
+ compose_method_list (List, optional): Defines how each of the dimensions is composed in the video, i.e., concatenate the videos horizontally, vertically, sequentially or alpha overlay. See bch.ComposeType for the options.
50
52
 
51
53
  Returns:
52
54
  Optional[pn.panel]: a panel pane with a video of all results concatenated together
@@ -74,6 +76,7 @@ class VideoSummaryResult(BenchResultBase):
74
76
  rv,
75
77
  time_sequence_dimension=time_sequence_dimension,
76
78
  target_duration=target_duration,
79
+ compose_method_list=compose_method_list,
77
80
  **kwargs,
78
81
  )
79
82
  )
@@ -88,6 +91,7 @@ class VideoSummaryResult(BenchResultBase):
88
91
  time_sequence_dimension=0,
89
92
  video_controls: VideoControls = None,
90
93
  target_duration: float = None,
94
+ compose_method_list: List = None,
91
95
  **kwargs,
92
96
  ):
93
97
  cvc = self._to_video_panes_ds(
@@ -100,6 +104,7 @@ class VideoSummaryResult(BenchResultBase):
100
104
  result_var=result_var,
101
105
  final=True,
102
106
  reverse=reverse,
107
+ compose_method_list=compose_method_list,
103
108
  target_duration=target_duration,
104
109
  **kwargs,
105
110
  )
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "holobench"
3
- version = "1.35.0"
3
+ version = "1.36.1"
4
4
 
5
5
  authors = [{ name = "Austin Gregg-Smith", email = "blooop@gmail.com" }]
6
6
  description = "A package for benchmarking the performance of arbitrary functions"
@@ -52,6 +52,26 @@ python = "3.12.*"
52
52
  [tool.pixi.pypi-dependencies]
53
53
  holobench = { path = ".", editable = true }
54
54
 
55
+ # Define a consolidated docs feature with all dependencies
56
+ [tool.pixi.feature.docs.dependencies]
57
+ python = "3.10.*"
58
+ firefox = ">=134.0,<135" # Conda package
59
+ geckodriver = "*" # Conda package
60
+ sphinx = "*"
61
+ gtk3 = "*"
62
+ python-chromedriver-binary = "*"
63
+
64
+ [tool.pixi.feature.docs.pypi-dependencies]
65
+ # All Python documentation dependencies
66
+ pydata-sphinx-theme = "*"
67
+ sphinx-rtd-theme = "*"
68
+ sphinxcontrib-napoleon = "*"
69
+ sphinx-autoapi = "*"
70
+ nbsite = "==0.8.7"
71
+ jupyter_bokeh = "*"
72
+ selenium = "*"
73
+ chromedriver_binary = "*"
74
+
55
75
  [project.optional-dependencies]
56
76
  test = [
57
77
  "pylint>=3.2.5,<=3.3.3",
@@ -63,6 +83,7 @@ test = [
63
83
  "pre-commit<=4.0.1",
64
84
  ]
65
85
 
86
+
66
87
  #adds support for embedding rerun windows (alpha)
67
88
  rerun = ["rerun-sdk==0.21.0", "rerun-notebook", "flask", "flask-cors"]
68
89
 
@@ -75,7 +96,7 @@ include = ["bencher"]
75
96
 
76
97
  # Environments
77
98
  [tool.pixi.environments]
78
- default = { features = ["test", "rerun"], solve-group = "default" }
99
+ default = { features = ["test", "rerun", "docs"], solve-group = "default" }
79
100
  py310 = ["py310", "test", "rerun"]
80
101
  py311 = ["py311", "test", "rerun"]
81
102
  py312 = ["py312", "test", "rerun"]
@@ -112,13 +133,18 @@ clear-pixi = "rm -rf .pixi pixi.lock"
112
133
  setup-git-merge-driver = "git config merge.ours.driver true"
113
134
  update-from-template-repo = "./scripts/update_from_template.sh"
114
135
 
136
+ docs = "rm -rf docs/builtdocs; rm -rf docs/jupyter_execute; sphinx-build -b html docs docs/builtdocs"
137
+
138
+
115
139
  #demos
116
140
  demo = "python3 bencher/example/example_image.py"
117
141
  demo_rerun = "python3 bencher/example/example_rerun.py"
118
142
 
143
+
119
144
  [tool.pylint]
120
145
  extension-pkg-whitelist = ["numpy", "scipy"]
121
146
  jobs = 16 #detect number of cores
147
+ ignore-paths = "docs/*"
122
148
 
123
149
  [tool.pylint.'MESSAGES CONTROL']
124
150
  disable = "C,logging-fstring-interpolation,line-too-long,fixme,missing-module-docstring,too-many-instance-attributes,too-few-public-methods,too-many-arguments,too-many-locals,too-many-branches,too-many-statements,use-dict-literal,duplicate-code,too-many-public-methods,too-many-nested-blocks,cyclic-import,too-many-positional-arguments"
File without changes
File without changes
File without changes
File without changes