holobench 1.30.1__tar.gz → 1.30.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (96)
  1. {holobench-1.30.1 → holobench-1.30.3}/PKG-INFO +14 -15
  2. {holobench-1.30.1 → holobench-1.30.3}/README.md +1 -2
  3. {holobench-1.30.1 → holobench-1.30.3}/bencher/bench_cfg.py +9 -9
  4. {holobench-1.30.1 → holobench-1.30.3}/bencher/bench_plot_server.py +1 -1
  5. {holobench-1.30.1 → holobench-1.30.3}/bencher/bench_runner.py +1 -1
  6. {holobench-1.30.1 → holobench-1.30.3}/bencher/bencher.py +7 -7
  7. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/benchmark_data.py +1 -1
  8. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_categorical.py +1 -1
  9. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_custom_sweep.py +1 -1
  10. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_custom_sweep2.py +1 -1
  11. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_dataframe.py +0 -1
  12. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_float3D.py +24 -44
  13. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_image1.py +0 -1
  14. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_levels.py +1 -1
  15. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_levels2.py +1 -1
  16. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_pareto.py +1 -1
  17. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_sample_cache_context.py +2 -2
  18. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_simple.py +5 -5
  19. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/meta/example_meta.py +1 -1
  20. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/shelved/example_kwargs.py +1 -1
  21. {holobench-1.30.1 → holobench-1.30.3}/bencher/plotting/plot_filter.py +2 -2
  22. {holobench-1.30.1 → holobench-1.30.3}/bencher/plotting/plt_cnt_cfg.py +10 -3
  23. {holobench-1.30.1 → holobench-1.30.3}/bencher/results/bench_result_base.py +5 -5
  24. {holobench-1.30.1 → holobench-1.30.3}/bencher/results/composable_container/composable_container_base.py +1 -1
  25. {holobench-1.30.1 → holobench-1.30.3}/bencher/results/optuna_result.py +3 -3
  26. {holobench-1.30.1 → holobench-1.30.3}/bencher/variables/inputs.py +5 -5
  27. {holobench-1.30.1 → holobench-1.30.3}/bencher/variables/sweep_base.py +1 -1
  28. {holobench-1.30.1 → holobench-1.30.3}/bencher/variables/time.py +3 -3
  29. {holobench-1.30.1 → holobench-1.30.3}/bencher/video_writer.py +1 -1
  30. {holobench-1.30.1 → holobench-1.30.3}/pyproject.toml +18 -38
  31. {holobench-1.30.1 → holobench-1.30.3}/.gitignore +0 -0
  32. {holobench-1.30.1 → holobench-1.30.3}/LICENSE +0 -0
  33. {holobench-1.30.1 → holobench-1.30.3}/bencher/__init__.py +0 -0
  34. {holobench-1.30.1 → holobench-1.30.3}/bencher/bench_report.py +0 -0
  35. {holobench-1.30.1 → holobench-1.30.3}/bencher/caching.py +0 -0
  36. {holobench-1.30.1 → holobench-1.30.3}/bencher/class_enum.py +0 -0
  37. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/__init__.py +0 -0
  38. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_all.py +0 -0
  39. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_composable_container.py +0 -0
  40. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_composable_container2.py +0 -0
  41. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_consts.py +0 -0
  42. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_docs.py +0 -0
  43. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_filepath.py +0 -0
  44. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_float_cat.py +0 -0
  45. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_floats.py +0 -0
  46. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_floats2D.py +0 -0
  47. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_holosweep.py +0 -0
  48. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_holosweep_objects.py +0 -0
  49. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_holosweep_tap.py +0 -0
  50. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_image.py +0 -0
  51. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_sample_cache.py +0 -0
  52. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_simple_bool.py +0 -0
  53. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_simple_cat.py +0 -0
  54. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_simple_float.py +0 -0
  55. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_simple_float2d.py +0 -0
  56. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_strings.py +0 -0
  57. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_time_event.py +0 -0
  58. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_video.py +0 -0
  59. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/example_workflow.py +0 -0
  60. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/experimental/example_bokeh_plotly.py +0 -0
  61. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/experimental/example_hover_ex.py +0 -0
  62. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/experimental/example_hvplot_explorer.py +0 -0
  63. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/experimental/example_interactive.py +0 -0
  64. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/experimental/example_streamnd.py +0 -0
  65. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/experimental/example_streams.py +0 -0
  66. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/experimental/example_template.py +0 -0
  67. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/experimental/example_updates.py +0 -0
  68. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/experimental/example_vector.py +0 -0
  69. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/meta/example_meta_cat.py +0 -0
  70. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/meta/example_meta_float.py +0 -0
  71. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/meta/example_meta_levels.py +0 -0
  72. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/optuna/example_optuna.py +0 -0
  73. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/shelved/example_float2D_scatter.py +0 -0
  74. {holobench-1.30.1 → holobench-1.30.3}/bencher/example/shelved/example_float3D_cone.py +0 -0
  75. {holobench-1.30.1 → holobench-1.30.3}/bencher/job.py +0 -0
  76. {holobench-1.30.1 → holobench-1.30.3}/bencher/optuna_conversions.py +0 -0
  77. {holobench-1.30.1 → holobench-1.30.3}/bencher/plotting/__init__.py +0 -0
  78. {holobench-1.30.1 → holobench-1.30.3}/bencher/results/__init__.py +0 -0
  79. {holobench-1.30.1 → holobench-1.30.3}/bencher/results/bench_result.py +0 -0
  80. {holobench-1.30.1 → holobench-1.30.3}/bencher/results/composable_container/__init__.py +0 -0
  81. {holobench-1.30.1 → holobench-1.30.3}/bencher/results/composable_container/composable_container_dataframe.py +0 -0
  82. {holobench-1.30.1 → holobench-1.30.3}/bencher/results/composable_container/composable_container_panel.py +0 -0
  83. {holobench-1.30.1 → holobench-1.30.3}/bencher/results/composable_container/composable_container_video.py +0 -0
  84. {holobench-1.30.1 → holobench-1.30.3}/bencher/results/dataset_result.py +0 -0
  85. {holobench-1.30.1 → holobench-1.30.3}/bencher/results/float_formatter.py +0 -0
  86. {holobench-1.30.1 → holobench-1.30.3}/bencher/results/holoview_result.py +0 -0
  87. {holobench-1.30.1 → holobench-1.30.3}/bencher/results/panel_result.py +0 -0
  88. {holobench-1.30.1 → holobench-1.30.3}/bencher/results/plotly_result.py +0 -0
  89. {holobench-1.30.1 → holobench-1.30.3}/bencher/results/video_result.py +0 -0
  90. {holobench-1.30.1 → holobench-1.30.3}/bencher/results/video_summary.py +0 -0
  91. {holobench-1.30.1 → holobench-1.30.3}/bencher/utils.py +0 -0
  92. {holobench-1.30.1 → holobench-1.30.3}/bencher/variables/__init__.py +0 -0
  93. {holobench-1.30.1 → holobench-1.30.3}/bencher/variables/parametrised_sweep.py +0 -0
  94. {holobench-1.30.1 → holobench-1.30.3}/bencher/variables/results.py +0 -0
  95. {holobench-1.30.1 → holobench-1.30.3}/bencher/worker_job.py +0 -0
  96. {holobench-1.30.1 → holobench-1.30.3}/resource/bencher +0 -0
@@ -1,38 +1,38 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: holobench
3
- Version: 1.30.1
3
+ Version: 1.30.3
4
4
  Summary: A package for benchmarking the performance of arbitrary functions
5
5
  Project-URL: Repository, https://github.com/dyson-ai/bencher
6
6
  Project-URL: Home, https://github.com/dyson-ai/bencher
7
7
  Project-URL: Documentation, https://bencher.readthedocs.io/en/latest/
8
8
  Author-email: Austin Gregg-Smith <blooop@gmail.com>
9
- License-Expression: MIT
10
- License-File: LICENSE
9
+ License: MIT
10
+ Requires-Python: <3.13,>=3.10
11
11
  Requires-Dist: diskcache<=5.6.3,>=5.6
12
12
  Requires-Dist: holoviews<=1.19.1,>=1.15
13
- Requires-Dist: hvplot<=0.11.1,>=0.8
13
+ Requires-Dist: hvplot<=0.10.0,>=0.8
14
14
  Requires-Dist: matplotlib<=3.9.2,>=3.6.3
15
15
  Requires-Dist: moviepy-fix-codec
16
- Requires-Dist: numpy<=2.1.2,>=1.0
16
+ Requires-Dist: numpy<=2.1.3,>=1.0
17
17
  Requires-Dist: optuna<=4.0.0,>=3.2
18
- Requires-Dist: pandas<=2.2.3,>=2.0
19
- Requires-Dist: panel<=1.5.3,>=1.3.6
18
+ Requires-Dist: pandas<=2.2.2,>=2.0
19
+ Requires-Dist: panel<=1.4.5,>=1.3.6
20
20
  Requires-Dist: param<=2.1.1,>=1.13.0
21
21
  Requires-Dist: plotly<=5.24.1,>=5.15
22
- Requires-Dist: scikit-learn<=1.5.2,>=1.2
22
+ Requires-Dist: scikit-learn<=1.5.1,>=1.2
23
23
  Requires-Dist: scoop<=0.7.2.0,>=0.7.0
24
24
  Requires-Dist: sortedcontainers<=2.4,>=2.4
25
25
  Requires-Dist: str2bool<=1.1,>=1.1
26
26
  Requires-Dist: strenum<=0.4.15,>=0.4.0
27
- Requires-Dist: xarray<=2024.10.0,>=2023.7
27
+ Requires-Dist: xarray<=2024.7.0,>=2023.7
28
28
  Provides-Extra: test
29
- Requires-Dist: black<=24.10.0,>=23; extra == 'test'
30
- Requires-Dist: coverage<=7.6.4,>=7.5.4; extra == 'test'
31
- Requires-Dist: hypothesis<=6.116.0,>=6.104.2; extra == 'test'
29
+ Requires-Dist: coverage<=7.6.7,>=7.5.4; extra == 'test'
30
+ Requires-Dist: hypothesis<=6.119.1,>=6.104.2; extra == 'test'
31
+ Requires-Dist: pre-commit<=4.0.1; extra == 'test'
32
32
  Requires-Dist: pylint<=3.3.1,>=3.2.5; extra == 'test'
33
33
  Requires-Dist: pytest-cov<=6.0.0,>=4.1; extra == 'test'
34
34
  Requires-Dist: pytest<=8.3.3,>=7.4; extra == 'test'
35
- Requires-Dist: ruff<=0.7.2,>=0.5.0; extra == 'test'
35
+ Requires-Dist: ruff<=0.7.4,>=0.5.0; extra == 'test'
36
36
  Description-Content-Type: text/markdown
37
37
 
38
38
  # Bencher
@@ -62,7 +62,7 @@ Bencher is a tool to make it easy to benchmark the interactions between the inpu
62
62
 
63
63
  Parameters for bencher are defined using the [param](https://param.holoviz.org/) library as a config class with extra metadata that describes the bounds of the search space you want to measure. You must define a benchmarking function that accepts an instance of the config class and return a dictionary with string metric names and float values.
64
64
 
65
- Parameters are benchmarked by passing in a list N parameters, and an N-Dimensional tensor is returned. You can optionally sample each point multiple times to get back a distribution and also track its value over time. By default the data will be plotted automatically based on the types of parameters you are sampling (e.g, continous, discrete), but you can also pass in a callback to customize plotting.
65
+ Parameters are benchmarked by passing in a list N parameters, and an N-Dimensional tensor is returned. You can optionally sample each point multiple times to get back a distribution and also track its value over time. By default the data will be plotted automatically based on the types of parameters you are sampling (e.g, continuous, discrete), but you can also pass in a callback to customize plotting.
66
66
 
67
67
  The data is stored in a persistent database so that past performance is tracked.
68
68
 
@@ -124,4 +124,3 @@ Start with example_simple_float.py and explore other examples based on your data
124
124
  API documentation can be found at https://bencher.readthedocs.io/en/latest/
125
125
 
126
126
  More documentation is needed for the examples and general workflow.
127
-
@@ -25,7 +25,7 @@ Bencher is a tool to make it easy to benchmark the interactions between the inpu
25
25
 
26
26
  Parameters for bencher are defined using the [param](https://param.holoviz.org/) library as a config class with extra metadata that describes the bounds of the search space you want to measure. You must define a benchmarking function that accepts an instance of the config class and return a dictionary with string metric names and float values.
27
27
 
28
- Parameters are benchmarked by passing in a list N parameters, and an N-Dimensional tensor is returned. You can optionally sample each point multiple times to get back a distribution and also track its value over time. By default the data will be plotted automatically based on the types of parameters you are sampling (e.g, continous, discrete), but you can also pass in a callback to customize plotting.
28
+ Parameters are benchmarked by passing in a list N parameters, and an N-Dimensional tensor is returned. You can optionally sample each point multiple times to get back a distribution and also track its value over time. By default the data will be plotted automatically based on the types of parameters you are sampling (e.g, continuous, discrete), but you can also pass in a callback to customize plotting.
29
29
 
30
30
  The data is stored in a persistent database so that past performance is tracked.
31
31
 
@@ -87,4 +87,3 @@ Start with example_simple_float.py and explore other examples based on your data
87
87
  API documentation can be found at https://bencher.readthedocs.io/en/latest/
88
88
 
89
89
  More documentation is needed for the examples and general workflow.
90
-
@@ -69,7 +69,7 @@ class BenchRunCfg(BenchPlotSrvCfg):
69
69
 
70
70
  serve_pandas_flat: bool = param.Boolean(
71
71
  True,
72
- doc="Serve a flattend pandas summary on the results webpage. If you have a large dataset consider setting this to false if the page loading is slow",
72
+ doc="Serve a flattened pandas summary on the results webpage. If you have a large dataset consider setting this to false if the page loading is slow",
73
73
  )
74
74
 
75
75
  serve_xarray: bool = param.Boolean(
@@ -78,7 +78,7 @@ class BenchRunCfg(BenchPlotSrvCfg):
78
78
  )
79
79
 
80
80
  auto_plot: bool = param.Boolean(
81
- True, doc=" Automaticlly dedeuce the best type of plot for the results."
81
+ True, doc=" Automatically dedeuce the best type of plot for the results."
82
82
  )
83
83
 
84
84
  raise_duplicate_exception: bool = param.Boolean(False, doc=" Used to debug unique plot names.")
@@ -131,13 +131,13 @@ class BenchRunCfg(BenchPlotSrvCfg):
131
131
 
132
132
  render_plotly = param.Boolean(
133
133
  True,
134
- doc="Plotly and Bokeh don't play nicely together, so by default pre-render plotly figures to a non dynamic version so that bokeh plots correctly. If you want interactive 3D graphs, set this to true but be aware that your 2D interactive graphs will probalby stop working.",
134
+ doc="Plotly and Bokeh don't play nicely together, so by default pre-render plotly figures to a non dynamic version so that bokeh plots correctly. If you want interactive 3D graphs, set this to true but be aware that your 2D interactive graphs will probably stop working.",
135
135
  )
136
136
 
137
137
  level = param.Integer(
138
138
  default=0,
139
139
  bounds=[0, 12],
140
- doc="The level parameter is a method of defining the number samples to sweep over in a variable agnostic way, i.e you don't need to specficy the number of samples for each variable as they are calculated dynamically from the sampling level. See example_level.py for more information.",
140
+ doc="The level parameter is a method of defining the number samples to sweep over in a variable agnostic way, i.e you don't need to specify the number of samples for each variable as they are calculated dynamically from the sampling level. See example_level.py for more information.",
141
141
  )
142
142
 
143
143
  run_tag = param.String(
@@ -163,10 +163,10 @@ class BenchRunCfg(BenchPlotSrvCfg):
163
163
  plot_size = param.Integer(default=None, doc="Sets the width and height of the plot")
164
164
  plot_width = param.Integer(
165
165
  default=None,
166
- doc="Sets with width of the plots, this will ovverride the plot_size parameter",
166
+ doc="Sets with width of the plots, this will override the plot_size parameter",
167
167
  )
168
168
  plot_height = param.Integer(
169
- default=None, doc="Sets the height of the plot, this will ovverride the plot_size parameter"
169
+ default=None, doc="Sets the height of the plot, this will override the plot_size parameter"
170
170
  )
171
171
 
172
172
  @staticmethod
@@ -291,7 +291,7 @@ class BenchCfg(BenchRunCfg):
291
291
 
292
292
  tag: str = param.String(
293
293
  "",
294
- doc="Use tags to group different benchmarks together. By default benchmarks are considered distinct from eachother and are identified by the hash of their name and inputs, constants and results and tag, but you can optionally change the hash value to only depend on the tag. This way you can have multiple unrelated benchmarks share values with eachother based only on the tag value.",
294
+ doc="Use tags to group different benchmarks together. By default benchmarks are considered distinct from each other and are identified by the hash of their name and inputs, constants and results and tag, but you can optionally change the hash value to only depend on the tag. This way you can have multiple unrelated benchmarks share values with each other based only on the tag value.",
295
295
  )
296
296
 
297
297
  hash_value: str = param.String(
@@ -311,10 +311,10 @@ class BenchCfg(BenchRunCfg):
311
311
  self.iv_repeat = None
312
312
 
313
313
  def hash_persistent(self, include_repeats) -> str:
314
- """override the default hash function becuase the default hash function does not return the same value for the same inputs. It references internal variables that are unique per instance of BenchCfg
314
+ """override the default hash function because the default hash function does not return the same value for the same inputs. It references internal variables that are unique per instance of BenchCfg
315
315
 
316
316
  Args:
317
- include_repeats (bool) : by default include repeats as part of the hash execpt with using the sample cache
317
+ include_repeats (bool) : by default include repeats as part of the hash except with using the sample cache
318
318
  """
319
319
 
320
320
  if include_repeats:
@@ -84,7 +84,7 @@ class BenchPlotServer:
84
84
  Args:
85
85
  bench_cfg (BenchCfg): benchmark results
86
86
  plots_instance (List[pn.panel]): list of panel objects to display
87
- port (int): use a fixed port to lauch the server
87
+ port (int): use a fixed port to launch the server
88
88
  """
89
89
 
90
90
  # suppress verbose tornado and bokeh output
@@ -85,7 +85,7 @@ class BenchRunner:
85
85
  run_cfg (BenchRunCfg, optional): benchmark run configuration. Defaults to None.
86
86
  publish (bool, optional): Publish the results to git, requires a publish url to be set up. Defaults to False.
87
87
  debug (bool, optional): _description_. Defaults to False.
88
- show (bool, optional): show the results in the local web browswer. Defaults to False.
88
+ show (bool, optional): show the results in the local web browser. Defaults to False.
89
89
  save (bool, optional): save the results to disk in index.html. Defaults to False.
90
90
  grouped (bool, optional): Produce a single html page with all the benchmarks included. Defaults to True.
91
91
  use_cache (bool, optional): Use the sample cache to reused previous results. Defaults to True.
@@ -178,7 +178,7 @@ class Bench(BenchPlotServer):
178
178
  self.plot = True
179
179
 
180
180
  def add_plot_callback(self, callback: Callable[[BenchResult], pn.panel], **kwargs) -> None:
181
- """Add a plotting callback that will be called on any result produced when calling a sweep funciton. You can pass additional arguments to the plotting function with kwargs. e.g. add_plot_callback(bch.BenchResult.to_video_grid,)
181
+ """Add a plotting callback that will be called on any result produced when calling a sweep function. You can pass additional arguments to the plotting function with kwargs. e.g. add_plot_callback(bch.BenchResult.to_video_grid,)
182
182
 
183
183
  Args:
184
184
  callback (Callable[[BenchResult], pn.panel]): _description_
@@ -498,7 +498,7 @@ class Bench(BenchPlotServer):
498
498
  """check that a variable is a subclass of param
499
499
 
500
500
  Args:
501
- variable (param.Parameter): the varible to check
501
+ variable (param.Parameter): the variable to check
502
502
  var_type (str): a string representation of the variable type for better error messages
503
503
 
504
504
  Raises:
@@ -560,7 +560,7 @@ class Bench(BenchPlotServer):
560
560
  """Load historical data from a cache if over_time=true
561
561
 
562
562
  Args:
563
- ds (xr.Dataset): Freshly calcuated data
563
+ ds (xr.Dataset): Freshly calculated data
564
564
  bench_cfg_hash (int): Hash of the input variables used to generate the data
565
565
  clear_history (bool): Optionally clear the history
566
566
 
@@ -593,7 +593,7 @@ class Bench(BenchPlotServer):
593
593
  time_src (datetime | str): a representation of the sample time
594
594
 
595
595
  Returns:
596
- tuple[BenchResult, List, List]: bench_result, function intputs, dimension names
596
+ tuple[BenchResult, List, List]: bench_result, function inputs, dimension names
597
597
  """
598
598
 
599
599
  if time_src is None:
@@ -611,7 +611,7 @@ class Bench(BenchPlotServer):
611
611
  function_inputs = list(
612
612
  zip(product(*dims_cfg.dim_ranges_index), product(*dims_cfg.dim_ranges))
613
613
  )
614
- # xarray stores K N-dimensional arrays of data. Each array is named and in this case we have a nd array for each result variable
614
+ # xarray stores K N-dimensional arrays of data. Each array is named and in this case we have an ND array for each result variable
615
615
  data_vars = {}
616
616
  dataset_list = []
617
617
 
@@ -830,7 +830,7 @@ class Bench(BenchPlotServer):
830
830
 
831
831
  Args:
832
832
  bench_cfg (BenchCfg):
833
- input_var (ParametrizedSweep): The varible to extract metadata from
833
+ input_var (ParametrizedSweep): The variable to extract metadata from
834
834
  """
835
835
 
836
836
  for rv in bench_res.bench_cfg.result_vars:
@@ -852,7 +852,7 @@ class Bench(BenchPlotServer):
852
852
  dsvar.attrs["description"] = input_var.__doc__
853
853
 
854
854
  def report_results(self, bench_cfg: BenchCfg, print_xarray: bool, print_pandas: bool):
855
- """Optionally display the caculated benchmark data as either as pandas, xarray or plot
855
+ """Optionally display the calculated benchmark data as either as pandas, xarray or plot
856
856
 
857
857
  Args:
858
858
  bench_cfg (BenchCfg):
@@ -1,6 +1,6 @@
1
1
  """This file contains an example of how to define benchmarking parameters sweeps. Categorical values are defined as enums and passed to EnumSweep classes, other types of sweeps are defined by their respective classes.
2
2
 
3
- You can define a subclass which contains an input configuration which can be passed to a function in a type safe way. You can combine the subclass with a higher level class which contains more configuation parameters. This is to help manage the complexity of large configuration/parameter spaces.
3
+ You can define a subclass which contains an input configuration which can be passed to a function in a type safe way. You can combine the subclass with a higher level class which contains more configuration parameters. This is to help manage the complexity of large configuration/parameter spaces.
4
4
  """
5
5
 
6
6
  import math
@@ -86,7 +86,7 @@ def example_categorical(
86
86
  ],
87
87
  title="Categorical 3D Example Over Time",
88
88
  result_vars=[ExampleBenchCfgOut.param.out_sin],
89
- description="""Lastly, what if you want to track these distributions over time? Set over_time=True and bencher will cache and display historical resuts alongside the latest result. Use clear_history=True to clear that cache.""",
89
+ description="""Lastly, what if you want to track these distributions over time? Set over_time=True and bencher will cache and display historical results alongside the latest result. Use clear_history=True to clear that cache.""",
90
90
  post_description="The output shows faceted line plot with confidence intervals for the mean value over time.",
91
91
  run_cfg=run_cfg,
92
92
  )
@@ -23,7 +23,7 @@ class Square(bch.ParametrizedSweep):
23
23
  def example_custom_sweep(
24
24
  run_cfg: bch.BenchRunCfg = bch.BenchRunCfg(), report: bch.BenchReport = bch.BenchReport()
25
25
  ) -> bch.Bench:
26
- """This example shows how to define a custom set of value to sample from intead of a uniform sweep
26
+ """This example shows how to define a custom set of value to sample from instead of a uniform sweep
27
27
 
28
28
  Args:
29
29
  run_cfg (BenchRunCfg): configuration of how to perform the param sweep
@@ -17,7 +17,7 @@ class Square(bch.ParametrizedSweep):
17
17
  def example_custom_sweep2(
18
18
  run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None
19
19
  ) -> bch.Bench:
20
- """This example shows how to define a custom set of value to sample from intead of a uniform sweep
20
+ """This example shows how to define a custom set of value to sample from instead of a uniform sweep
21
21
 
22
22
  Args:
23
23
  run_cfg (BenchRunCfg): configuration of how to perform the param sweep
@@ -6,7 +6,6 @@ import holoviews as hv
6
6
 
7
7
 
8
8
  class ExampleMergeDataset(bch.ParametrizedSweep):
9
-
10
9
  value = bch.FloatSweep(default=0, bounds=[0, 10])
11
10
  repeats_x = bch.IntSweep(default=2, bounds=[2, 4])
12
11
  # repeats_y = bch.IntSweep(default=2, bounds=[2, 4])
@@ -1,11 +1,9 @@
1
1
  # pylint: disable=duplicate-code
2
-
3
2
  import numpy as np
4
-
5
3
  import bencher as bch
6
4
 
7
5
 
8
- class VolumeSample(bch.ParametrizedSweep):
6
+ class VolumeSweep(bch.ParametrizedSweep):
9
7
  """A class to represent a 3D point in space."""
10
8
 
11
9
  x = bch.FloatSweep(
@@ -18,10 +16,6 @@ class VolumeSample(bch.ParametrizedSweep):
18
16
  default=0, bounds=[-1.0, 1.0], doc="z coordinate of the sample volume", samples=6
19
17
  )
20
18
 
21
-
22
- class VolumeResult(bch.ParametrizedSweep):
23
- """A class to represent the properties of a volume sample."""
24
-
25
19
  value = bch.ResultVar("ul", doc="The scalar value of the 3D volume field")
26
20
  occupancy = bch.ResultVar(
27
21
  "occupied", doc="If the value is > 0.5 this point is considered occupied"
@@ -32,33 +26,25 @@ class VolumeResult(bch.ParametrizedSweep):
32
26
  3, "vec", doc="The same vector field but only showing values in a sphere of radius 0.5"
33
27
  )
34
28
 
29
+ def __call__(self, **kwargs) -> dict:
30
+ """This function takes a 3D point as input and returns distance of that point to the origin."""
31
+ self.update_params_from_kwargs(**kwargs)
32
+ self.value = np.linalg.norm(np.array([self.x, self.y, self.z])) # distance to origin
33
+ self.occupancy = float(self.value < 0.5)
34
+ # from https://plotly.com/python/3d-volume-plots/
35
+ self.interesting = np.sin(np.pi * self.x) * np.cos(np.pi * self.z) * np.sin(np.pi * self.y)
36
+ self.interesting_vec = [
37
+ np.sin(np.pi * self.x),
38
+ np.cos(np.pi * self.z),
39
+ np.sin(np.pi * self.y),
40
+ ]
35
41
 
36
- def bench_fn(point: VolumeSample) -> VolumeResult:
37
- """This function takes a 3D point as input and returns distance of that point to the origin.
42
+ if self.occupancy:
43
+ self.interesting_vec_and_occ = self.interesting_vec
44
+ else:
45
+ self.interesting_vec_and_occ = [0, 0, 0]
38
46
 
39
- Args:
40
- point (VolumeSample): Sample point
41
-
42
- Returns:
43
- VolumeResult: Value at that point
44
- """
45
- output = VolumeResult()
46
- output.value = np.linalg.norm(np.array([point.x, point.y, point.z])) # distance to origin
47
- output.occupancy = float(output.value < 0.5)
48
- # from https://plotly.com/python/3d-volume-plots/
49
- output.interesting = np.sin(np.pi * point.x) * np.cos(np.pi * point.z) * np.sin(np.pi * point.y)
50
- output.interesting_vec = [
51
- np.sin(np.pi * point.x),
52
- np.cos(np.pi * point.z),
53
- np.sin(np.pi * point.y),
54
- ]
55
-
56
- if output.occupancy:
57
- output.interesting_vec_and_occ = output.interesting_vec
58
- else:
59
- output.interesting_vec_and_occ = [0, 0, 0]
60
-
61
- return output
47
+ return super().__call__()
62
48
 
63
49
 
64
50
  def example_floats3D(
@@ -72,21 +58,15 @@ def example_floats3D(
72
58
  Returns:
73
59
  Bench: results of the parameter sweep
74
60
  """
75
- bench = bch.Bench(
76
- "Bencher_Example_Floats",
77
- bench_fn,
78
- VolumeSample,
79
- run_cfg=run_cfg,
80
- report=report,
81
- )
61
+ bench = VolumeSweep().to_bench(run_cfg=run_cfg, report=report)
82
62
 
83
63
  bench.plot_sweep(
84
64
  title="Float 3D Example",
85
- input_vars=[VolumeSample.param.x, VolumeSample.param.y, VolumeSample.param.z],
65
+ input_vars=["x", "y", "z"],
86
66
  result_vars=[
87
- VolumeResult.param.value,
88
- VolumeResult.param.occupancy,
89
- VolumeResult.param.interesting,
67
+ "value",
68
+ "occupancy",
69
+ "interesting",
90
70
  ],
91
71
  description="""This example shows how to sample 3 floating point variables and plot a volumetric representation of the results. The benchmark function returns the distance to the origin""",
92
72
  post_description="Here you can see concentric shells as the value of the function increases with distance from the origin. The occupancy graph should show a sphere with radius=0.5",
@@ -97,5 +77,5 @@ def example_floats3D(
97
77
 
98
78
  if __name__ == "__main__":
99
79
  ex_run_cfg = bch.BenchRunCfg()
100
- ex_run_cfg.level = 3
80
+ ex_run_cfg.level = 6
101
81
  example_floats3D(ex_run_cfg).report.show()
@@ -72,7 +72,6 @@ def example_image_vid_sequential1(
72
72
 
73
73
 
74
74
  if __name__ == "__main__":
75
-
76
75
  ex_run_cfg = bch.BenchRunCfg()
77
76
  ex_run_cfg.use_sample_cache = True
78
77
  ex_run_cfg.overwrite_sample_cache = True
@@ -81,7 +81,7 @@ def run_levels_1D(bench: bch.Bench) -> bch.Bench:
81
81
  bench.report.append(row)
82
82
 
83
83
  bench.report.append_markdown(
84
- "Level 1 returns a single point at the lower bound of the parameter. Level 2 uses the uppper and lower bounds of the parameter. All subsequent levels are created by adding a sample between each previously calculated sample to ensure that all previous values can be reused while retaining an equal sample spacing. The following plots show the sample points as circles and the corresponding plot of a sin function sampled at that level.",
84
+ "Level 1 returns a single point at the lower bound of the parameter. Level 2 uses the upper and lower bounds of the parameter. All subsequent levels are created by adding a sample between each previously calculated sample to ensure that all previous values can be reused while retaining an equal sample spacing. The following plots show the sample points as circles and the corresponding plot of a sin function sampled at that level.",
85
85
  width=600,
86
86
  )
87
87
 
@@ -15,7 +15,7 @@ class Square(bch.ParametrizedSweep):
15
15
 
16
16
 
17
17
  def example_levels2(run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None) -> bch.Bench:
18
- """This example shows how to define a custom set of value to sample from intead of a uniform sweep
18
+ """This example shows how to define a custom set of value to sample from instead of a uniform sweep
19
19
 
20
20
  Args:
21
21
  run_cfg (BenchRunCfg): configuration of how to perform the param sweep
@@ -29,7 +29,7 @@ def example_pareto(
29
29
 
30
30
  res = bench.plot_sweep(
31
31
  title="Pareto Optimisation with Optuna",
32
- description="This example shows how to plot the pareto front of the tradeoff between multiple criteria. When multiple result variable are defined, and use_optuna=True a pareto plot and the relative importance of each input variable on the output criteria is plotted. A summary of the points on the pareto front is printed as well. You can use the pareto plot to decide the how to trade off one objective for another. Pareto plots are suppored for 2D and 3D. If you have more than 3 result variables the first 3 are selected for the pareto plot. Plotting 4D surfaces is left as an exercise to the reader",
32
+ description="This example shows how to plot the pareto front of the tradeoff between multiple criteria. When multiple result variable are defined, and use_optuna=True a pareto plot and the relative importance of each input variable on the output criteria is plotted. A summary of the points on the pareto front is printed as well. You can use the pareto plot to decide the how to trade off one objective for another. Pareto plots are supported for 2D and 3D. If you have more than 3 result variables the first 3 are selected for the pareto plot. Plotting 4D surfaces is left as an exercise to the reader",
33
33
  input_vars=[
34
34
  ExampleBenchCfgIn.param.theta,
35
35
  ExampleBenchCfgIn.param.offset,
@@ -82,7 +82,7 @@ def example_cache_context() -> bch.Bench:
82
82
  tag="example_tag1",
83
83
  )
84
84
 
85
- # these values have not been calcuated before so there should be 1 fn call
85
+ # these values have not been calculated before so there should be 1 fn call
86
86
  assert_call_counts(bencher, run_cfg, wrapper_calls=1, fn_calls=1, cache_calls=0)
87
87
 
88
88
  # now create a new benchmark that calculates the values of the previous two benchmarks. The tag is the same so those values will be loaded from the cache instead of getting calculated again
@@ -106,7 +106,7 @@ def example_cache_context() -> bch.Bench:
106
106
  tag="example_tag2",
107
107
  )
108
108
 
109
- # Both calls are calcuated becuase the tag is different so they don't hit the cache
109
+ # Both calls are calculated because the tag is different so they don't hit the cache
110
110
  assert_call_counts(bencher, run_cfg, wrapper_calls=2, fn_calls=2, cache_calls=0)
111
111
 
112
112
  return bencher
@@ -12,7 +12,7 @@ from strenum import StrEnum
12
12
  import bencher as bch
13
13
 
14
14
 
15
- # define a class with the output variables you want to benchmark. It must inherit from ParametrizedSweep (which inherits from param.Parametrized). Param is a python library that allows you to track metadata about parameters. I would recommend reading at least the intro: https://param.holoviz.org/. I have extended param with some extra metadata such is the units of the variable so that it can automaticaly be plotted.
15
+ # define a class with the output variables you want to benchmark. It must inherit from ParametrizedSweep (which inherits from param.Parametrized). Param is a python library that allows you to track metadata about parameters. I would recommend reading at least the intro: https://param.holoviz.org/. I have extended param with some extra metadata such is the units of the variable so that it can automatically be plotted.
16
16
  class OutputCfg(bch.ParametrizedSweep):
17
17
  """A class for defining what variables the benchmark function returns and metadata on those variables"""
18
18
 
@@ -41,7 +41,7 @@ class InputCfg(bch.ParametrizedSweep):
41
41
  # The variables must be defined as one of the Sweep types, i.e, FloatSweep, IntSweep, EnumSweep from bencher.bench_vars
42
42
  # theta = FloatSweep(default=0, bounds=[0, math.pi], doc="Input angle", units="rad", samples=30)
43
43
 
44
- # Define sweep variables by passing in an enum class name. The first element of the enum is the default by convention, but you can overrride the default in the constructor
44
+ # Define sweep variables by passing in an enum class name. The first element of the enum is the default by convention, but you can override the default in the constructor
45
45
  algo_setting_enum = bch.EnumSweep(AlgoSetting, default=AlgoSetting.poor)
46
46
 
47
47
  # In this case there are no units so its marked as unitless or ul. You can define how many evenly distributed samples to sample the parameter with
@@ -66,7 +66,7 @@ class InputCfg(bch.ParametrizedSweep):
66
66
 
67
67
  match cfg.algo_setting_enum:
68
68
  case AlgoSetting.noisy:
69
- # add some random noise to the output. When your algorith has noisy output it often is an indication that something is not quite right. The graphs should show that you want to avoid the "noisy" setting in your algorithm
69
+ # add some random noise to the output. When your algorithm has noisy output it often is an indication that something is not quite right. The graphs should show that you want to avoid the "noisy" setting in your algorithm
70
70
  output.accuracy += random.uniform(-10, 10)
71
71
  case AlgoSetting.optimum:
72
72
  output.accuracy += 30 # This is the setting with the best performance, and characterising that is is the goal of the benchmarking
@@ -122,9 +122,9 @@ if __name__ == "__main__":
122
122
  result_vars=[OutputCfg.param.accuracy],
123
123
  const_vars=[(InputCfg.param.algo_setting_float, 1.33)],
124
124
  title="Simple example 1D sweep over time",
125
- description="""Once you have found the optimal settings for your algorithm you want to make sure that the performance is not lost over time. You can set variables to a constant value and in this case the float value is set to its optimum value. The first time this function is run only the results from sweeping the categorical value is plotted (the same as example 1), but the second time it is run a graph the values over time is shown. [Run the code again if you don't see a graph over time]. If the graphs over time shows long term changes (not just noise), it indicate there is another external factor that is affecting your performace over time, i.e. dependencies changing, physical degradation of equipment, an unnoticed bug from a pull request etc...
125
+ description="""Once you have found the optimal settings for your algorithm you want to make sure that the performance is not lost over time. You can set variables to a constant value and in this case the float value is set to its optimum value. The first time this function is run only the results from sweeping the categorical value is plotted (the same as example 1), but the second time it is run a graph the values over time is shown. [Run the code again if you don't see a graph over time]. If the graphs over time shows long term changes (not just noise), it indicate there is another external factor that is affecting your performance over time, i.e. dependencies changing, physical degradation of equipment, an unnoticed bug from a pull request etc...
126
126
 
127
- This shows the basic features of bencher. These examples are purposefully simplified to demonstrate its features in isolation and don't reeally show the real advantages of bencher. If you only have a few inputs and outputs its not that complicated to throw together some plots of performance. The power of bencher is that when you have a system with many moving parts that all interact with eachother, teasing apart those influences becomes much harder because the parameter spaces combine quite quickly into a high dimensional mess. Bencher makes it easier to experiment with different combination of inputs to gain an intuition of the system performance. Bencher can plot up to 6D input natively and you can add custom plots if you have exotic data types or state spaces [WIP].
127
+ This shows the basic features of bencher. These examples are purposefully simplified to demonstrate its features in isolation and don't reeally show the real advantages of bencher. If you only have a few inputs and outputs its not that complicated to throw together some plots of performance. The power of bencher is that when you have a system with many moving parts that all interact with each other, teasing apart those influences becomes much harder because the parameter spaces combine quite quickly into a high dimensional mess. Bencher makes it easier to experiment with different combination of inputs to gain an intuition of the system performance. Bencher can plot up to 6D input natively and you can add custom plots if you have exotic data types or state spaces [WIP].
128
128
  """,
129
129
  post_description="",
130
130
  run_cfg=bch.BenchRunCfg(repeats=10, over_time=True, clear_history=False),
@@ -149,7 +149,7 @@ def example_meta(
149
149
  bench.plot_sweep(
150
150
  title="Meta Bench",
151
151
  description="""## All Combinations of Variable Sweeps and Resulting Plots
152
- This uses bencher to display all the combinatios of plots bencher is able to produce""",
152
+ This uses bencher to display all the combinations of plots bencher is able to produce""",
153
153
  input_vars=[
154
154
  bch.p("float_vars", [0, 1, 2, 3]),
155
155
  BenchMeta.param.categorical_vars,
@@ -10,7 +10,7 @@
10
10
  # trig_func: str = "sin",
11
11
  # **kwargs, # pylint: disable=unused-argument
12
12
  # ) -> dict:
13
- # """All the other examples use classes and parameters to define the inputs and outputs to the function. However it makes the code less flexible when integrating with other systems, so this example shows a more basic interface that accepts and returns dictionaries. The classes still need to be defined however because that is how the sweep and plotting settings are calcuated"""
13
+ # """All the other examples use classes and parameters to define the inputs and outputs to the function. However it makes the code less flexible when integrating with other systems, so this example shows a more basic interface that accepts and returns dictionaries. The classes still need to be defined however because that is how the sweep and plotting settings are calculated"""
14
14
  # output = {}
15
15
 
16
16
  # if trig_func == "sin":
@@ -6,7 +6,7 @@ import panel as pn
6
6
 
7
7
 
8
8
  class VarRange:
9
- """A VarRange represents the bounded and unbounded ranges of integers. This class is used to define filters for various variable types. For example by defining cat_var = VarRange(0,0), calling matches(0) will return true, but any other integer will not match. You can also have unbounded ranges for example VarRange(2,None) will match to 2,3,4... up to infinity. for By default the lower and upper bounds are set to -1 so so that no matter what value is passsed to matches() will return false. Matches only takes 0 and positive integers."""
9
+ """A VarRange represents the bounded and unbounded ranges of integers. This class is used to define filters for various variable types. For example by defining cat_var = VarRange(0,0), calling matches(0) will return true, but any other integer will not match. You can also have unbounded ranges for example VarRange(2,None) will match to 2,3,4... up to infinity. for By default the lower and upper bounds are set to -1 so so that no matter what value is passed to matches() will return false. Matches only takes 0 and positive integers."""
10
10
 
11
11
  def __init__(self, lower_bound: int = 0, upper_bound: int = -1) -> None:
12
12
  """
@@ -71,7 +71,7 @@ class PlotFilter:
71
71
 
72
72
  # @dataclass
73
73
  class PlotMatchesResult:
74
- """Stores information about which properites match the requirements of a particular plotter"""
74
+ """Stores information about which properties match the requirements of a particular plotter"""
75
75
 
76
76
  def __init__(self, plot_filter: PlotFilter, plt_cnt_cfg: PltCntCfg, plot_name: str):
77
77
  match_info = []
@@ -3,7 +3,13 @@ import param
3
3
  from bencher.bench_cfg import BenchCfg
4
4
  from bencher.variables.results import PANEL_TYPES
5
5
 
6
- from bencher.variables.inputs import IntSweep, FloatSweep, BoolSweep, EnumSweep, StringSweep
6
+ from bencher.variables.inputs import (
7
+ IntSweep,
8
+ FloatSweep,
9
+ BoolSweep,
10
+ EnumSweep,
11
+ StringSweep,
12
+ )
7
13
  from bencher.variables.time import TimeSnapshot
8
14
 
9
15
 
@@ -17,12 +23,13 @@ class PltCntCfg(param.Parameterized):
17
23
  vector_len = param.Integer(1, doc="The vector length of the return variable , scalars = len 1")
18
24
  result_vars = param.Integer(1, doc="The number result variables to plot") # todo remove
19
25
  panel_vars = param.List(doc="A list of panel results")
20
- panel_cnt = param.Integer(0, doc="Number of results reprented as panel panes")
26
+ panel_cnt = param.Integer(0, doc="Number of results represent as panel panes")
21
27
  repeats = param.Integer(0, doc="The number of repeat samples")
22
28
  inputs_cnt = param.Integer(0, doc="The number of repeat samples")
23
29
 
24
30
  print_debug = param.Boolean(
25
- True, doc="Print debug information about why a filter matches this config or not"
31
+ True,
32
+ doc="Print debug information about why a filter matches this config or not",
26
33
  )
27
34
 
28
35
  @staticmethod
@@ -60,7 +60,7 @@ class BenchResultBase(OptunaResult):
60
60
  """Generate a holoviews dataset from the xarray dataset.
61
61
 
62
62
  Args:
63
- reduce (ReduceType, optional): Optionally perform reduce options on the dataset. By default the returned dataset will calculate the mean and standard devation over the "repeat" dimension so that the dataset plays nicely with most of the holoviews plot types. Reduce.Sqeeze is used if there is only 1 repeat and you want the "reduce" variable removed from the dataset. ReduceType.None returns an unaltered dataset. Defaults to ReduceType.AUTO.
63
+ reduce (ReduceType, optional): Optionally perform reduce options on the dataset. By default the returned dataset will calculate the mean and standard deviation over the "repeat" dimension so that the dataset plays nicely with most of the holoviews plot types. Reduce.Sqeeze is used if there is only 1 repeat and you want the "reduce" variable removed from the dataset. ReduceType.None returns an unaltered dataset. Defaults to ReduceType.AUTO.
64
64
 
65
65
  Returns:
66
66
  hv.Dataset: results in the form of a holoviews dataset
@@ -77,7 +77,7 @@ class BenchResultBase(OptunaResult):
77
77
  """Generate a summarised xarray dataset.
78
78
 
79
79
  Args:
80
- reduce (ReduceType, optional): Optionally perform reduce options on the dataset. By default the returned dataset will calculate the mean and standard devation over the "repeat" dimension so that the dataset plays nicely with most of the holoviews plot types. Reduce.Sqeeze is used if there is only 1 repeat and you want the "reduce" variable removed from the dataset. ReduceType.None returns an unaltered dataset. Defaults to ReduceType.AUTO.
80
+ reduce (ReduceType, optional): Optionally perform reduce options on the dataset. By default the returned dataset will calculate the mean and standard deviation over the "repeat" dimension so that the dataset plays nicely with most of the holoviews plot types. Reduce.Sqeeze is used if there is only 1 repeat and you want the "reduce" variable removed from the dataset. ReduceType.None returns an unaltered dataset. Defaults to ReduceType.AUTO.
81
81
 
82
82
  Returns:
83
83
  xr.Dataset: results in the form of an xarray dataset
@@ -87,7 +87,7 @@ class BenchResultBase(OptunaResult):
87
87
 
88
88
  ds_out = self.ds if result_var is None else self.ds[result_var.name]
89
89
 
90
- match (reduce):
90
+ match reduce:
91
91
  case ReduceType.REDUCE:
92
92
  ds_reduce_mean = ds_out.mean(dim="repeat", keep_attrs=True)
93
93
  ds_reduce_std = ds_out.std(dim="repeat", keep_attrs=True)
@@ -147,9 +147,9 @@ class BenchResultBase(OptunaResult):
147
147
  opt_val = result_da.max()
148
148
  else:
149
149
  opt_val = result_da.min()
150
- indicies = result_da.where(result_da == opt_val, drop=True).squeeze()
150
+ indices = result_da.where(result_da == opt_val, drop=True).squeeze()
151
151
  logging.info(f"optimal value of {result_var.name}: {opt_val.values}")
152
- return indicies
152
+ return indices
153
153
 
154
154
  def get_optimal_inputs(
155
155
  self,
@@ -65,7 +65,7 @@ class ComposableContainerBase:
65
65
  self.container.append(obj)
66
66
 
67
67
  def render(self):
68
- """Return a representation of the container that can be composed with other render() results. This function can also be used to defer layout and rending options until all the information about the container content is known. You may need to ovverride this method depending on the container. See composable_container_video as an example.
68
+ """Return a representation of the container that can be composed with other render() results. This function can also be used to defer layout and rending options until all the information about the container content is known. You may need to override this method depending on the container. See composable_container_video as an example.
69
69
 
70
70
  Returns:
71
71
  Any: Visual representation of the container that can be combined with other containers
@@ -34,7 +34,7 @@ from bencher.optuna_conversions import (
34
34
 
35
35
 
36
36
  def convert_dataset_bool_dims_to_str(dataset: xr.Dataset) -> xr.Dataset:
37
- """Given a dataarray that contains boolean coordinates, conver them to strings so that holoviews loads the data properly
37
+ """Given a dataarray that contains boolean coordinates, convert them to strings so that holoviews loads the data properly
38
38
 
39
39
  Args:
40
40
  dataarray (xr.DataArray): dataarray with boolean coordinates
@@ -342,14 +342,14 @@ class OptunaResult:
342
342
  if "width" not in kwargs:
343
343
  if self.bench_cfg.plot_size is not None:
344
344
  kwargs["width"] = self.bench_cfg.plot_size
345
- # specific width overrrides general size
345
+ # specific width overrides general size
346
346
  if self.bench_cfg.plot_width is not None:
347
347
  kwargs["width"] = self.bench_cfg.plot_width
348
348
 
349
349
  if "height" not in kwargs:
350
350
  if self.bench_cfg.plot_size is not None:
351
351
  kwargs["height"] = self.bench_cfg.plot_size
352
- # specific height overrrides general size
352
+ # specific height overrides general size
353
353
  if self.bench_cfg.plot_height is not None:
354
354
  kwargs["height"] = self.bench_cfg.plot_height
355
355
  return kwargs
@@ -7,7 +7,7 @@ from bencher.variables.sweep_base import SweepBase, shared_slots
7
7
 
8
8
 
9
9
  class SweepSelector(Selector, SweepBase):
10
- """A class to reprsent a parameter sweep of bools"""
10
+ """A class to represent a parameter sweep of bools"""
11
11
 
12
12
  __slots__ = shared_slots
13
13
 
@@ -27,7 +27,7 @@ class SweepSelector(Selector, SweepBase):
27
27
 
28
28
 
29
29
  class BoolSweep(SweepSelector):
30
- """A class to reprsent a parameter sweep of bools"""
30
+ """A class to represent a parameter sweep of bools"""
31
31
 
32
32
  def __init__(self, units: str = "ul", samples: int = None, default=True, **params):
33
33
  SweepSelector.__init__(
@@ -41,7 +41,7 @@ class BoolSweep(SweepSelector):
41
41
 
42
42
 
43
43
  class StringSweep(SweepSelector):
44
- """A class to reprsent a parameter sweep of strings"""
44
+ """A class to represent a parameter sweep of strings"""
45
45
 
46
46
  def __init__(
47
47
  self,
@@ -61,7 +61,7 @@ class StringSweep(SweepSelector):
61
61
 
62
62
 
63
63
  class EnumSweep(SweepSelector):
64
- """A class to reprsent a parameter sweep of enums"""
64
+ """A class to represent a parameter sweep of enums"""
65
65
 
66
66
  __slots__ = shared_slots
67
67
 
@@ -82,7 +82,7 @@ class EnumSweep(SweepSelector):
82
82
 
83
83
 
84
84
  class IntSweep(Integer, SweepBase):
85
- """A class to reprsent a parameter sweep of ints"""
85
+ """A class to represent a parameter sweep of ints"""
86
86
 
87
87
  __slots__ = shared_slots + ["sample_values"]
88
88
 
@@ -77,7 +77,7 @@ class SweepBase(param.Parameter):
77
77
  """given a sweep variable (self), return the range of values as a panel slider
78
78
 
79
79
  Args:
80
- debug (bool, optional): pass to the sweepvar to produce a full set of varaibles, or when debug=True, a reduces number of sweep vars. Defaults to False.
80
+ debug (bool, optional): pass to the sweepvar to produce a full set of variables, or when debug=True, a reduces number of sweep vars. Defaults to False.
81
81
 
82
82
  Returns:
83
83
  pn.widgets.slider.DiscreteSlider: A panel slider with the values() of the sweep variable
@@ -7,7 +7,7 @@ from bencher.variables.sweep_base import SweepBase, shared_slots
7
7
 
8
8
 
9
9
  class TimeBase(SweepBase, Selector):
10
- """A class to capture a time snapshot of benchmark values. Time is reprented as a continous value i.e a datetime which is converted into a np.datetime64. To represent time as a discrete value use the TimeEvent class. The distinction is because holoview and plotly code makes different assumptions about discrete vs continous variables"""
10
+ """A class to capture a time snapshot of benchmark values. Time is represent as a continuous value i.e a datetime which is converted into a np.datetime64. To represent time as a discrete value use the TimeEvent class. The distinction is because holoview and plotly code makes different assumptions about discrete vs continuous variables"""
11
11
 
12
12
  def __init__(
13
13
  self,
@@ -40,7 +40,7 @@ class TimeBase(SweepBase, Selector):
40
40
 
41
41
 
42
42
  class TimeSnapshot(TimeBase):
43
- """A class to capture a time snapshot of benchmark values. Time is reprented as a continous value i.e a datetime which is converted into a np.datetime64. To represent time as a discrete value use the TimeEvent class. The distinction is because holoview and plotly code makes different assumptions about discrete vs continous variables"""
43
+ """A class to capture a time snapshot of benchmark values. Time is represent as a continuous value i.e a datetime which is converted into a np.datetime64. To represent time as a discrete value use the TimeEvent class. The distinction is because holoview and plotly code makes different assumptions about discrete vs continuous variables"""
44
44
 
45
45
  __slots__ = shared_slots
46
46
 
@@ -68,7 +68,7 @@ class TimeSnapshot(TimeBase):
68
68
 
69
69
 
70
70
  class TimeEvent(TimeBase):
71
- """A class to represent a discrete event in time where the data was captured i.e a series of pull requests. Here time is discrete and can't be interpolated, to represent time as a continous value use the TimeSnapshot class. The distinction is because holoview and plotly code makes different assumptions about discrete vs continous variables"""
71
+ """A class to represent a discrete event in time where the data was captured i.e a series of pull requests. Here time is discrete and can't be interpolated, to represent time as a continuous value use the TimeSnapshot class. The distinction is because holoview and plotly code makes different assumptions about discrete vs continuous variables"""
72
72
 
73
73
  __slots__ = shared_slots
74
74
 
@@ -30,7 +30,7 @@ class VideoWriter:
30
30
  if width is None:
31
31
  width = len(label) * 10
32
32
  new_img = Image.new("RGB", (width, height), color=color)
33
- # ImageDraw.Draw(new_img).text((width/2, 0), label, (0, 0, 0),align="center",achor="ms")
33
+ # ImageDraw.Draw(new_img).text((width/2, 0), label, (0, 0, 0),align="center",anchor="ms")
34
34
  ImageDraw.Draw(new_img).text(
35
35
  (width / 2.0, 0), label, (0, 0, 0), anchor="mt", font_size=height
36
36
  )
@@ -1,32 +1,35 @@
1
1
  [project]
2
2
  name = "holobench"
3
- version = "1.30.1"
3
+ version = "1.30.3"
4
4
 
5
5
  authors = [{ name = "Austin Gregg-Smith", email = "blooop@gmail.com" }]
6
6
  description = "A package for benchmarking the performance of arbitrary functions"
7
7
  readme = "README.md"
8
8
  license = "MIT"
9
9
 
10
+ requires-python = ">=3.10,<3.13"
11
+
10
12
  dependencies = [
11
13
  "holoviews>=1.15,<=1.19.1",
12
- "numpy>=1.0,<=2.1.2",
14
+ "numpy>=1.0,<=2.1.3",
13
15
  "param>=1.13.0,<=2.1.1",
14
- "hvplot>=0.8,<=0.11.1",
16
+ "hvplot>=0.8,<=0.10.0",
15
17
  "matplotlib>=3.6.3,<=3.9.2",
16
- "panel>=1.3.6,<=1.5.3",
18
+ "panel>=1.3.6,<=1.4.5",
17
19
  "diskcache>=5.6,<=5.6.3",
18
20
  "optuna>=3.2,<=4.0.0",
19
- "xarray>=2023.7,<=2024.10.0",
21
+ "xarray>=2023.7,<=2024.7.0",
20
22
  "plotly>=5.15,<=5.24.1",
21
23
  "sortedcontainers>=2.4,<=2.4",
22
- "pandas>=2.0,<=2.2.3",
24
+ "pandas>=2.0,<=2.2.2",
23
25
  "strenum>=0.4.0,<=0.4.15",
24
- "scikit-learn>=1.2,<=1.5.2",
26
+ "scikit-learn>=1.2,<=1.5.1",
25
27
  "str2bool>=1.1,<=1.1",
26
28
  "scoop>=0.7.0,<=0.7.2.0",
27
29
  "moviepy-fix-codec",
28
30
  ]
29
31
 
32
+
30
33
  [project.urls]
31
34
  Repository = "https://github.com/dyson-ai/bencher"
32
35
  Home = "https://github.com/dyson-ai/bencher"
@@ -36,10 +39,6 @@ Documentation = "https://bencher.readthedocs.io/en/latest/"
36
39
  channels = ["conda-forge"]
37
40
  platforms = ["linux-64"]
38
41
 
39
- [tool.pixi.dependencies]
40
- python = ">=3.10"
41
-
42
-
43
42
  [tool.pixi.feature.py310.dependencies]
44
43
  python = "3.10.*"
45
44
  [tool.pixi.feature.py311.dependencies]
@@ -53,13 +52,13 @@ holobench = { path = ".", editable = true }
53
52
 
54
53
  [project.optional-dependencies]
55
54
  test = [
56
- "black>=23,<=24.10.0",
57
55
  "pylint>=3.2.5,<=3.3.1",
58
56
  "pytest-cov>=4.1,<=6.0.0",
59
57
  "pytest>=7.4,<=8.3.3",
60
- "hypothesis>=6.104.2,<=6.116.0",
61
- "ruff>=0.5.0,<=0.7.2",
62
- "coverage>=7.5.4,<=7.6.4",
58
+ "hypothesis>=6.104.2,<=6.119.1",
59
+ "ruff>=0.5.0,<=0.7.4",
60
+ "coverage>=7.5.4,<=7.6.7",
61
+ "pre-commit<=4.0.1"
63
62
  ]
64
63
 
65
64
  [build-system]
@@ -78,8 +77,9 @@ py312 = ["py312","test"]
78
77
 
79
78
 
80
79
  [tool.pixi.tasks]
81
- success = "echo Success"
82
- format = "black ."
80
+ pre-commit = "pre-commit run -a"
81
+ pre-commit-update = "pre-commit autoupdate"
82
+ format = "ruff format ."
83
83
  check-clean-workspace = "git diff --exit-code"
84
84
  ruff-lint = "ruff check . --fix"
85
85
  pylint = "pylint --version && echo 'running pylint...' && pylint $(git ls-files '*.py')"
@@ -92,7 +92,7 @@ coverage-report = "coverage report -m"
92
92
  update-lock = "pixi update && git commit -a -m'update pixi.lock' || true"
93
93
  push = "git push"
94
94
  update-lock-push = { depends_on = ["update-lock", "push"] }
95
- fix = { depends_on = ["update-lock", "format", "ruff-lint"] }
95
+ fix = { depends_on = ["update-lock","pre-commit", "format", "ruff-lint"] }
96
96
  fix-commit-push = { depends_on = ["fix", "commit-format", "update-lock-push"] }
97
97
  ci-no-cover = { depends_on = ["style", "test"] }
98
98
  ci = { depends_on = ["format","ruff-lint", "pylint", "coverage", "coverage-report"] }
@@ -112,26 +112,6 @@ jobs = 16 #detect number of cores
112
112
  disable = "C,logging-fstring-interpolation,line-too-long,fixme,missing-module-docstring,too-many-instance-attributes,too-few-public-methods,too-many-arguments,too-many-locals,too-many-branches,too-many-statements,use-dict-literal,duplicate-code,too-many-public-methods,too-many-nested-blocks,cyclic-import, too-many-positional-arguments"
113
113
  enable = "no-else-return,consider-using-in"
114
114
 
115
- [tool.black]
116
- line-length = 100
117
-
118
- [tool.ruff]
119
- line-length = 100 # Same as Black.
120
-
121
- target-version = "py310"
122
-
123
- [tool.ruff.lint]
124
- # Never enforce `E501` (line length violations).
125
- #"F841" will auto remove unused variables which is annoying during development, pylint catches this anyway
126
- ignore = ["E501", "E902", "F841"]
127
- # Allow unused variables when underscore-prefixed.
128
- dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
129
-
130
- # Ignore `E402` (import violations) in all `__init__.py` files, and in `path/to/file.py`.
131
- [tool.ruff.lint.per-file-ignores]
132
- "__init__.py" = ["E402", "F401"]
133
-
134
-
135
115
  [tool.coverage.run]
136
116
  omit = ["*/test/*", "__init__.py"]
137
117
 
File without changes
File without changes
File without changes
File without changes
File without changes