holobench 1.40.1__py3-none-any.whl → 1.42.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. CHANGELOG.md +10 -0
  2. bencher/__init__.py +20 -2
  3. bencher/bench_cfg.py +265 -61
  4. bencher/bench_report.py +2 -2
  5. bencher/bench_runner.py +96 -10
  6. bencher/bencher.py +421 -89
  7. bencher/caching.py +1 -4
  8. bencher/class_enum.py +70 -7
  9. bencher/example/example_composable_container_image.py +60 -0
  10. bencher/example/example_composable_container_video.py +49 -0
  11. bencher/example/example_dataframe.py +2 -2
  12. bencher/example/example_image.py +17 -21
  13. bencher/example/example_image1.py +16 -20
  14. bencher/example/example_levels.py +17 -173
  15. bencher/example/example_pareto.py +107 -31
  16. bencher/example/example_rerun2.py +1 -1
  17. bencher/example/example_simple_bool.py +2 -2
  18. bencher/example/example_simple_float2d.py +6 -1
  19. bencher/example/example_video.py +35 -17
  20. bencher/example/experimental/example_hvplot_explorer.py +3 -4
  21. bencher/example/inputs_0D/example_0_in_1_out.py +25 -15
  22. bencher/example/inputs_0D/example_0_in_2_out.py +12 -3
  23. bencher/example/inputs_0_float/example_0_cat_in_2_out.py +88 -0
  24. bencher/example/inputs_0_float/example_1_cat_in_2_out.py +98 -0
  25. bencher/example/inputs_0_float/example_2_cat_in_2_out.py +107 -0
  26. bencher/example/inputs_0_float/example_3_cat_in_2_out.py +111 -0
  27. bencher/example/inputs_1D/example1d_common.py +48 -12
  28. bencher/example/inputs_1D/example_0_float_1_cat.py +33 -0
  29. bencher/example/inputs_1D/example_1_cat_in_2_out_repeats.py +68 -0
  30. bencher/example/inputs_1D/example_1_float_2_cat_repeats.py +15 -0
  31. bencher/example/inputs_1D/example_1_int_in_1_out.py +98 -0
  32. bencher/example/inputs_1D/example_1_int_in_2_out.py +101 -0
  33. bencher/example/inputs_1D/example_1_int_in_2_out_repeats.py +99 -0
  34. bencher/example/inputs_1_float/example_1_float_0_cat_in_2_out.py +117 -0
  35. bencher/example/inputs_1_float/example_1_float_1_cat_in_2_out.py +124 -0
  36. bencher/example/inputs_1_float/example_1_float_2_cat_in_2_out.py +132 -0
  37. bencher/example/inputs_1_float/example_1_float_3_cat_in_2_out.py +140 -0
  38. bencher/example/inputs_2D/example_2_cat_in_4_out_repeats.py +104 -0
  39. bencher/example/inputs_2_float/example_2_float_0_cat_in_2_out.py +98 -0
  40. bencher/example/inputs_2_float/example_2_float_1_cat_in_2_out.py +112 -0
  41. bencher/example/inputs_2_float/example_2_float_2_cat_in_2_out.py +122 -0
  42. bencher/example/inputs_2_float/example_2_float_3_cat_in_2_out.py +138 -0
  43. bencher/example/inputs_3_float/example_3_float_0_cat_in_2_out.py +111 -0
  44. bencher/example/inputs_3_float/example_3_float_1_cat_in_2_out.py +117 -0
  45. bencher/example/inputs_3_float/example_3_float_2_cat_in_2_out.py +124 -0
  46. bencher/example/inputs_3_float/example_3_float_3_cat_in_2_out.py +129 -0
  47. bencher/example/meta/generate_examples.py +124 -7
  48. bencher/example/meta/generate_meta.py +88 -40
  49. bencher/job.py +175 -12
  50. bencher/plotting/plot_filter.py +52 -17
  51. bencher/results/bench_result.py +119 -26
  52. bencher/results/bench_result_base.py +119 -10
  53. bencher/results/composable_container/composable_container_video.py +39 -12
  54. bencher/results/dataset_result.py +6 -200
  55. bencher/results/explorer_result.py +23 -0
  56. bencher/results/{hvplot_result.py → histogram_result.py} +3 -18
  57. bencher/results/holoview_results/__init__.py +0 -0
  58. bencher/results/holoview_results/bar_result.py +79 -0
  59. bencher/results/holoview_results/curve_result.py +110 -0
  60. bencher/results/holoview_results/distribution_result/__init__.py +0 -0
  61. bencher/results/holoview_results/distribution_result/box_whisker_result.py +73 -0
  62. bencher/results/holoview_results/distribution_result/distribution_result.py +109 -0
  63. bencher/results/holoview_results/distribution_result/scatter_jitter_result.py +92 -0
  64. bencher/results/holoview_results/distribution_result/violin_result.py +70 -0
  65. bencher/results/holoview_results/heatmap_result.py +319 -0
  66. bencher/results/holoview_results/holoview_result.py +346 -0
  67. bencher/results/holoview_results/line_result.py +240 -0
  68. bencher/results/holoview_results/scatter_result.py +107 -0
  69. bencher/results/holoview_results/surface_result.py +158 -0
  70. bencher/results/holoview_results/table_result.py +14 -0
  71. bencher/results/holoview_results/tabulator_result.py +20 -0
  72. bencher/results/laxtex_result.py +42 -35
  73. bencher/results/optuna_result.py +30 -115
  74. bencher/results/video_controls.py +38 -0
  75. bencher/results/video_result.py +39 -36
  76. bencher/results/video_summary.py +2 -2
  77. bencher/results/{plotly_result.py → volume_result.py} +29 -8
  78. bencher/utils.py +176 -30
  79. bencher/variables/inputs.py +122 -15
  80. bencher/video_writer.py +38 -2
  81. bencher/worker_job.py +34 -7
  82. {holobench-1.40.1.dist-info → holobench-1.42.0.dist-info}/METADATA +21 -25
  83. holobench-1.42.0.dist-info/RECORD +147 -0
  84. bencher/example/example_composable_container.py +0 -106
  85. bencher/example/example_levels2.py +0 -37
  86. bencher/example/inputs_1D/example_1_in_1_out.py +0 -62
  87. bencher/example/inputs_1D/example_1_in_2_out.py +0 -63
  88. bencher/example/inputs_1D/example_1_in_2_out_repeats.py +0 -61
  89. bencher/results/holoview_result.py +0 -787
  90. bencher/results/panel_result.py +0 -41
  91. holobench-1.40.1.dist-info/RECORD +0 -111
  92. {holobench-1.40.1.dist-info → holobench-1.42.0.dist-info}/WHEEL +0 -0
  93. {holobench-1.40.1.dist-info → holobench-1.42.0.dist-info}/licenses/LICENSE +0 -0
CHANGELOG.md ADDED
@@ -0,0 +1,10 @@
+ # Changelog
+
+ All notable changes to this project will be documented in this file.
+
+ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+ ## [0.3.10]
+
+ Before changelogs
bencher/__init__.py CHANGED
@@ -43,6 +43,24 @@ from .results.composable_container.composable_container_video import (
  RenderCfg,
  )

+ from bencher.results.holoview_results.distribution_result.box_whisker_result import BoxWhiskerResult
+ from bencher.results.holoview_results.distribution_result.violin_result import ViolinResult
+ from bencher.results.holoview_results.scatter_result import ScatterResult
+ from bencher.results.holoview_results.distribution_result.scatter_jitter_result import (
+ ScatterJitterResult,
+ )
+ from bencher.results.holoview_results.bar_result import BarResult
+ from bencher.results.holoview_results.line_result import LineResult
+ from bencher.results.holoview_results.curve_result import CurveResult
+ from bencher.results.holoview_results.heatmap_result import HeatmapResult
+ from bencher.results.holoview_results.surface_result import SurfaceResult
+ from bencher.results.holoview_results.tabulator_result import TabulatorResult
+ from bencher.results.holoview_results.table_result import TableResult
+
+ from bencher.results.histogram_result import HistogramResult
+ from bencher.results.explorer_result import ExplorerResult
+ from bencher.results.dataset_result import DataSetResult
+
  from .utils import (
  hmap_canonical_input,
  get_nearest_coords,
@@ -68,8 +86,8 @@ from .plotting.plot_filter import VarRange, PlotFilter
  from .variables.parametrised_sweep import ParametrizedSweep
  from .caching import CachedParams
  from .results.bench_result import BenchResult
- from .results.panel_result import PanelResult
- from .results.holoview_result import ReduceType, HoloviewResult
+ from .results.video_result import VideoResult
+ from .results.holoview_results.holoview_result import ReduceType, HoloviewResult
  from .bench_report import BenchReport, GithubPagesCfg
  from .job import Executors
  from .video_writer import VideoWriter, add_image
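
Note on the re-exports above: `PanelResult` is dropped in favour of `VideoResult`, `HoloviewResult` now lives under `bencher/results/holoview_results/`, and the new plot-specific result classes are imported into the package root. A minimal sketch of what should now be importable, assuming the names shown in the diff are kept as top-level attributes of `bencher`:

```python
# Hypothetical smoke test of the 1.42.0 import surface; the class names come from the
# re-exports added to bencher/__init__.py above, and top-level availability is an assumption.
import bencher as bch

new_exports = [
    "BoxWhiskerResult", "ViolinResult", "ScatterResult", "ScatterJitterResult",
    "BarResult", "LineResult", "CurveResult", "HeatmapResult", "SurfaceResult",
    "TabulatorResult", "TableResult", "HistogramResult", "ExplorerResult",
    "DataSetResult", "VideoResult", "HoloviewResult",
]
missing = [name for name in new_exports if not hasattr(bch, name)]
print("missing exports:", missing)  # PanelResult is no longer exported in 1.42.0
```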
bencher/bench_cfg.py CHANGED
@@ -3,12 +3,12 @@ from __future__ import annotations
  import argparse
  import logging

- from typing import List
+ from typing import List, Optional, Dict, Any, Union, TypeVar

  import param
- from str2bool import str2bool
  import panel as pn
  from datetime import datetime
+ from copy import deepcopy

  from bencher.variables.sweep_base import hash_sha1, describe_variable
  from bencher.variables.time import TimeSnapshot, TimeEvent
@@ -16,20 +16,79 @@ from bencher.variables.results import OptDir
  from bencher.job import Executors
  from bencher.results.laxtex_result import to_latex

+ T = TypeVar("T") # Generic type variable
+

  class BenchPlotSrvCfg(param.Parameterized):
- port: int = param.Integer(None, doc="The port to launch panel with")
- allow_ws_origin = param.Boolean(
+ """Configuration for the benchmarking plot server.
+
+ This class defines parameters for controlling how the benchmark visualization
+ server operates, including network configuration.
+
+ Attributes:
+ port (int): The port to launch panel with
+ allow_ws_origin (bool): Add the port to the whitelist (warning will disable remote
+ access if set to true)
+ show (bool): Open the served page in a web browser
+ """
+
+ port: Optional[int] = param.Integer(None, doc="The port to launch panel with")
+ allow_ws_origin: bool = param.Boolean(
  False,
- doc="Add the port to the whilelist, (warning will disable remote access if set to true)",
+ doc="Add the port to the whitelist, (warning will disable remote access if set to true)",
  )
  show: bool = param.Boolean(True, doc="Open the served page in a web browser")


  class BenchRunCfg(BenchPlotSrvCfg):
- """A Class to store options for how to run a benchmark parameter sweep"""
-
- repeats: bool = param.Integer(1, doc="The number of times to sample the inputs")
+ """Configuration class for benchmark execution parameters.
+
+ This class extends BenchPlotSrvCfg to provide comprehensive control over benchmark execution,
+ including caching behavior, reporting options, visualization settings, and execution strategy.
+ It defines numerous parameters that control how benchmark runs are performed, cached,
+ and displayed to the user.
+
+ Attributes:
+ repeats (int): The number of times to sample the inputs
+ over_time (bool): If true each time the function is called it will plot a
+ timeseries of historical and the latest result
+ use_optuna (bool): Show optuna plots
+ summarise_constant_inputs (bool): Print the inputs that are kept constant
+ when describing the sweep parameters
+ print_bench_inputs (bool): Print the inputs to the benchmark function
+ every time it is called
+ print_bench_results (bool): Print the results of the benchmark function
+ every time it is called
+ clear_history (bool): Clear historical results
+ print_pandas (bool): Print a pandas summary of the results to the console
+ print_xarray (bool): Print an xarray summary of the results to the console
+ serve_pandas (bool): Serve a pandas summary on the results webpage
+ serve_pandas_flat (bool): Serve a flattened pandas summary on the results webpage
+ serve_xarray (bool): Serve an xarray summary on the results webpage
+ auto_plot (bool): Automatically deduce the best type of plot for the results
+ raise_duplicate_exception (bool): Used to debug unique plot names
+ cache_results (bool): Benchmark level cache for completed benchmark results
+ clear_cache (bool): Clear the cache of saved input->output mappings
+ cache_samples (bool): Enable per-sample caching
+ only_hash_tag (bool): Use only the tag hash for cache identification
+ clear_sample_cache (bool): Clear the per-sample cache
+ overwrite_sample_cache (bool): Recalculate and overwrite cached sample values
+ only_plot (bool): Do not calculate benchmarks if no results are found in cache
+ use_holoview (bool): Use holoview for plotting
+ nightly (bool): Run a more extensive set of tests for a nightly benchmark
+ time_event (str): String representation of a sequence over time
+ headless (bool): Run the benchmarks headlessly
+ render_plotly (bool): Controls plotly rendering behavior with bokeh
+ level (int): Method of defining the number of samples to sweep over
+ run_tag (str): Tag for isolating cached results
+ run_date (datetime): Date the benchmark run was performed
+ executor (Executors): Executor for running the benchmark
+ plot_size (int): Sets both width and height of the plot
+ plot_width (int): Sets width of the plots
+ plot_height (int): Sets height of the plot
+ """
+
+ repeats: int = param.Integer(1, doc="The number of times to sample the inputs")

  over_time: bool = param.Boolean(
  False,
@@ -38,7 +97,7 @@ class BenchRunCfg(BenchPlotSrvCfg):

  use_optuna: bool = param.Boolean(False, doc="show optuna plots")

- summarise_constant_inputs = param.Boolean(
+ summarise_constant_inputs: bool = param.Boolean(
  True, doc="Print the inputs that are kept constant when describing the sweep parameters"
  )

@@ -120,30 +179,30 @@ class BenchRunCfg(BenchPlotSrvCfg):
  False, doc="Run a more extensive set of tests for a nightly benchmark"
  )

- time_event: str = param.String(
+ time_event: Optional[str] = param.String(
  None,
  doc="A string representation of a sequence over time, i.e. datetime, pull request number, or run number",
  )

  headless: bool = param.Boolean(False, doc="Run the benchmarks headlessly")

- render_plotly = param.Boolean(
+ render_plotly: bool = param.Boolean(
  True,
  doc="Plotly and Bokeh don't play nicely together, so by default pre-render plotly figures to a non dynamic version so that bokeh plots correctly. If you want interactive 3D graphs, set this to true but be aware that your 2D interactive graphs will probably stop working.",
  )

- level = param.Integer(
+ level: int = param.Integer(
  default=0,
  bounds=[0, 12],
  doc="The level parameter is a method of defining the number samples to sweep over in a variable agnostic way, i.e you don't need to specify the number of samples for each variable as they are calculated dynamically from the sampling level. See example_level.py for more information.",
  )

- run_tag = param.String(
+ run_tag: str = param.String(
  default="",
  doc="Define a tag for a run to isolate the results stored in the cache from other runs",
  )

- run_date = param.Date(
+ run_date: datetime = param.Date(
  default=datetime.now(),
  doc="The date the bench run was performed",
  )
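
The fields documented in the new `BenchRunCfg` docstring (repeats, level, run_tag, cache_results, and so on) are ordinary `param` attributes, so a run configuration can be assembled by passing them as keyword arguments, as in the sketch below. This assumes `BenchRunCfg` remains exported at the package root; keyword construction relies on standard `param.Parameterized` behaviour.

```python
# Sketch only: the field names are taken from the BenchRunCfg attributes shown in this diff;
# the chosen values are illustrative, not recommended defaults.
import bencher as bch

run_cfg = bch.BenchRunCfg(
    repeats=5,           # sample each input point 5 times
    level=3,             # sampling level instead of per-variable sample counts
    cache_results=True,  # reuse completed benchmark results where possible
    run_tag="nightly",   # isolate this run's cached results from other runs
)
print(run_cfg.repeats, run_cfg.level, run_cfg.run_tag)
```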
@@ -158,21 +217,25 @@ class BenchRunCfg(BenchPlotSrvCfg):
  doc="The function can be run serially or in parallel with different futures executors",
  )

- plot_size = param.Integer(default=None, doc="Sets the width and height of the plot")
- plot_width = param.Integer(
+ plot_size: Optional[int] = param.Integer(
+ default=None, doc="Sets the width and height of the plot"
+ )
+ plot_width: Optional[int] = param.Integer(
  default=None,
  doc="Sets with width of the plots, this will override the plot_size parameter",
  )
- plot_height = param.Integer(
+ plot_height: Optional[int] = param.Integer(
  default=None, doc="Sets the height of the plot, this will override the plot_size parameter"
  )

  @staticmethod
  def from_cmd_line() -> BenchRunCfg: # pragma: no cover
- """create a BenchRunCfg by parsing command line arguments
+ """Create a BenchRunCfg by parsing command line arguments.
+
+ Parses command line arguments to create a configuration for benchmark runs.

  Returns:
- parsed args: parsed args
+ BenchRunCfg: Configuration object with settings from command line arguments
  """

  parser = argparse.ArgumentParser(description="benchmark")
@@ -197,11 +260,8 @@ class BenchRunCfg(BenchPlotSrvCfg):

  parser.add_argument(
  "--nightly",
- type=lambda b: bool(str2bool(b)),
- nargs="?",
- const=False,
- default=False,
- help="turn on nightly benchmarking",
+ action="store_true",
+ help="Turn on nightly benchmarking",
  )

  parser.add_argument(
@@ -220,9 +280,44 @@ class BenchRunCfg(BenchPlotSrvCfg):

  return BenchRunCfg(**vars(parser.parse_args()))

+ def deep(self):
+ return deepcopy(self)
+


  class BenchCfg(BenchRunCfg):
- """A class for storing the arguments to configure a benchmark protocol If the inputs variables are the same the class should return the same hash and same filename. This is so that historical data can be referenced and ensures that the generated plots are unique per benchmark"""
+ """Complete configuration for a benchmark protocol.
+
+ This class extends BenchRunCfg and provides a comprehensive set of parameters
+ for configuring benchmark runs. It maintains a unique hash value based on its
+ configuration to ensure that benchmark results can be consistently referenced
+ and that plots are uniquely identified across runs.
+
+ The class handles input variables, result variables, constant values, meta variables,
+ and various presentation options. It also provides methods for generating
+ descriptive summaries and visualizations of the benchmark configuration.
+
+ Attributes:
+ input_vars (List): A list of ParameterizedSweep variables to perform a parameter sweep over
+ result_vars (List): A list of ParameterizedSweep results to collect and plot
+ const_vars (List): Variables to keep constant but are different from the default value
+ result_hmaps (List): A list of holomap results
+ meta_vars (List): Meta variables such as recording time and repeat id
+ all_vars (List): Stores a list of both the input_vars and meta_vars
+ iv_time (List[TimeSnapshot | TimeEvent]): Parameter for sampling the same inputs over time
+ iv_time_event (List[TimeEvent]): Parameter for sampling inputs over time as a discrete type
+ over_time (bool): Controls whether the function is sampled over time
+ name (str): The name of the benchmarkCfg
+ title (str): The title of the benchmark
+ raise_duplicate_exception (bool): Used for debugging filename generation uniqueness
+ bench_name (str): The name of the benchmark and save folder
+ description (str): A longer description of the benchmark function
+ post_description (str): Comments on the output of the graphs
+ has_results (bool): Whether this config has results
+ pass_repeat (bool): Whether to pass the 'repeat' kwarg to the benchmark function
+ tag (str): Tags for grouping different benchmarks
+ hash_value (str): Stored hash value of the config
+ plot_callbacks (List): Callables that take a BenchResult and return panel representation
+ """

  input_vars = param.List(
  default=None,
@@ -250,7 +345,7 @@ class BenchCfg(BenchRunCfg):
  )
  iv_time = param.List(
  default=[],
- item_type=TimeSnapshot | TimeEvent,
+ item_type=Union[TimeSnapshot, TimeEvent],
  doc="A parameter to represent the sampling the same inputs over time as a scalar type",
  )

@@ -260,22 +355,24 @@ class BenchCfg(BenchRunCfg):
  doc="A parameter to represent the sampling the same inputs over time as a discrete type",
  )

- over_time: param.Boolean(
+ over_time: bool = param.Boolean(
  False, doc="A parameter to control whether the function is sampled over time"
  )
- name: str = param.String(None, doc="The name of the benchmarkCfg")
- title: str = param.String(None, doc="The title of the benchmark")
- raise_duplicate_exception: str = param.Boolean(
+ name: Optional[str] = param.String(None, doc="The name of the benchmarkCfg")
+ title: Optional[str] = param.String(None, doc="The title of the benchmark")
+ raise_duplicate_exception: bool = param.Boolean(
  False, doc="Use this while debugging if filename generation is unique"
  )
- bench_name: str = param.String(
+ bench_name: Optional[str] = param.String(
  None, doc="The name of the benchmark and the name of the save folder"
  )
- description: str = param.String(
+ description: Optional[str] = param.String(
  None,
  doc="A place to store a longer description of the function of the benchmark",
  )
- post_description: str = param.String(None, doc="A place to comment on the output of the graphs")
+ post_description: Optional[str] = param.String(
+ None, doc="A place to comment on the output of the graphs"
+ )

  has_results: bool = param.Boolean(
  False,
@@ -302,17 +399,31 @@ class BenchCfg(BenchRunCfg):
  doc="A callable that takes a BenchResult and returns panel representation of the results",
  )

- def __init__(self, **params):
+ def __init__(self, **params: Any) -> None:
+ """Initialize a BenchCfg with the given parameters.
+
+ Args:
+ **params (Any): Parameters to set on the BenchCfg
+ """
  super().__init__(**params)
  self.plot_lib = None
  self.hmap_kdims = None
  self.iv_repeat = None

- def hash_persistent(self, include_repeats) -> str:
- """override the default hash function because the default hash function does not return the same value for the same inputs. It references internal variables that are unique per instance of BenchCfg
+ def hash_persistent(self, include_repeats: bool) -> str:
+ """Generate a persistent hash for the benchmark configuration.
+
+ Overrides the default hash function because the default hash function does not
+ return the same value for the same inputs. This method references only stable
+ variables that are consistent across instances of BenchCfg with the same
+ configuration.

  Args:
- include_repeats (bool) : by default include repeats as part of the hash except with using the sample cache
+ include_repeats (bool): Whether to include repeats as part of the hash
+ (True by default except when using the sample cache)
+
+ Returns:
+ str: A persistent hash value for the benchmark configuration
  """

  if include_repeats:
@@ -340,25 +451,50 @@ class BenchCfg(BenchRunCfg):
  return hash_val

  def inputs_as_str(self) -> List[str]:
+ """Get a list of input variable names.
+
+ Returns:
+ List[str]: List of the names of input variables
+ """
  return [i.name for i in self.input_vars]

- def to_latex(self):
+ def to_latex(self) -> Optional[pn.pane.LaTeX]:
+ """Convert benchmark configuration to LaTeX representation.
+
+ Returns:
+ Optional[pn.pane.LaTeX]: LaTeX representation of the benchmark configuration
+ """
  return to_latex(self)

- def describe_sweep(self, width: int = 800, accordion=True) -> pn.pane.Markdown | pn.Column:
- """Produce a markdown summary of the sweep settings"""
+ def describe_sweep(
+ self, width: int = 800, accordion: bool = True
+ ) -> Union[pn.pane.Markdown, pn.Column]:
+ """Produce a markdown summary of the sweep settings.
+
+ Args:
+ width (int): Width of the markdown panel in pixels. Defaults to 800.
+ accordion (bool): Whether to wrap the description in an accordion. Defaults to True.
+
+ Returns:
+ Union[pn.pane.Markdown, pn.Column]: Panel containing the sweep description
+ """

  latex = self.to_latex()
  desc = pn.pane.Markdown(self.describe_benchmark(), width=width)
  if accordion:
- desc = pn.Accordion(("Data Collection Parameters", desc))
+ desc = pn.Accordion(("Expand Full Data Collection Parameters", desc))

  sentence = self.sweep_sentence()
  if latex is not None:
  return pn.Column(sentence, latex, desc)
- return pn.Column(sentence, latex, desc)
+ return pn.Column(sentence, desc)

- def sweep_sentence(self):
+ def sweep_sentence(self) -> pn.pane.Markdown:
+ """Generate a concise summary sentence of the sweep configuration.
+
+ Returns:
+ pn.pane.Markdown: A panel containing a markdown summary sentence
+ """
  inputs = " by ".join([iv.name for iv in self.all_vars])

  all_vars_lens = [len(iv.values()) for iv in reversed(self.all_vars)]
@@ -367,13 +503,15 @@ class BenchCfg(BenchRunCfg):
  result_sizes = "x".join([str(iv) for iv in all_vars_lens])
  results = ", ".join([rv.name for rv in self.result_vars])

- return f"Sweeping {inputs} to generate a {result_sizes} result dataframe containing {results}. "
+ return pn.pane.Markdown(
+ f"Sweeping {inputs} to generate a {result_sizes} result dataframe containing {results}. "
+ )

  def describe_benchmark(self) -> str:
- """Generate a string summary of the inputs and results from a BenchCfg
+ """Generate a detailed string summary of the inputs and results from a BenchCfg.

  Returns:
- str: summary of BenchCfg
+ str: Comprehensive summary of BenchCfg
  """
  benchmark_sampling_str = ["```text"]
  benchmark_sampling_str.append("")
@@ -410,26 +548,62 @@ class BenchCfg(BenchRunCfg):
  benchmark_sampling_str = "\n".join(benchmark_sampling_str)
  return benchmark_sampling_str

- def to_title(self, panel_name: str = None) -> pn.pane.Markdown:
+ def to_title(self, panel_name: Optional[str] = None) -> pn.pane.Markdown:
+ """Create a markdown panel with the benchmark title.
+
+ Args:
+ panel_name (Optional[str]): The name for the panel. Defaults to the benchmark title.
+
+ Returns:
+ pn.pane.Markdown: A panel with the benchmark title as a heading
+ """
  if panel_name is None:
  panel_name = self.title
  return pn.pane.Markdown(f"# {self.title}", name=panel_name)

  def to_description(self, width: int = 800) -> pn.pane.Markdown:
+ """Create a markdown panel with the benchmark description.
+
+ Args:
+ width (int): Width of the markdown panel in pixels. Defaults to 800.
+
+ Returns:
+ pn.pane.Markdown: A panel with the benchmark description
+ """
  return pn.pane.Markdown(f"{self.description}", width=width)

  def to_post_description(self, width: int = 800) -> pn.pane.Markdown:
+ """Create a markdown panel with the benchmark post-description.
+
+ Args:
+ width (int): Width of the markdown panel in pixels. Defaults to 800.
+
+ Returns:
+ pn.pane.Markdown: A panel with the benchmark post-description
+ """
  return pn.pane.Markdown(f"{self.post_description}", width=width)

  def to_sweep_summary(
  self,
- name=None,
- description=True,
- describe_sweep=True,
- results_suffix=True,
+ name: Optional[str] = None,
+ description: bool = True,
+ describe_sweep: bool = True,
+ results_suffix: bool = True,
  title: bool = True,
- ) -> pn.pane.Markdown:
- """Produce panel output summarising the title, description and sweep setting"""
+ ) -> pn.Column:
+ """Produce panel output summarising the title, description and sweep setting.
+
+ Args:
+ name (Optional[str]): Name for the panel. Defaults to benchmark title or
+ "Data Collection Parameters" if title is False.
+ description (bool): Whether to include the benchmark description. Defaults to True.
+ describe_sweep (bool): Whether to include the sweep description. Defaults to True.
+ results_suffix (bool): Whether to add a "Results:" heading. Defaults to True.
+ title (bool): Whether to include the benchmark title. Defaults to True.
+
+ Returns:
+ pn.Column: A panel with the benchmark summary
+ """
  if name is None:
  if title:
  name = self.title
@@ -446,7 +620,16 @@ class BenchCfg(BenchRunCfg):
  col.append(pn.pane.Markdown("## Results:"))
  return col

- def optuna_targets(self, as_var=False) -> List[str]:
+ def optuna_targets(self, as_var: bool = False) -> List[Any]:
+ """Get the list of result variables that are optimization targets.
+
+ Args:
+ as_var (bool): If True, return the variable objects rather than their names.
+ Defaults to False.
+
+ Returns:
+ List[Any]: List of result variable names or objects that are optimization targets
+ """
  targets = []
  for rv in self.result_vars:
  if hasattr(rv, "direction") and rv.direction != OptDir.none:
@@ -458,17 +641,38 @@ class BenchCfg(BenchRunCfg):


  class DimsCfg:
- """A class to store data about the sampling and result dimensions"""
+ """A class to store data about the sampling and result dimensions.
+
+ This class processes a BenchCfg object to extract and organize information about
+ the dimensions of the benchmark, including names, ranges, sizes, and coordinates.
+ It is used to set up the structure for analyzing and visualizing benchmark results.
+
+ Attributes:
+ dims_name (List[str]): Names of the benchmark dimensions
+ dim_ranges (List[List[Any]]): Values for each dimension
+ dims_size (List[int]): Size (number of values) for each dimension
+ dim_ranges_index (List[List[int]]): Indices for each dimension value
+ dim_ranges_str (List[str]): String representation of dimension ranges
+ coords (Dict[str, List[Any]]): Mapping of dimension names to their values
+ """

  def __init__(self, bench_cfg: BenchCfg) -> None:
- self.dims_name = [i.name for i in bench_cfg.all_vars]
+ """Initialize the DimsCfg with dimension information from a benchmark configuration.
+
+ Extracts dimension names, ranges, sizes, and coordinates from the provided benchmark
+ configuration for use in organizing and analyzing benchmark results.
+
+ Args:
+ bench_cfg (BenchCfg): The benchmark configuration containing dimension information
+ """
+ self.dims_name: List[str] = [i.name for i in bench_cfg.all_vars]

- self.dim_ranges = []
+ self.dim_ranges: List[List[Any]] = []
  self.dim_ranges = [i.values() for i in bench_cfg.all_vars]
- self.dims_size = [len(p) for p in self.dim_ranges]
- self.dim_ranges_index = [list(range(i)) for i in self.dims_size]
- self.dim_ranges_str = [f"{s}\n" for s in self.dim_ranges]
- self.coords = dict(zip(self.dims_name, self.dim_ranges))
+ self.dims_size: List[int] = [len(p) for p in self.dim_ranges]
+ self.dim_ranges_index: List[List[int]] = [list(range(i)) for i in self.dims_size]
+ self.dim_ranges_str: List[str] = [f"{s}\n" for s in self.dim_ranges]
+ self.coords: Dict[str, List[Any]] = dict(zip(self.dims_name, self.dim_ranges))

  logging.debug(f"dims_name: {self.dims_name}")
  logging.debug(f"dim_ranges {self.dim_ranges_str}")
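
The reworked summary methods above now return Panel objects rather than plain strings: `describe_sweep` yields a `pn.Column` or `pn.pane.Markdown`, `sweep_sentence` returns `pn.pane.Markdown`, and `to_sweep_summary` returns a `pn.Column`. A sketch of how they might be combined, assuming a populated `BenchCfg` (normally produced by running a sweep) is already available:

```python
# Sketch based on the method signatures in the diff above; obtaining `bench_cfg`
# from a finished sweep is assumed rather than shown here.
import panel as pn

def sweep_overview(bench_cfg) -> pn.Column:
    sentence = bench_cfg.sweep_sentence()                            # one-line markdown summary
    details = bench_cfg.describe_sweep(width=600, accordion=False)   # full description panel
    summary = bench_cfg.to_sweep_summary(results_suffix=False)       # title + description + sweep
    return pn.Column(sentence, details, summary)
```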
bencher/bench_report.py CHANGED
@@ -162,8 +162,8 @@ class BenchReport(BenchPlotServer):

  def publish_args(branch_name) -> Tuple[str, str]:
  return (
- "https://github.com/dyson-ai/bencher.git",
- f"https://github.com/dyson-ai/bencher/blob/{branch_name}")
+ "https://github.com/blooop/bencher.git",
+ f"https://github.com/blooop/bencher/blob/{branch_name}")


  Args: