holobench 1.40.1__py3-none-any.whl → 1.42.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. CHANGELOG.md +10 -0
  2. bencher/__init__.py +20 -2
  3. bencher/bench_cfg.py +265 -61
  4. bencher/bench_report.py +2 -2
  5. bencher/bench_runner.py +96 -10
  6. bencher/bencher.py +421 -89
  7. bencher/caching.py +1 -4
  8. bencher/class_enum.py +70 -7
  9. bencher/example/example_composable_container_image.py +60 -0
  10. bencher/example/example_composable_container_video.py +49 -0
  11. bencher/example/example_dataframe.py +2 -2
  12. bencher/example/example_image.py +17 -21
  13. bencher/example/example_image1.py +16 -20
  14. bencher/example/example_levels.py +17 -173
  15. bencher/example/example_pareto.py +107 -31
  16. bencher/example/example_rerun2.py +1 -1
  17. bencher/example/example_simple_bool.py +2 -2
  18. bencher/example/example_simple_float2d.py +6 -1
  19. bencher/example/example_video.py +35 -17
  20. bencher/example/experimental/example_hvplot_explorer.py +3 -4
  21. bencher/example/inputs_0D/example_0_in_1_out.py +25 -15
  22. bencher/example/inputs_0D/example_0_in_2_out.py +12 -3
  23. bencher/example/inputs_0_float/example_0_cat_in_2_out.py +88 -0
  24. bencher/example/inputs_0_float/example_1_cat_in_2_out.py +98 -0
  25. bencher/example/inputs_0_float/example_2_cat_in_2_out.py +107 -0
  26. bencher/example/inputs_0_float/example_3_cat_in_2_out.py +111 -0
  27. bencher/example/inputs_1D/example1d_common.py +48 -12
  28. bencher/example/inputs_1D/example_0_float_1_cat.py +33 -0
  29. bencher/example/inputs_1D/example_1_cat_in_2_out_repeats.py +68 -0
  30. bencher/example/inputs_1D/example_1_float_2_cat_repeats.py +15 -0
  31. bencher/example/inputs_1D/example_1_int_in_1_out.py +98 -0
  32. bencher/example/inputs_1D/example_1_int_in_2_out.py +101 -0
  33. bencher/example/inputs_1D/example_1_int_in_2_out_repeats.py +99 -0
  34. bencher/example/inputs_1_float/example_1_float_0_cat_in_2_out.py +117 -0
  35. bencher/example/inputs_1_float/example_1_float_1_cat_in_2_out.py +124 -0
  36. bencher/example/inputs_1_float/example_1_float_2_cat_in_2_out.py +132 -0
  37. bencher/example/inputs_1_float/example_1_float_3_cat_in_2_out.py +140 -0
  38. bencher/example/inputs_2D/example_2_cat_in_4_out_repeats.py +104 -0
  39. bencher/example/inputs_2_float/example_2_float_0_cat_in_2_out.py +98 -0
  40. bencher/example/inputs_2_float/example_2_float_1_cat_in_2_out.py +112 -0
  41. bencher/example/inputs_2_float/example_2_float_2_cat_in_2_out.py +122 -0
  42. bencher/example/inputs_2_float/example_2_float_3_cat_in_2_out.py +138 -0
  43. bencher/example/inputs_3_float/example_3_float_0_cat_in_2_out.py +111 -0
  44. bencher/example/inputs_3_float/example_3_float_1_cat_in_2_out.py +117 -0
  45. bencher/example/inputs_3_float/example_3_float_2_cat_in_2_out.py +124 -0
  46. bencher/example/inputs_3_float/example_3_float_3_cat_in_2_out.py +129 -0
  47. bencher/example/meta/generate_examples.py +124 -7
  48. bencher/example/meta/generate_meta.py +88 -40
  49. bencher/job.py +175 -12
  50. bencher/plotting/plot_filter.py +52 -17
  51. bencher/results/bench_result.py +119 -26
  52. bencher/results/bench_result_base.py +119 -10
  53. bencher/results/composable_container/composable_container_video.py +39 -12
  54. bencher/results/dataset_result.py +6 -200
  55. bencher/results/explorer_result.py +23 -0
  56. bencher/results/{hvplot_result.py → histogram_result.py} +3 -18
  57. bencher/results/holoview_results/__init__.py +0 -0
  58. bencher/results/holoview_results/bar_result.py +79 -0
  59. bencher/results/holoview_results/curve_result.py +110 -0
  60. bencher/results/holoview_results/distribution_result/__init__.py +0 -0
  61. bencher/results/holoview_results/distribution_result/box_whisker_result.py +73 -0
  62. bencher/results/holoview_results/distribution_result/distribution_result.py +109 -0
  63. bencher/results/holoview_results/distribution_result/scatter_jitter_result.py +92 -0
  64. bencher/results/holoview_results/distribution_result/violin_result.py +70 -0
  65. bencher/results/holoview_results/heatmap_result.py +319 -0
  66. bencher/results/holoview_results/holoview_result.py +346 -0
  67. bencher/results/holoview_results/line_result.py +240 -0
  68. bencher/results/holoview_results/scatter_result.py +107 -0
  69. bencher/results/holoview_results/surface_result.py +158 -0
  70. bencher/results/holoview_results/table_result.py +14 -0
  71. bencher/results/holoview_results/tabulator_result.py +20 -0
  72. bencher/results/laxtex_result.py +42 -35
  73. bencher/results/optuna_result.py +30 -115
  74. bencher/results/video_controls.py +38 -0
  75. bencher/results/video_result.py +39 -36
  76. bencher/results/video_summary.py +2 -2
  77. bencher/results/{plotly_result.py → volume_result.py} +29 -8
  78. bencher/utils.py +176 -30
  79. bencher/variables/inputs.py +122 -15
  80. bencher/video_writer.py +38 -2
  81. bencher/worker_job.py +34 -7
  82. {holobench-1.40.1.dist-info → holobench-1.42.0.dist-info}/METADATA +21 -25
  83. holobench-1.42.0.dist-info/RECORD +147 -0
  84. bencher/example/example_composable_container.py +0 -106
  85. bencher/example/example_levels2.py +0 -37
  86. bencher/example/inputs_1D/example_1_in_1_out.py +0 -62
  87. bencher/example/inputs_1D/example_1_in_2_out.py +0 -63
  88. bencher/example/inputs_1D/example_1_in_2_out_repeats.py +0 -61
  89. bencher/results/holoview_result.py +0 -787
  90. bencher/results/panel_result.py +0 -41
  91. holobench-1.40.1.dist-info/RECORD +0 -111
  92. {holobench-1.40.1.dist-info → holobench-1.42.0.dist-info}/WHEEL +0 -0
  93. {holobench-1.40.1.dist-info → holobench-1.42.0.dist-info}/licenses/LICENSE +0 -0
bencher/example/example_pareto.py

@@ -1,51 +1,127 @@
- # pylint: disable=duplicate-code
+ """
+ Advanced Pareto Optimization Example with Randomness
+
+ This example demonstrates multi-objective optimization using Optuna with the Bencher framework.
+ It shows how to:
+ 1. Define a problem with multiple competing objectives
+ 2. Use Optuna's multi-objective optimization capabilities
+ 3. Visualize and analyze the Pareto front
+ 4. Compare different optimization approaches
+ 5. Demonstrate the effect of randomness on Pareto optimization
+ """

  import bencher as bch
+ import numpy as np
+
+ np.random.seed(0)
+
+
+ class EngineeringDesignProblem(bch.ParametrizedSweep):
+     """
+     A simplified engineering design problem with two competing objectives.
+
+     This example simulates a common engineering trade-off problem:
+     - Performance vs Cost
+
+     This is a classic multi-objective optimization scenario.
+     The problem includes controlled randomness to simulate real-world variability
+     in manufacturing processes and materials.
+     """
+
+     # Input design parameters - reduced to just 2
+     material_quality = bch.FloatSweep(
+         default=0.5, bounds=[0.1, 1.0], doc="Quality of the material (dimensionless)", samples=20
+     )
+     thickness = bch.FloatSweep(
+         default=0.05, bounds=[0.01, 0.2], doc="Component thickness (m)", samples=20
+     )
+
+     # Result variables - reduced to just 2 objectives to be optimized
+     performance = bch.ResultVar("score", bch.OptDir.maximize, doc="Performance metric (maximize)")
+     cost = bch.ResultVar("$", bch.OptDir.minimize, doc="Manufacturing cost (minimize)")
+
+     def __call__(self, **kwargs) -> dict:
+         """
+         Calculate the multi-objective outcomes based on input parameters.

- # All the examples will be using the data structures and benchmark function defined in this file
- from bencher.example.benchmark_data import ExampleBenchCfgIn, ExampleBenchCfgOut, bench_function
+         This simulates an engineering design problem where various objectives
+         compete with each other:
+         - Higher quality materials improve performance but increase cost
+         - Thicker material improves performance but increases cost
+
+         Includes inherent randomness to simulate:
+         - Manufacturing variability
+         - Material property variations
+         - Measurement uncertainty
+         """
+         self.update_params_from_kwargs(**kwargs)
+
+         # Base performance calculation
+         base_performance = self.material_quality * 80 + self.thickness * 50
+
+         # Add significant randomness (standard deviation = 15% of the base value)
+         # This will create noticeably different results on each run
+         performance_variability = 0.15 * base_performance
+         self.performance = base_performance + np.random.normal(0, performance_variability)
+
+         # Introduce a 30% chance of failure (e.g., due to manufacturing defects)
+         if np.random.rand() < 0.3:
+             self.performance = np.nan
+
+         # Base cost calculation
+         base_cost = self.material_quality * 100 + 10 / (self.thickness + 0.01)
+
+         # Add randomness to cost (standard deviation = 12% of the base value)
+         # Manufacturing costs can vary significantly in real-world scenarios
+         cost_variability = 0.12 * base_cost
+         self.cost = base_cost + np.random.normal(0, cost_variability)
+
+         return self.get_results_values_as_dict()


  def example_pareto(run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None) -> bch.Bench:
-     """Example of how to calculate the pareto front of a parameter sweep
+     """
+     Advanced example of multi-objective Pareto optimization using Optuna.
+
+     This function demonstrates:
+     1. Grid search approach to visualize the entire parameter space
+     2. True multi-objective optimization with Optuna
+     3. Analysis of the Pareto front
+     4. Effect of randomness on the Pareto front

      Args:
-         run_cfg (BenchRunCfg): configuration of how to perform the param sweep
+         run_cfg (BenchRunCfg): Configuration for the benchmark run
+         report (BenchReport): Report object to store results

      Returns:
-         Bench: results of the parameter sweep
+         Bench: Benchmark object with results
      """
+     if run_cfg is None:
+         run_cfg = bch.BenchRunCfg()
+         run_cfg.repeats = 5  # Multiple repeats to demonstrate randomness effects
+         run_cfg.level = 4
+
+     # Set up Optuna for multi-objective optimization
      run_cfg.use_optuna = True

-     bench = bch.Bench(
-         "Multi-objective optimisation",
-         bench_function,
-         ExampleBenchCfgIn,
-         run_cfg=run_cfg,
-         report=report,
-     )
+     # Important: Set multiple repeats to demonstrate the effect of randomness
+     # The framework will automatically calculate and plot both individual runs and averages
+     run_cfg.repeats = 5
+
+     # Create problem definition and benchmark
+     bench = EngineeringDesignProblem().to_bench(run_cfg, report)

-     res = bench.plot_sweep(
-         title="Pareto Optimisation with Optuna",
-         description="This example shows how to plot the pareto front of the tradeoff between multiple criteria. When multiple result variable are defined, and use_optuna=True a pareto plot and the relative importance of each input variable on the output criteria is plotted. A summary of the points on the pareto front is printed as well. You can use the pareto plot to decide the how to trade off one objective for another. Pareto plots are supported for 2D and 3D. If you have more than 3 result variables the first 3 are selected for the pareto plot. Plotting 4D surfaces is left as an exercise to the reader",
-         input_vars=[
-             ExampleBenchCfgIn.param.theta,
-             ExampleBenchCfgIn.param.offset,
-         ],
-         result_vars=[ExampleBenchCfgOut.param.out_sin, ExampleBenchCfgOut.param.out_cos],
-         const_vars=ExampleBenchCfgIn.get_input_defaults(
-             [ExampleBenchCfgIn.param.noisy.with_const(True)]
-         ),
-         post_description="""# Post Description
-         This is a slightly unusual way of doing pareto optimisation as we are not using a typical multi-objective optimisation algorithm [TODO, add example]. Instead we are performing a grid search and looking at the resulting pareto plot. The reason for doing a grid search instead of standard pareto optimisation is that we can produce more isolated plots of how an input affects an output which can help understanding of the parameter space. Future examples will show how to use grid search to bootstrap further optimisation with a multi objective optimiser""",
+     # Perform grid search on our two input variables
+     grid_result = bench.plot_sweep(
+         title="Parameter Space Exploration with Variability",
+         description="Exploring how material quality and thickness affect performance and cost with inherent randomness",
      )

-     bench.report.append(res.to_optuna_plots())
+     # Add Optuna-specific visualizations
+     bench.report.append(grid_result.to_optuna_plots())
+
      return bench


  if __name__ == "__main__":
-     run_cfg_ex = bch.BenchRunCfg()
-     run_cfg_ex.repeats = 2
-     run_cfg_ex.level = 2
-     example_pareto(run_cfg_ex).report.show()
+     example_pareto().report.show()
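
The hunk above also shows the release's new authoring pattern: a self-contained bch.ParametrizedSweep turned into a benchmark with to_bench(), replacing the shared ExampleBenchCfgIn/ExampleBenchCfgOut fixtures. A minimal sketch of that pattern, using only calls visible in the hunk (MinimalProblem and its attributes are illustrative, not part of the package):

    import bencher as bch

    class MinimalProblem(bch.ParametrizedSweep):
        # one swept input and two competing objectives, mirroring the example above
        x = bch.FloatSweep(default=0.5, bounds=[0.0, 1.0], samples=10)
        quality = bch.ResultVar("score", bch.OptDir.maximize, doc="objective to maximize")
        cost = bch.ResultVar("$", bch.OptDir.minimize, doc="objective to minimize")

        def __call__(self, **kwargs) -> dict:
            self.update_params_from_kwargs(**kwargs)
            self.quality = self.x**0.5  # improves with x, with diminishing returns
            self.cost = 2.0 * self.x  # grows linearly with x
            return self.get_results_values_as_dict()

    run_cfg = bch.BenchRunCfg()
    run_cfg.use_optuna = True
    bench = MinimalProblem().to_bench(run_cfg)
    res = bench.plot_sweep(title="Minimal Pareto sweep")
    bench.report.append(res.to_optuna_plots())
    bench.report.show()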
bencher/example/example_rerun2.py

@@ -23,7 +23,7 @@ else:
      # publish data to a github branch
      bch.publish_and_view_rrd(
          file_path,
-         remote="https://github.com/dyson-ai/bencher.git",
+         remote="https://github.com/blooop/bencher.git",
          branch_name="test_rrd",
          content_callback=bch.github_content,
      ).show()
bencher/example/example_simple_bool.py

@@ -16,14 +16,14 @@ def example_1D_bool(run_cfg: bch.BenchRunCfg) -> bch.Bench:
      )

      # here we sample the input variable theta and plot the value of output1. The (noisy) function is sampled 20 times so you can see the distribution
-     res = bench.plot_sweep(
+     bench.plot_sweep(
          title="Example 1D Bool",
          input_vars=[ExampleBenchCfgIn.param.noisy],
          result_vars=[ExampleBenchCfgOut.param.out_sin],
          description=example_1D_bool.__doc__,
          run_cfg=run_cfg,
      )
-     bench.report.append(res.to_bar())
+     bench.add(bch.BarResult)

      return bench

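This is the recurring API migration in the release: rather than keeping the sweep result and appending a hand-converted plot (res.to_bar()), the bench composes the view from a result class via bench.add(bch.BarResult). A self-contained sketch of the new style (NoisySample is illustrative; the bencher calls are the ones shown in this diff):

    import random
    import bencher as bch

    class NoisySample(bch.ParametrizedSweep):
        # 0 inputs, 1 output: repeated calls give a distribution to plot as bars
        value = bch.ResultVar(units="ul", doc="a noisy sample")

        def __call__(self, **kwargs) -> dict:
            self.update_params_from_kwargs(**kwargs)
            self.value = random.gauss(0.0, 1.0)
            return super().__call__(**kwargs)

    bench = NoisySample().to_bench(bch.BenchRunCfg(repeats=20))
    bench.plot_sweep(title="Bar view of repeated samples")
    bench.add(bch.BarResult)  # replaces bench.report.append(res.to_bar())
    bench.report.show()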
bencher/example/example_simple_float2d.py

@@ -21,7 +21,12 @@ def example_2D_float(run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport =
      """This example shows how to sample a 1 dimensional float variable and plot the result of passing that parameter sweep to the benchmarking function"""

      bench = SimpleFloat().to_bench(run_cfg, report)
-     bench.plot_sweep()
+     res = bench.plot_sweep()
+
+     bench.add(bch.CurveResult)
+     bench.report.append(res.to(bch.CurveResult))
+     bench.report.append(res.to(bch.HeatmapResult))
+     bench.add(bch.BarResult)
      return bench

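The hunk above shows the two routes to the same view side by side: bench.add(bch.CurveResult) renders the most recent sweep, while res.to(bch.CurveResult) converts a result handle you are holding, which matters when one sweep should feed several views. A sketch under that reading (Wave2D is an illustrative stand-in for SimpleFloat, and the no-argument to_bench() assumes the same None defaults as the signatures in this diff):

    import math
    import bencher as bch

    class Wave2D(bch.ParametrizedSweep):
        # two float inputs and one output, enough to exercise curve and heatmap views
        x = bch.FloatSweep(default=0.0, bounds=[0.0, 3.0], samples=8)
        y = bch.FloatSweep(default=0.0, bounds=[0.0, 3.0], samples=8)
        z = bch.ResultVar(units="ul", doc="surface height")

        def __call__(self, **kwargs) -> dict:
            self.update_params_from_kwargs(**kwargs)
            self.z = math.sin(self.x) * math.cos(self.y)
            return super().__call__(**kwargs)

    bench = Wave2D().to_bench()
    res = bench.plot_sweep()
    # one sweep, several rendered views of the same result
    bench.report.append(res.to(bch.CurveResult))
    bench.report.append(res.to(bch.HeatmapResult))
    bench.report.show()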
bencher/example/example_video.py

@@ -1,9 +1,24 @@
  import bencher as bch
  import numpy as np
- import matplotlib.pyplot as plt
+ from PIL import Image
+ import colorcet as cc
+ import numpy.typing as npt
+
+
+ def apply_colormap(data: npt.NDArray) -> npt.NDArray:
+     """Apply a perceptually uniform colormap to the data"""
+     # Normalize data to [0, 1]
+     normalized = (data - data.min()) / (data.max() - data.min())
+     # Convert hex colors to RGB values using numpy's frombuffer
+     colors = np.array(
+         [np.frombuffer(bytes.fromhex(c.lstrip("#")), dtype=np.uint8) for c in cc.rainbow]
+     )
+     # Map normalized values to colormap indices
+     indices = (normalized * (len(colors) - 1)).astype(int)
+     # Create RGB array from the colormap
+     return colors[indices]


- # code from https://ipython-books.github.io/124-simulating-a-partial-differential-equation-reaction-diffusion-systems-and-turing-patterns/
  class TuringPattern(bch.ParametrizedSweep):
      alpha = bch.FloatSweep(default=2.8e-4, bounds=(2e-4, 5e-3))
      beta = bch.FloatSweep(default=5e-3, bounds=(1e-3, 9e-3))

@@ -17,6 +32,7 @@ class TuringPattern(bch.ParametrizedSweep):
      video = bch.ResultVideo()
      score = bch.ResultVar()
      img = bch.ResultImage()
+     img_extracted = bch.ResultImage()

      def laplacian(self, Z, dx):
          Ztop = Z[0:-2, 1:-1]

@@ -49,28 +65,28 @@
      def __call__(self, **kwargs):
          self.update_params_from_kwargs(**kwargs)

-         n = int(self.time / self.dt)  # number of iterations
-         dx = 2.0 / self.size  # space step
+         n = int(self.time / self.dt)
+         dx = 2.0 / self.size

          U = np.random.rand(self.size, self.size)
          V = np.random.rand(self.size, self.size)

-         fig, ax = plt.subplots(frameon=False, figsize=(2, 2))
-         fig.set_tight_layout(True)
-         ax.set_axis_off()
          vid_writer = bch.VideoWriter()
          for i in range(n):
              self.update(U, V, dx)
              if i % 500 == 0:
-                 ax.imshow(U)
-                 fig.canvas.draw()
-                 rgb = np.array(fig.canvas.renderer.buffer_rgba())
-                 vid_writer.append(rgb)
-
-                 self.img = bch.add_image(rgb)
-
+                 # Apply colormap to create RGB image
+                 rgb = apply_colormap(U)
+                 # Create PIL image with alpha channel
+                 img = Image.fromarray(rgb, "RGB").convert("RGBA")
+                 img = img.resize((200, 200), Image.Resampling.LANCZOS)
+                 rgb_alpha = np.array(img)
+                 vid_writer.append(rgb_alpha)
+
+                 self.img = bch.add_image(rgb_alpha)
          self.video = vid_writer.write()
-
+         self.img_extracted = bch.video_writer.VideoWriter.extract_frame(self.video)
+         print("img path", self.img_extracted)
          self.score = self.alpha + self.beta
          return super().__call__()

@@ -103,14 +119,16 @@ def example_video_tap(
          )
      )

+     bench.worker_class_instance.get_results_only()
+
      return bench


  if __name__ == "__main__":
      run_cfg_ex = bch.BenchRunCfg()
      run_cfg_ex.level = 2
-     run_cfg_ex.cache_samples = True
-     run_cfg_ex.only_hash_tag = True
+     # run_cfg_ex.cache_samples = True
+     # run_cfg_ex.only_hash_tag = True

      # example_video(run_cfg_ex).report.show()
      example_video_tap(run_cfg_ex).report.show()
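
The replacement of the matplotlib figure pipeline with apply_colormap plus PIL is self-contained enough to test outside the simulation. A standalone sketch of just that image path, reusing the helper as added above in condensed form (the 64x64 random field stands in for the simulation state U):

    import numpy as np
    from PIL import Image
    import colorcet as cc

    def apply_colormap(data):
        # same steps as the helper above: normalize, then index into the colormap
        normalized = (data - data.min()) / (data.max() - data.min())
        colors = np.array(
            [np.frombuffer(bytes.fromhex(c.lstrip("#")), dtype=np.uint8) for c in cc.rainbow]
        )
        indices = (normalized * (len(colors) - 1)).astype(int)
        return colors[indices]

    field = np.random.rand(64, 64)  # stand-in for the simulation state U
    rgb = apply_colormap(field)  # uint8 array of shape (64, 64, 3)
    img = Image.fromarray(rgb, "RGB").convert("RGBA")
    img = img.resize((200, 200), Image.Resampling.LANCZOS)
    frame = np.array(img)  # RGBA frame, ready for bch.VideoWriter().append(frame)
    print(frame.shape)  # (200, 200, 4)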
bencher/example/experimental/example_hvplot_explorer.py

@@ -1,6 +1,5 @@
  # THIS IS NOT A WORKING EXAMPLE YET
  # pylint: disable=duplicate-code
- import hvplot
  import bencher as bch
  from bencher import ExampleBenchCfgIn, ExampleBenchCfgOut, bench_function


@@ -8,7 +7,7 @@ bench = bch.Bench("Bencher_Example_Simple", bench_function, ExampleBenchCfgIn)


  if __name__ == "__main__":
-     bench_out = bench.plot_sweep(
+     res = bench.plot_sweep(
          input_vars=[ExampleBenchCfgIn.param.theta, ExampleBenchCfgIn.param.offset],
          result_vars=[ExampleBenchCfgOut.param.out_sin],
          title="Float 1D Example",

@@ -35,5 +34,5 @@ if __name__ == "__main__":
          ),
      )

-     hvexplorer = hvplot.explorer(bench_out.get_dataframe())
-     hvexplorer.show()
+     bench.add(bch.ExplorerResult)
+     bench.report.show()
bencher/example/inputs_0D/example_0_in_1_out.py

@@ -5,13 +5,13 @@ import random


  class SimpleFloat0D(bch.ParametrizedSweep):
-     """This class has 0 input dimensions and 1 output dimensions. It samples from a gaussian distribution"""
+     """This class has 0 input dimensions and 1 output dimension. It samples from a Gaussian distribution"""

      # This defines a variable that we want to plot
      output = bch.ResultVar(units="ul", doc="a sample from a gaussian distribution")

      def __call__(self, **kwargs) -> dict:
-         """Generate a sample from a uniform distribution
+         """Generate a sample from a Gaussian distribution

          Returns:
              dict: a dictionary with all the result variables in the ParametrisedSweep class as named key value pairs.

@@ -24,31 +24,41 @@ class SimpleFloat0D(bch.ParametrizedSweep):
  def example_0_in_1_out(
      run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None
  ) -> bch.Bench:
-     """This example shows how to sample a 1 dimensional float variable and plot the result of passing that parameter sweep to the benchmarking function"""
+     """This example shows how to sample a 0-dimensional variable (no input parameters)
+     and plot the result of that sampling operation.
+
+     Args:
+         run_cfg: Configuration for the benchmark run
+         report: Report to append the results to
+
+     Returns:
+         bch.Bench: The benchmark object
+     """

      bench = SimpleFloat0D().to_bench(run_cfg, report)
      bench.plot_sweep()

-     bench.report.append(bench.get_result().to_table())
+     bench.add(bch.TableResult)
      return bench


  if __name__ == "__main__":
-     run_config = bch.BenchRunCfg(repeats=100)
-     reprt = bch.BenchReport()
-     # example_0_in_1_out(run_cfg, report).report.show()
+     example_0_in_1_out().report.show()
+     # run_config = bch.BenchRunCfg(repeats=100)
+     # report_obj = bch.BenchReport()
+     # example_0_in_1_out(run_config, report_obj).report.show()

      # run_cfg.over_time = True
      # run_cfg.cache_samples = True
      # for i in range(4):
-     #     example_0_in_1_out(run_cfg, report)
+     #     example_0_in_1_out(run_cfg, report_obj)

-     run_config.over_time = True
-     run_config.auto_plot = False
-     for _ in range(4):
-         example_0_in_1_out(run_config, reprt)
+     # run_config.over_time = True
+     # run_config.auto_plot = False
+     # for _ in range(4):
+     #     example_0_in_1_out(run_config, report_obj)

-     run_config.auto_plot = True
-     example_0_in_1_out(run_config, reprt)
+     # run_config.auto_plot = True
+     # example_0_in_1_out(run_config, report_obj)

-     reprt.show()
+     # report_obj.show()
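
The block that 1.42.0 comments out preserves the 1.40.1 over_time recipe: accumulate samples across several runs with auto_plot disabled, then make a final plotting run. A sketch of that recipe, assuming the flags still behave as the old code implies:

    import bencher as bch
    from bencher.example.inputs_0D.example_0_in_1_out import example_0_in_1_out

    run_config = bch.BenchRunCfg(repeats=100)
    report = bch.BenchReport()

    run_config.over_time = True  # keep appending samples across runs
    run_config.auto_plot = False  # defer plotting while accumulating
    for _ in range(4):
        example_0_in_1_out(run_config, report)

    run_config.auto_plot = True  # the final run renders the accumulated series
    example_0_in_1_out(run_config, report)
    report.show()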
bencher/example/inputs_0D/example_0_in_2_out.py

@@ -5,14 +5,14 @@ import random


  class SimpleFloat0D(bch.ParametrizedSweep):
-     """This class has 0 input dimensions and 1 output dimensions. It samples from a gaussian distribution"""
+     """This class has 0 input dimensions and 2 output dimensions. It samples from Gaussian distributions"""

      # This defines a variable that we want to plot
      output1 = bch.ResultVar(units="ul", doc="a sample from a gaussian distribution")
      output2 = bch.ResultVar(units="ul", doc="a sample from a gaussian distribution")

      def __call__(self, **kwargs) -> dict:
-         """Generate a sample from a uniform distribution
+         """Generate samples from Gaussian distributions

          Returns:
              dict: a dictionary with all the result variables in the ParametrisedSweep class as named key value pairs.

@@ -26,7 +26,16 @@ class SimpleFloat0D(bch.ParametrizedSweep):
  def example_0_in_2_out(
      run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None
  ) -> bch.Bench:
-     """This example shows how to sample a 1 dimensional float variable and plot the result of passing that parameter sweep to the benchmarking function"""
+     """This example shows how to sample a 0-dimensional variable (no input parameters)
+     that produces two output values and plot the results.
+
+     Args:
+         run_cfg: Configuration for the benchmark run
+         report: Report to append the results to
+
+     Returns:
+         bch.Bench: The benchmark object
+     """

      bench = SimpleFloat0D().to_bench(run_cfg, report)
      bench.plot_sweep()
bencher/example/inputs_0_float/example_0_cat_in_2_out.py

@@ -0,0 +1,88 @@
+ """This file demonstrates benchmarking with 0 categorical inputs and 2 output variables.
+
+ It benchmarks a single Python operation configuration to showcase output variations
+ using simulated performance data to illustrate how benchmarking works with fixed inputs.
+ """
+
+ import random
+ import bencher as bch
+
+ random.seed(0)
+
+
+ class PythonOperations0CatBenchmark(bch.ParametrizedSweep):
+     """Example class for benchmarking with no categorical variables.
+
+     This class demonstrates how to structure a benchmark with fixed inputs
+     and multiple output metrics. It uses simulated performance data that follows realistic
+     patterns while being deterministic and reproducible.
+     """
+
+     # All inputs are fixed (list data structure, read operation, medium size)
+
+     execution_time = bch.ResultVar(units="ms", doc="Execution time in milliseconds")
+     memory_peak = bch.ResultVar(units="KB", doc="Peak memory usage in kilobytes")
+
+     def __call__(self, **kwargs) -> dict:
+         """Execute the benchmark for the given set of parameters.
+
+         Args:
+             **kwargs: Parameters to update before executing
+
+         Returns:
+             dict: Dictionary containing the benchmark results
+         """
+         self.update_params_from_kwargs(**kwargs)
+
+         # Base values for list read operation on medium data
+         base_time = 28.0  # ms
+         base_memory = 960.0  # KB
+
+         # Add significant variance to show distribution of results
+         # even with fixed inputs
+         self.execution_time = base_time * random.gauss(0.75, 1.25)
+         self.memory_peak = base_memory * random.gauss(0.80, 1.20)
+
+         return super().__call__(**kwargs)
+
+
+ def example_0_cat_in_2_out(
+     run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None
+ ) -> bch.Bench:
+     """This example demonstrates benchmarking with no categorical variables and multiple output metrics.
+
+     It creates a synthetic benchmark that simulates performance variations when repeatedly running
+     the same operation (list read operation on medium data). This example shows that even with
+     fixed inputs, repeated benchmark runs produce variations in performance metrics.
+
+     Args:
+         run_cfg: Configuration for the benchmark run
+         report: Report to append the results to
+
+     Returns:
+         bch.Bench: The benchmark object
+     """
+
+     if run_cfg is None:
+         run_cfg = bch.BenchRunCfg()
+         run_cfg.repeats = 100  # More repeats to show distribution
+     bench = PythonOperations0CatBenchmark().to_bench(run_cfg, report)
+     bench.plot_sweep(
+         title="Python List Read Operation Performance (Fixed Inputs)",
+         description="Distribution of execution time and peak memory usage across multiple runs",
+         post_description="""
+         This benchmark illustrates how performance metrics vary even with fixed inputs.
+
+         Key observations:
+         - Each run uses the same configuration: list data structure, read operation, medium data size
+         - Despite identical inputs, performance metrics show natural variations
+         - This variance simulates real-world system fluctuations that occur in benchmarking
+         - With no categorical variables, the benchmark helps establish baseline performance
+           distribution for a single configuration
+         """,
+     )
+     return bench
+
+
+ if __name__ == "__main__":
+     example_0_cat_in_2_out().report.show()
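
One detail worth noting in the new example: random.gauss(0.75, 1.25) draws a multiplier with mean 0.75 and standard deviation 1.25, so the spread is wide relative to the mean and roughly a quarter of draws are negative, which is what makes the repeat-distribution plots visibly scattered. A quick standalone check of that multiplier distribution:

    import random

    random.seed(0)
    samples = [random.gauss(0.75, 1.25) for _ in range(10_000)]
    mean = sum(samples) / len(samples)
    negative_share = sum(s < 0 for s in samples) / len(samples)
    print(f"mean multiplier: {mean:.2f}")  # close to 0.75
    print(f"share below zero: {negative_share:.1%}")  # about 27%, i.e. Phi(-0.6)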
bencher/example/inputs_0_float/example_1_cat_in_2_out.py

@@ -0,0 +1,98 @@
+ """This file demonstrates benchmarking with 1 categorical input and 2 output variables.
+
+ It benchmarks different Python data structures to compare their performance characteristics
+ using simulated performance data to illustrate how benchmarking works.
+ """
+
+ import random
+ import bencher as bch
+
+ random.seed(0)
+
+
+ class PythonOperations1CatBenchmark(bch.ParametrizedSweep):
+     """Example class for benchmarking different Python data structures using 1 categorical variable.
+
+     This class demonstrates how to structure a benchmark with a single input parameter
+     and multiple output metrics. It uses simulated performance data that follows realistic
+     patterns while being deterministic and reproducible.
+     """
+
+     data_structure = bch.StringSweep(["list", "dict"], doc="Type of data structure to operate on")
+
+     # Using fixed read operation and medium size data
+
+     execution_time = bch.ResultVar(units="ms", doc="Execution time in milliseconds")
+     memory_peak = bch.ResultVar(units="KB", doc="Peak memory usage in kilobytes")
+
+     def __call__(self, **kwargs) -> dict:
+         """Execute the benchmark for the given set of parameters.
+
+         Args:
+             **kwargs: Parameters to update before executing
+
+         Returns:
+             dict: Dictionary containing the benchmark results
+         """
+         self.update_params_from_kwargs(**kwargs)
+
+         # Use deterministic fake data based on parameters
+         # Base values (for read operation on medium data)
+         base_time = 35.0  # ms (medium size, read operation base)
+         base_memory = 800.0  # KB (medium size, read operation base)
+
+         # Adjust for data structure (lists are generally faster but use more memory)
+         if self.data_structure == "list":
+             time_factor = 0.8
+             memory_factor = 1.2
+         else:  # dict
+             time_factor = 1.2
+             memory_factor = 0.9
+
+         # Calculate final metrics with significant variance to show differences
+         self.execution_time = base_time * time_factor * random.gauss(0.80, 1.20)
+         self.memory_peak = base_memory * memory_factor * random.gauss(0.85, 1.15)
+
+         return super().__call__(**kwargs)
+
+
+ def example_1_cat_in_2_out(
+     run_cfg: bch.BenchRunCfg = None, report: bch.BenchReport = None
+ ) -> bch.Bench:
+     """This example demonstrates benchmarking with 1 categorical variable and multiple output metrics.
+
+     It creates a synthetic benchmark that simulates performance characteristics of different
+     Python data structures (list vs dict) using a fixed read operation and medium data size.
+     The benchmark produces realistic patterns of execution time and memory usage without
+     actually executing real operations, making it ideal for learning and demonstration.
+
+     Args:
+         run_cfg: Configuration for the benchmark run
+         report: Report to append the results to
+
+     Returns:
+         bch.Bench: The benchmark object
+     """
+
+     if run_cfg is None:
+         run_cfg = bch.BenchRunCfg()
+         run_cfg.repeats = 5  # Fewer repeats for a quicker benchmark
+     bench = PythonOperations1CatBenchmark().to_bench(run_cfg, report)
+     bench.plot_sweep(
+         title="Python Data Structure Performance Benchmark (1 Variable)",
+         description="Comparing execution time and peak memory usage between lists and dictionaries",
+         post_description="""
+         This benchmark illustrates how different data structures affect performance.
+
+         Key observations:
+         - Lists generally process faster than dictionaries for read operations
+         - However, lists consume more memory than dictionaries
+         - All tests were performed with read operations on medium-sized datasets
+         - Note that variance in the results simulates real-world measurement fluctuations
+         """,
+     )
+     return bench
+
+
+ if __name__ == "__main__":
+     example_1_cat_in_2_out().report.show()