ennbo 0.1.2__py3-none-any.whl → 0.1.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123) hide show
  1. enn/__init__.py +25 -13
  2. enn/benchmarks/__init__.py +3 -0
  3. enn/benchmarks/ackley.py +5 -0
  4. enn/benchmarks/ackley_class.py +17 -0
  5. enn/benchmarks/ackley_core.py +12 -0
  6. enn/benchmarks/double_ackley.py +24 -0
  7. enn/enn/candidates.py +14 -0
  8. enn/enn/conditional_posterior_draw_internals.py +15 -0
  9. enn/enn/draw_internals.py +15 -0
  10. enn/enn/enn.py +16 -269
  11. enn/enn/enn_class.py +423 -0
  12. enn/enn/enn_conditional.py +325 -0
  13. enn/enn/enn_fit.py +69 -70
  14. enn/enn/enn_hash.py +79 -0
  15. enn/enn/enn_index.py +92 -0
  16. enn/enn/enn_like_protocol.py +35 -0
  17. enn/enn/enn_normal.py +0 -1
  18. enn/enn/enn_params.py +3 -22
  19. enn/enn/enn_params_class.py +24 -0
  20. enn/enn/enn_util.py +60 -46
  21. enn/enn/neighbor_data.py +14 -0
  22. enn/enn/neighbors.py +14 -0
  23. enn/enn/posterior_flags.py +8 -0
  24. enn/enn/weighted_stats.py +14 -0
  25. enn/turbo/components/__init__.py +41 -0
  26. enn/turbo/components/acquisition.py +13 -0
  27. enn/turbo/components/acquisition_optimizer_protocol.py +19 -0
  28. enn/turbo/components/builder.py +22 -0
  29. enn/turbo/components/chebyshev_incumbent_selector.py +76 -0
  30. enn/turbo/components/enn_surrogate.py +115 -0
  31. enn/turbo/components/gp_surrogate.py +144 -0
  32. enn/turbo/components/hnr_acq_optimizer.py +83 -0
  33. enn/turbo/components/incumbent_selector.py +11 -0
  34. enn/turbo/components/incumbent_selector_protocol.py +16 -0
  35. enn/turbo/components/no_incumbent_selector.py +21 -0
  36. enn/turbo/components/no_surrogate.py +49 -0
  37. enn/turbo/components/pareto_acq_optimizer.py +49 -0
  38. enn/turbo/components/posterior_result.py +12 -0
  39. enn/turbo/components/protocols.py +13 -0
  40. enn/turbo/components/random_acq_optimizer.py +21 -0
  41. enn/turbo/components/scalar_incumbent_selector.py +39 -0
  42. enn/turbo/components/surrogate_protocol.py +32 -0
  43. enn/turbo/components/surrogate_result.py +12 -0
  44. enn/turbo/components/surrogates.py +5 -0
  45. enn/turbo/components/thompson_acq_optimizer.py +49 -0
  46. enn/turbo/components/trust_region_protocol.py +24 -0
  47. enn/turbo/components/ucb_acq_optimizer.py +49 -0
  48. enn/turbo/config/__init__.py +87 -0
  49. enn/turbo/config/acq_type.py +8 -0
  50. enn/turbo/config/acquisition.py +26 -0
  51. enn/turbo/config/base.py +4 -0
  52. enn/turbo/config/candidate_gen_config.py +49 -0
  53. enn/turbo/config/candidate_rv.py +7 -0
  54. enn/turbo/config/draw_acquisition_config.py +14 -0
  55. enn/turbo/config/enn_index_driver.py +6 -0
  56. enn/turbo/config/enn_surrogate_config.py +42 -0
  57. enn/turbo/config/enums.py +7 -0
  58. enn/turbo/config/factory.py +118 -0
  59. enn/turbo/config/gp_surrogate_config.py +14 -0
  60. enn/turbo/config/hnr_optimizer_config.py +7 -0
  61. enn/turbo/config/init_config.py +17 -0
  62. enn/turbo/config/init_strategies/__init__.py +9 -0
  63. enn/turbo/config/init_strategies/hybrid_init.py +23 -0
  64. enn/turbo/config/init_strategies/init_strategy.py +19 -0
  65. enn/turbo/config/init_strategies/lhd_only_init.py +24 -0
  66. enn/turbo/config/morbo_tr_config.py +82 -0
  67. enn/turbo/config/nds_optimizer_config.py +7 -0
  68. enn/turbo/config/no_surrogate_config.py +14 -0
  69. enn/turbo/config/no_tr_config.py +31 -0
  70. enn/turbo/config/optimizer_config.py +72 -0
  71. enn/turbo/config/pareto_acquisition_config.py +14 -0
  72. enn/turbo/config/raasp_driver.py +6 -0
  73. enn/turbo/config/raasp_optimizer_config.py +7 -0
  74. enn/turbo/config/random_acquisition_config.py +14 -0
  75. enn/turbo/config/rescalarize.py +7 -0
  76. enn/turbo/config/surrogate.py +12 -0
  77. enn/turbo/config/trust_region.py +34 -0
  78. enn/turbo/config/turbo_tr_config.py +71 -0
  79. enn/turbo/config/ucb_acquisition_config.py +14 -0
  80. enn/turbo/config/validation.py +45 -0
  81. enn/turbo/hypervolume.py +30 -0
  82. enn/turbo/impl_helpers.py +68 -0
  83. enn/turbo/morbo_trust_region.py +131 -70
  84. enn/turbo/no_trust_region.py +32 -39
  85. enn/turbo/optimizer.py +300 -0
  86. enn/turbo/optimizer_config.py +8 -0
  87. enn/turbo/proposal.py +36 -38
  88. enn/turbo/sampling.py +21 -0
  89. enn/turbo/strategies/__init__.py +9 -0
  90. enn/turbo/strategies/lhd_only_strategy.py +36 -0
  91. enn/turbo/strategies/optimization_strategy.py +19 -0
  92. enn/turbo/strategies/turbo_hybrid_strategy.py +124 -0
  93. enn/turbo/tr_helpers.py +202 -0
  94. enn/turbo/turbo_gp.py +0 -1
  95. enn/turbo/turbo_gp_base.py +0 -1
  96. enn/turbo/turbo_gp_fit.py +187 -0
  97. enn/turbo/turbo_gp_noisy.py +0 -1
  98. enn/turbo/turbo_optimizer_utils.py +98 -0
  99. enn/turbo/turbo_trust_region.py +126 -58
  100. enn/turbo/turbo_utils.py +98 -161
  101. enn/turbo/types/__init__.py +7 -0
  102. enn/turbo/types/appendable_array.py +85 -0
  103. enn/turbo/types/gp_data_prep.py +13 -0
  104. enn/turbo/types/gp_fit_result.py +11 -0
  105. enn/turbo/types/obs_lists.py +10 -0
  106. enn/turbo/types/prepare_ask_result.py +14 -0
  107. enn/turbo/types/tell_inputs.py +14 -0
  108. {ennbo-0.1.2.dist-info → ennbo-0.1.7.dist-info}/METADATA +18 -11
  109. ennbo-0.1.7.dist-info/RECORD +111 -0
  110. enn/enn/__init__.py +0 -4
  111. enn/turbo/__init__.py +0 -11
  112. enn/turbo/base_turbo_impl.py +0 -144
  113. enn/turbo/lhd_only_impl.py +0 -49
  114. enn/turbo/turbo_config.py +0 -72
  115. enn/turbo/turbo_enn_impl.py +0 -201
  116. enn/turbo/turbo_mode.py +0 -10
  117. enn/turbo/turbo_mode_impl.py +0 -76
  118. enn/turbo/turbo_one_impl.py +0 -302
  119. enn/turbo/turbo_optimizer.py +0 -525
  120. enn/turbo/turbo_zero_impl.py +0 -29
  121. ennbo-0.1.2.dist-info/RECORD +0 -29
  122. {ennbo-0.1.2.dist-info → ennbo-0.1.7.dist-info}/WHEEL +0 -0
  123. {ennbo-0.1.2.dist-info → ennbo-0.1.7.dist-info}/licenses/LICENSE +0 -0
enn/turbo/turbo_utils.py CHANGED
@@ -1,20 +1,35 @@
1
1
  from __future__ import annotations
2
-
3
2
  import contextlib
4
- from typing import TYPE_CHECKING, Any, Iterator
3
+ from dataclasses import dataclass
4
+ from typing import TYPE_CHECKING, Any, Callable, Iterator
5
+ import numpy as np
5
6
 
6
7
  if TYPE_CHECKING:
7
- import numpy as np
8
8
  import torch
9
- from gpytorch.likelihoods import GaussianLikelihood
10
9
  from numpy.random import Generator
11
10
  from scipy.stats._qmc import QMCEngine
11
+ __all__ = [
12
+ "Telemetry",
13
+ ]
14
+
12
15
 
13
- from .turbo_gp import TurboGP
14
- from .turbo_gp_noisy import TurboGPNoisy
16
@dataclass(frozen=True)
class Telemetry:
    """Immutable snapshot of step durations, in seconds.

    NOTE(review): field names suggest the fit / selection / generation /
    tell phases of an ask-tell loop; confirm against call sites.
    """

    dt_fit: float
    dt_sel: float
    # Optional phases default to 0.0 so callers may report only fit/sel.
    dt_gen: float = 0.0
    dt_tell: float = 0.0
15
22
 
16
23
 
17
- from enn.enn.enn_util import standardize_y
24
@contextlib.contextmanager
def record_duration(set_dt: Callable[[float], None]) -> Iterator[None]:
    """Time the managed block and report the elapsed seconds to *set_dt*.

    The callback is invoked on exit even when the body raises, so
    durations are recorded for failing steps too.
    """
    import time

    started_at = time.perf_counter()
    try:
        yield
    finally:
        elapsed = time.perf_counter() - started_at
        set_dt(elapsed)
18
33
 
19
34
 
20
35
  def _next_power_of_2(n: int) -> int:
@@ -43,138 +58,27 @@ def torch_seed_context(
43
58
  yield
44
59
 
45
60
 
46
- def fit_gp(
47
- x_obs_list: list[float] | list[list[float]],
48
- y_obs_list: list[float] | list[list[float]],
49
- num_dim: int,
50
- *,
51
- yvar_obs_list: list[float] | None = None,
52
- num_steps: int = 50,
53
- ) -> tuple[
54
- "TurboGP | TurboGPNoisy | None",
55
- "GaussianLikelihood | None",
56
- float | np.ndarray,
57
- float | np.ndarray,
58
- ]:
59
- import numpy as np
60
- import torch
61
- from gpytorch.constraints import Interval
62
- from gpytorch.likelihoods import GaussianLikelihood
63
- from gpytorch.mlls import ExactMarginalLogLikelihood
64
-
65
- from .turbo_gp import TurboGP
66
- from .turbo_gp_noisy import TurboGPNoisy
67
-
68
- x = np.asarray(x_obs_list, dtype=float)
69
- y = np.asarray(y_obs_list, dtype=float)
70
- n = x.shape[0]
71
- if y.ndim not in (1, 2):
72
- raise ValueError(y.shape)
73
- is_multi_output = y.ndim == 2 and y.shape[1] > 1
74
- if yvar_obs_list is not None:
75
- if len(yvar_obs_list) != len(y_obs_list):
76
- raise ValueError(
77
- f"yvar_obs_list length {len(yvar_obs_list)} != y_obs_list length {len(y_obs_list)}"
78
- )
79
- if is_multi_output:
80
- raise ValueError("yvar_obs_list not supported for multi-output GP")
81
- if n == 0:
82
- if is_multi_output:
83
- num_outputs = int(y.shape[1])
84
- return None, None, np.zeros(num_outputs), np.ones(num_outputs)
85
- return None, None, 0.0, 1.0
86
- if n == 1 and is_multi_output:
87
- gp_y_mean = y[0].copy()
88
- gp_y_std = np.ones(int(y.shape[1]), dtype=float)
89
- return None, None, gp_y_mean, gp_y_std
90
-
91
- if is_multi_output:
92
- gp_y_mean = y.mean(axis=0)
93
- gp_y_std = y.std(axis=0)
94
- gp_y_std = np.where(gp_y_std < 1e-6, 1.0, gp_y_std)
95
- z = (y - gp_y_mean) / gp_y_std
96
- else:
97
- gp_y_mean, gp_y_std = standardize_y(y)
98
- y_centered = y - gp_y_mean
99
- z = y_centered / gp_y_std
100
- train_x = torch.as_tensor(x, dtype=torch.float64)
101
- if is_multi_output:
102
- train_y = torch.as_tensor(z.T, dtype=torch.float64)
103
- else:
104
- train_y = torch.as_tensor(z, dtype=torch.float64)
105
- lengthscale_constraint = Interval(0.005, 2.0)
106
- outputscale_constraint = Interval(0.05, 20.0)
107
- if yvar_obs_list is not None:
108
- y_var = np.asarray(yvar_obs_list, dtype=float)
109
- train_y_var = torch.as_tensor(y_var / (gp_y_std**2), dtype=torch.float64)
110
- model = TurboGPNoisy(
111
- train_x=train_x,
112
- train_y=train_y,
113
- train_y_var=train_y_var,
114
- lengthscale_constraint=lengthscale_constraint,
115
- outputscale_constraint=outputscale_constraint,
116
- ard_dims=num_dim,
117
- ).to(dtype=train_x.dtype)
118
- likelihood = model.likelihood
119
- else:
120
- noise_constraint = Interval(5e-4, 0.2)
121
- if is_multi_output:
122
- num_outputs = int(y.shape[1])
123
- likelihood = GaussianLikelihood(
124
- noise_constraint=noise_constraint,
125
- batch_shape=torch.Size([num_outputs]),
126
- ).to(dtype=train_y.dtype)
127
- else:
128
- likelihood = GaussianLikelihood(noise_constraint=noise_constraint).to(
129
- dtype=train_y.dtype
130
- )
131
- model = TurboGP(
132
- train_x=train_x,
133
- train_y=train_y,
134
- likelihood=likelihood,
135
- lengthscale_constraint=lengthscale_constraint,
136
- outputscale_constraint=outputscale_constraint,
137
- ard_dims=num_dim,
138
- ).to(dtype=train_x.dtype)
139
- if is_multi_output:
140
- likelihood.noise = torch.full(
141
- (int(y.shape[1]),), 0.005, dtype=train_y.dtype
142
- )
143
- else:
144
- likelihood.noise = torch.tensor(0.005, dtype=train_y.dtype)
145
- if is_multi_output:
146
- num_outputs = int(y.shape[1])
147
- model.covar_module.outputscale = torch.ones(num_outputs, dtype=train_x.dtype)
148
- model.covar_module.base_kernel.lengthscale = torch.full(
149
- (num_outputs, 1, num_dim), 0.5, dtype=train_x.dtype
150
- )
151
- else:
152
- model.covar_module.outputscale = torch.tensor(1.0, dtype=train_x.dtype)
153
- model.covar_module.base_kernel.lengthscale = torch.full(
154
- (num_dim,), 0.5, dtype=train_x.dtype
61
def get_gp_posterior_suppress_warning(model: Any, x_torch: Any) -> Any:
    """Evaluate ``model.posterior(x_torch)`` while muting GPInputWarning.

    gpytorch warns when the query input matches the stored training data;
    that warning is filtered out here. If gpytorch (or the warning class)
    cannot be imported, the posterior is evaluated with no filter at all.
    """
    import warnings

    try:
        from gpytorch.utils.warnings import GPInputWarning as _warning_cls
    except Exception:
        _warning_cls = None

    if _warning_cls is None:
        # gpytorch unavailable — nothing to suppress.
        return model.posterior(x_torch)

    with warnings.catch_warnings():
        warnings.filterwarnings(
            "ignore",
            message=r"The input matches the stored training data\..*",
            category=_warning_cls,
        )
        return model.posterior(x_torch)
171
77
 
172
78
 
173
79
  def latin_hypercube(
174
80
  num_points: int, num_dim: int, *, rng: Generator | Any
175
81
  ) -> np.ndarray:
176
- import numpy as np
177
-
178
82
  x = np.zeros((num_points, num_dim))
179
83
  centers = (1.0 + 2.0 * np.arange(0.0, num_points)) / float(2 * num_points)
180
84
  for j in range(num_dim):
@@ -185,8 +89,6 @@ def latin_hypercube(
185
89
 
186
90
 
187
91
  def argmax_random_tie(values: np.ndarray | Any, *, rng: Generator | Any) -> int:
188
- import numpy as np
189
-
190
92
  if values.ndim != 1:
191
93
  raise ValueError(values.shape)
192
94
  max_val = float(np.max(values))
@@ -208,8 +110,6 @@ def sobol_perturb_np(
208
110
  *,
209
111
  sobol_engine: QMCEngine | Any,
210
112
  ) -> np.ndarray:
211
- import numpy as np
212
-
213
113
  n_sobol = _next_power_of_2(num_candidates)
214
114
  sobol_samples = sobol_engine.random(n_sobol)[:num_candidates]
215
115
  lb_array = np.asarray(lb)
@@ -221,29 +121,76 @@ def sobol_perturb_np(
221
121
  return candidates
222
122
 
223
123
 
224
- def raasp(
124
+ def uniform_perturb_np(
225
125
  x_center: np.ndarray | Any,
226
126
  lb: np.ndarray | list[float] | Any,
227
127
  ub: np.ndarray | list[float] | Any,
228
128
  num_candidates: int,
129
+ mask: np.ndarray | Any,
229
130
  *,
230
- num_pert: int = 20,
231
131
  rng: Generator | Any,
232
- sobol_engine: QMCEngine | Any,
233
132
  ) -> np.ndarray:
234
- import numpy as np
133
+ lb_array = np.asarray(lb)
134
+ ub_array = np.asarray(ub)
135
+ pert = lb_array + (ub_array - lb_array) * rng.uniform(
136
+ 0.0, 1.0, size=(num_candidates, x_center.shape[-1])
137
+ )
138
+ candidates = np.tile(x_center, (num_candidates, 1))
139
+ if np.any(mask):
140
+ candidates[mask] = pert[mask]
141
+ return candidates
235
142
 
236
- num_dim = x_center.shape[-1]
143
+
144
+ def _raasp_mask(
145
+ *,
146
+ num_candidates: int,
147
+ num_dim: int,
148
+ num_pert: int,
149
+ rng: Generator | Any,
150
+ ) -> np.ndarray:
237
151
  prob_perturb = min(num_pert / num_dim, 1.0)
238
152
  mask = rng.random((num_candidates, num_dim)) <= prob_perturb
239
153
  ind = np.nonzero(~mask.any(axis=1))[0]
240
154
  if len(ind) > 0:
241
155
  mask[ind, rng.integers(0, num_dim, size=len(ind))] = True
156
+ return mask
157
+
158
+
159
def raasp(
    x_center: np.ndarray | Any,
    lb: np.ndarray | list[float] | Any,
    ub: np.ndarray | list[float] | Any,
    num_candidates: int,
    *,
    num_pert: int = 20,
    rng: Generator | Any,
    sobol_engine: QMCEngine | Any,
) -> np.ndarray:
    """RAASP candidate generation around *x_center*.

    A random subset of coordinates (see _raasp_mask) is replaced with
    quasi-random draws from the Sobol engine, scaled into [lb, ub].
    """
    dim = x_center.shape[-1]
    perturb_mask = _raasp_mask(
        num_candidates=num_candidates,
        num_dim=dim,
        num_pert=num_pert,
        rng=rng,
    )
    return sobol_perturb_np(
        x_center, lb, ub, num_candidates, perturb_mask, sobol_engine=sobol_engine
    )
245
176
 
246
177
 
178
def raasp_uniform(
    x_center: np.ndarray | Any,
    lb: np.ndarray | list[float] | Any,
    ub: np.ndarray | list[float] | Any,
    num_candidates: int,
    *,
    num_pert: int = 20,
    rng: Generator | Any,
) -> np.ndarray:
    """RAASP variant that fills masked coordinates with uniform draws
    instead of Sobol samples (no QMC engine required)."""
    dim = x_center.shape[-1]
    perturb_mask = _raasp_mask(
        num_candidates=num_candidates,
        num_dim=dim,
        num_pert=num_pert,
        rng=rng,
    )
    return uniform_perturb_np(
        x_center, lb, ub, num_candidates, perturb_mask, rng=rng
    )
192
+
193
+
247
194
  def generate_raasp_candidates(
248
195
  center: np.ndarray | Any,
249
196
  lb: np.ndarray | list[float] | Any,
@@ -267,29 +214,21 @@ def generate_raasp_candidates(
267
214
  )
268
215
 
269
216
 
270
- def generate_trust_region_candidates(
271
- x_center: np.ndarray | Any,
272
- lengthscales: np.ndarray | None,
217
def generate_raasp_candidates_uniform(
    center: np.ndarray | Any,
    lb: np.ndarray | list[float] | Any,
    ub: np.ndarray | list[float] | Any,
    num_candidates: int,
    *,
    rng: Generator | Any,
    num_pert: int = 20,
) -> np.ndarray:
    """Validated entry point for uniform RAASP candidate generation.

    Raises:
        ValueError: if *num_candidates* is not positive.
    """
    if num_candidates <= 0:
        raise ValueError(num_candidates)
    return raasp_uniform(
        center, lb, ub, num_candidates, num_pert=num_pert, rng=rng
    )
288
229
 
289
230
 
290
231
  def to_unit(x: np.ndarray | Any, bounds: np.ndarray | Any) -> np.ndarray:
291
- import numpy as np
292
-
293
232
  lb = bounds[:, 0]
294
233
  ub = bounds[:, 1]
295
234
  if np.any(ub <= lb):
@@ -298,8 +237,6 @@ def to_unit(x: np.ndarray | Any, bounds: np.ndarray | Any) -> np.ndarray:
298
237
 
299
238
 
300
239
def from_unit(x_unit: np.ndarray | Any, bounds: np.ndarray | Any) -> np.ndarray:
    """Map points from the unit cube [0, 1]^d into the box whose lower and
    upper edges are bounds[:, 0] and bounds[:, 1]."""
    lower = np.asarray(bounds[:, 0])
    upper = np.asarray(bounds[:, 1])
    span = upper - lower
    return lower + x_unit * span
  return lb + x_unit * (ub - lb)
@@ -310,11 +247,11 @@ def gp_thompson_sample(
310
247
  x_cand: np.ndarray | Any,
311
248
  num_arms: int,
312
249
  rng: Generator | Any,
250
+ *,
313
251
  gp_y_mean: float,
314
252
  gp_y_std: float,
315
253
  ) -> np.ndarray:
316
254
  import gpytorch
317
- import numpy as np
318
255
  import torch
319
256
 
320
257
  x_torch = torch.as_tensor(x_cand, dtype=torch.float64)
@@ -0,0 +1,7 @@
1
+ from .gp_data_prep import GPDataPrep
2
+ from .gp_fit_result import GPFitResult
3
+ from .obs_lists import ObsLists
4
+ from .prepare_ask_result import PrepareAskResult
5
+ from .tell_inputs import TellInputs
6
+
7
+ __all__ = ["GPDataPrep", "GPFitResult", "ObsLists", "PrepareAskResult", "TellInputs"]
@@ -0,0 +1,85 @@
1
+ from __future__ import annotations
2
+
3
+ import numpy as np
4
+
5
+
6
class AppendableArray:
    """Grow-only 2-D array with amortized O(1) row appends.

    Rows are stored in an over-allocated numpy buffer that doubles in
    capacity when full. The first append fixes the column count and the
    dtype; every later row must match that column count. Scalar (0-D),
    1-D, and (1, D)-shaped inputs are all accepted and normalized to a
    single row.
    """

    def __init__(self, initial_capacity: int = 100) -> None:
        self._initial_capacity = initial_capacity
        # Allocation is deferred to the first append() so the column
        # count and dtype can be taken from the first row.
        self._buffer: np.ndarray | None = None
        self._size = 0
        self._num_cols: int | None = None

    @property
    def shape(self) -> tuple[int, int]:
        """Logical (rows, cols); (0, 0) before the first append."""
        if self._num_cols is None:
            return (0, 0)
        return (self._size, self._num_cols)

    def _initialize_buffer(self, row: np.ndarray) -> None:
        """Fix the column count from the first row and allocate storage."""
        if row.ndim == 0:
            self._num_cols = 1
            row = row.reshape(1, 1)
        elif row.ndim == 1:
            self._num_cols = row.shape[0]
            row = row[np.newaxis, :]
        elif row.ndim == 2:
            if row.shape[0] != 1:
                raise ValueError(f"Expected row shape (1, D), got {row.shape}")
            self._num_cols = row.shape[1]
        else:
            raise ValueError(f"Expected 0D, 1D or 2D array, got {row.ndim}D")

        # BUGFIX: clamp to at least one slot. With initial_capacity <= 0
        # the old code allocated a zero-row buffer and the doubling step
        # below (0 * 2 == 0) could never make room for the first append.
        capacity = max(1, self._initial_capacity)
        self._buffer = np.empty((capacity, self._num_cols), dtype=row.dtype)

    def _validate_row(self, row: np.ndarray) -> np.ndarray:
        """Return *row* reshaped to (1, num_cols), or raise ValueError."""
        if row.ndim == 0:
            if self._num_cols != 1:
                raise ValueError(f"Expected {self._num_cols} columns, got 1 (scalar)")
            return row.reshape(1, 1)
        if row.ndim == 1:
            if row.shape[0] != self._num_cols:
                raise ValueError(
                    f"Expected {self._num_cols} columns, got {row.shape[0]}"
                )
            return row[np.newaxis, :]
        if row.ndim == 2:
            if row.shape != (1, self._num_cols):
                raise ValueError(
                    f"Expected shape (1, {self._num_cols}), got {row.shape}"
                )
            return row
        raise ValueError(f"Expected 0D, 1D or 2D array, got {row.ndim}D")

    def append(self, row: np.ndarray) -> None:
        """Append one row, doubling the buffer when it is full.

        Raises:
            ValueError: if the row's shape is incompatible with the
                column count fixed by the first append.
        """
        row = np.asarray(row)

        if self._num_cols is None:
            self._initialize_buffer(row)

        row = self._validate_row(row)

        assert self._buffer is not None
        if self._size + 1 > self._buffer.shape[0]:
            # Double the capacity; safe because capacity is always >= 1.
            new_capacity = self._buffer.shape[0] * 2
            new_buffer = np.empty(
                (new_capacity, self._num_cols), dtype=self._buffer.dtype
            )
            new_buffer[: self._size] = self._buffer[: self._size]
            self._buffer = new_buffer

        self._buffer[self._size] = row
        self._size += 1

    def view(self) -> np.ndarray:
        """View of the first len(self) rows of the internal buffer.

        NOTE: this is a numpy view, not a copy; a later append() may
        reallocate the buffer and detach previously returned views.
        """
        if self._buffer is None:
            return np.empty((0, 0))
        return self._buffer[: self._size]

    def __len__(self) -> int:
        return self._size

    def __getitem__(self, key: Any) -> np.ndarray:
        return self.view()[key]
@@ -0,0 +1,13 @@
1
+ from __future__ import annotations
2
+ from dataclasses import dataclass
3
+ from typing import Any
4
+
5
+
6
@dataclass
class GPDataPrep:
    """Prepared GP training data plus the y-standardization statistics.

    NOTE(review): fields are untyped (Any); names suggest train_x/train_y
    are tensors and y_mean/y_std the standardization parameters, with
    y_raw the untransformed targets — confirm against the producing code.
    """

    train_x: Any
    train_y: Any
    is_multi: bool
    y_mean: Any
    y_std: Any
    y_raw: Any
@@ -0,0 +1,11 @@
1
+ from __future__ import annotations
2
+ from dataclasses import dataclass
3
+ from typing import Any
4
+
5
+
6
@dataclass
class GPFitResult:
    """Result of a GP fit: the model, its likelihood, and the y
    standardization statistics (mean/std) used during training."""

    model: Any
    likelihood: Any
    y_mean: Any
    y_std: Any
@@ -0,0 +1,10 @@
1
+ from __future__ import annotations
2
+ from dataclasses import dataclass
3
+
4
+
5
@dataclass
class ObsLists:
    """Per-observation lists kept together as one record.

    NOTE(review): y_tr presumably holds transformed / trust-region-scoped
    targets alongside the raw y_obs — confirm against usage.
    """

    x_obs: list
    y_obs: list
    y_tr: list
    yvar_obs: list
@@ -0,0 +1,14 @@
1
+ from __future__ import annotations
2
+ from dataclasses import dataclass
3
+ from typing import TYPE_CHECKING, Any
4
+
5
+ if TYPE_CHECKING:
6
+ import numpy as np
7
+
8
+
9
+ @dataclass
10
+ class PrepareAskResult:
11
+ model: Any
12
+ y_mean: float | None
13
+ y_std: float | None
14
+ lengthscales: np.ndarray | None
@@ -0,0 +1,14 @@
1
+ from __future__ import annotations
2
+ from dataclasses import dataclass
3
+ from typing import TYPE_CHECKING
4
+
5
+ if TYPE_CHECKING:
6
+ import numpy as np
7
+
8
+
9
@dataclass
class TellInputs:
    """Normalized arguments for a tell(): observed points, their values,
    optional observation-noise variances, and the metric count."""

    x: np.ndarray
    y: np.ndarray
    y_var: np.ndarray | None
    num_metrics: int
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: ennbo
3
- Version: 0.1.2
3
+ Version: 0.1.7
4
4
  Summary: Epistemic Nearest Neighbors
5
5
  Project-URL: Homepage, https://github.com/yubo-research/enn
6
6
  Project-URL: Source, https://github.com/yubo-research/enn
@@ -37,11 +37,14 @@ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
37
37
  Classifier: Topic :: Scientific/Engineering :: Mathematics
38
38
  Requires-Python: >=3.11
39
39
  Requires-Dist: faiss-cpu>=1.9.0
40
- Requires-Dist: gpytorch==1.13
41
- Requires-Dist: nds==0.4.3
40
+ Requires-Dist: gpytorch>=1.13
42
41
  Requires-Dist: numpy<2.0.0,>=1.26.4
43
- Requires-Dist: scipy==1.15.3
44
- Requires-Dist: torch==2.5.1
42
+ Requires-Dist: scipy>=1.15.3
43
+ Requires-Dist: torch>=2.5.1
44
+ Provides-Extra: dev
45
+ Requires-Dist: nbmake>=1.5; extra == 'dev'
46
+ Requires-Dist: pytest>=8.0; extra == 'dev'
47
+ Requires-Dist: ruff>=0.4; extra == 'dev'
45
48
  Description-Content-Type: text/markdown
46
49
 
47
50
  # Epistemic Nearest Neighbors
@@ -50,12 +53,16 @@ A fast, alternative surrogate for Bayesian optimization
50
53
  ENN estimates a function's value and associated epistemic uncertainty using a K-Nearest Neighbors model. Queries take $O(N \ln K)$ time, where $N$ is the number of observations available for KNN lookups. Compare to an exact GP, which takes $O(N^2)$ time. Additionally, measured running times are very small compared to GPs and other alternative surrogates. [1]
51
54
 
52
55
  ## Contents
53
- - ENN model, [`EpistemicNearestNeighbors`](https://github.com/yubo-research/enn/blob/main/src/enn/enn/enn.py) [1]
54
- - TuRBO-ENN optimizer, class [`TurboOptimizer`](https://github.com/yubo-research/enn/blob/main/src/enn/turbo/turbo_optimizer.py) has four modes
55
- - `TURBO_ONE` - A clone of the TuRBO [2] reference [code](https://github.com/uber-research/TuRBO), reworked to have an `ask()`/`tell()` interface.
56
- - `TURBO_ENN` - Same as TURBO_ONE, except uses ENN instead of GP and Pareto(mu, se) instead of Thompson sampling.
57
- - `TURBO_ZERO` - Same as TURBO_ONE, except randomly-chosen RAASP [3] candidates are picked to be proposals. There is no surrogate.
58
- - `LHD_ONLY` - Just creates an LHD design for every `ask()`. Good for a baseline and for testing.
56
+ - ENN surrogate, [`EpistemicNearestNeighbors`](https://github.com/yubo-research/enn/blob/main/src/enn/enn/enn.py) [1]
57
+ - TuRBO-ENN optimizer via [`create_optimizer`](https://github.com/yubo-research/enn/blob/main/src/enn/turbo/optimizer.py) with config factories
58
+ - `turbo_one_config()` - TuRBO [2], matching the reference implementation.
59
+ - `turbo_enn_config()` - Uses ENN instead of GP.
60
+ - `turbo_zero_config()` - No surrogate
61
+ - `lhd_only_config()` - LHD design on every `ask()`. Good for a baseline and for testing.
62
+ The optimizer has an `ask()/tell()` interface. All `turbo_*()` methods follow TuRBO:
63
+ - Generate candidates with RAASP [3] sampling.
64
+ - Select a candidate with Thompson sampling (TuRBO-one), UCB (TuRBO-ENN), or randomly (TuRBO-zero).
65
+
59
66
 
60
67
  [1] **Sweet, D., & Jadhav, S. A. (2025).** Taking the GP Out of the Loop. *arXiv preprint arXiv:2506.12818*.
61
68
  https://arxiv.org/abs/2506.12818