ennbo 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,337 @@
1
+ from __future__ import annotations
2
+
3
+ from dataclasses import dataclass
4
+ from typing import TYPE_CHECKING, Any, Callable
5
+
6
+ from .proposal import select_uniform
7
+ from .turbo_config import TurboConfig
8
+ from .turbo_utils import from_unit, latin_hypercube, to_unit
9
+
10
+
11
@dataclass(frozen=True)
class Telemetry:
    """Timing breakdown of the most recent ask() call."""

    # seconds spent fitting the surrogate model (0.0 when no model was fit)
    dt_fit: float
    # seconds spent selecting candidates (0.0 when selection was skipped)
    dt_sel: float
15
+
16
+
17
+ if TYPE_CHECKING:
18
+ import numpy as np
19
+ from numpy.random import Generator
20
+
21
+ from .turbo_mode import TurboMode
22
+ from .turbo_mode_impl import TurboModeImpl
23
+
24
+
25
class TurboOptimizer:
    """Trust-region Bayesian optimizer (TuRBO-style) with an ask/tell interface.

    Observations are stored in unit-cube coordinates (inputs are mapped via
    ``to_unit``/``from_unit`` against ``bounds``); proposals returned by
    ``ask`` are in the original coordinate system.  Mode-specific behavior
    (surrogate fitting, candidate selection, trust-region handling) is
    delegated to a ``TurboModeImpl`` chosen from ``mode``.

    NOTE(review): maximization is assumed throughout — ``best_tr_value`` and
    the trimming logic keep the *maximum* observed y.
    """

    def __init__(
        self,
        bounds: np.ndarray,
        mode: TurboMode,
        *,
        rng: Generator,
        config: TurboConfig | None = None,
    ) -> None:
        """Set up observation storage, the Sobol engine and the mode impl.

        Args:
            bounds: ``(num_dim, 2)`` array of per-dimension (lower, upper).
            mode: which TuRBO variant to run.
            rng: NumPy random generator used for all stochastic choices.
            config: optional ``TurboConfig``; defaults are filled in when omitted.

        Raises:
            ValueError: on malformed bounds, non-positive candidate count,
                ``k < 3``, non-positive ``trailing_obs`` or ``num_init``,
                or an unknown mode.
        """
        import numpy as np
        from scipy.stats import qmc

        from .turbo_mode import TurboMode

        if config is None:
            config = TurboConfig()
        self._config = config

        if bounds.ndim != 2 or bounds.shape[1] != 2:
            raise ValueError(bounds.shape)
        self._bounds = np.asarray(bounds, dtype=float)
        self._num_dim = self._bounds.shape[0]
        self._mode = mode
        num_candidates = config.num_candidates
        if num_candidates is None:
            # Default candidate pool scales with dimension, capped at 5000.
            num_candidates = min(5000, 100 * self._num_dim)

        self._num_candidates = int(num_candidates)
        if self._num_candidates <= 0:
            raise ValueError(self._num_candidates)
        self._rng = rng
        # Seed the scrambled Sobol engine from the caller's rng so a given
        # generator state reproduces the full run.
        sobol_seed = int(self._rng.integers(1_000_000))
        self._sobol_engine = qmc.Sobol(d=self._num_dim, scramble=True, seed=sobol_seed)
        # Observations live in unit-cube coordinates (see tell()).
        self._x_obs_list: list = []
        self._y_obs_list: list = []
        self._yvar_obs_list: list = []
        # Mode implementations are imported lazily so unused modes do not
        # pull in their dependencies.
        match mode:
            case TurboMode.TURBO_ONE:
                from .turbo_one_impl import TurboOneImpl

                self._mode_impl: TurboModeImpl = TurboOneImpl(config)
            case TurboMode.TURBO_ZERO:
                from .turbo_zero_impl import TurboZeroImpl

                self._mode_impl = TurboZeroImpl(config)
            case TurboMode.TURBO_ENN:
                from .turbo_enn_impl import TurboENNImpl

                self._mode_impl = TurboENNImpl(config)
            case TurboMode.LHD_ONLY:
                from .lhd_only_impl import LHDOnlyImpl

                self._mode_impl = LHDOnlyImpl(config)
            case _:
                raise ValueError(f"Unknown mode: {mode}")
        # Trust region is created lazily on the first ask().
        self._tr_state: Any | None = None
        self._gp_num_steps: int = 50
        if config.k is not None:
            k_val = int(config.k)
            if k_val < 3:
                raise ValueError(f"k must be >= 3, got {k_val}")
            self._k = k_val
        else:
            self._k = None
        if config.trailing_obs is not None:
            trailing_obs_val = int(config.trailing_obs)
            if trailing_obs_val <= 0:
                raise ValueError(f"trailing_obs must be > 0, got {trailing_obs_val}")
            self._trailing_obs = trailing_obs_val
        else:
            self._trailing_obs = None
        num_init = config.num_init
        if num_init is None:
            num_init = 2 * self._num_dim
        num_init_val = int(num_init)
        if num_init_val <= 0:
            raise ValueError(f"num_init must be > 0, got {num_init_val}")
        self._num_init = num_init_val
        if config.local_only:
            # Local-only mode: a single midpoint seed instead of a
            # space-filling initial design.
            center = 0.5 * (self._bounds[:, 0] + self._bounds[:, 1])
            self._init_lhd = center.reshape(1, -1)
            self._num_init = 1
        else:
            self._init_lhd = from_unit(
                latin_hypercube(self._num_init, self._num_dim, rng=self._rng),
                self._bounds,
            )
        # Cursor into the initial design; see _get_init_lhd_points().
        self._init_idx = 0
        # Telemetry of the most recent ask().
        self._dt_fit: float = 0.0
        self._dt_sel: float = 0.0
        self._local_only = config.local_only

    @property
    def tr_obs_count(self) -> int:
        """Number of observations currently held (after any trimming)."""
        return len(self._y_obs_list)

    @property
    def best_tr_value(self) -> float | None:
        """Best (maximum) observed objective value, or None if none yet."""
        import numpy as np

        if len(self._y_obs_list) == 0:
            return None
        return float(np.max(self._y_obs_list))

    @property
    def tr_length(self) -> float | None:
        """Current trust-region edge length, or None before the first ask()."""
        if self._tr_state is None:
            return None
        return float(self._tr_state.length)

    def telemetry(self) -> Telemetry:
        """Return fit/selection timings of the most recent ask()."""
        return Telemetry(dt_fit=self._dt_fit, dt_sel=self._dt_sel)

    def ask(self, num_arms: int) -> np.ndarray:
        """Propose ``num_arms`` points to evaluate, in original coordinates.

        Early calls are served from the initial Latin-hypercube design; once
        it is exhausted and observations exist, proposals come from the
        trust-region path (``_ask_normal``).

        Raises:
            ValueError: if ``num_arms`` is not positive.
        """
        num_arms = int(num_arms)
        if num_arms <= 0:
            raise ValueError(num_arms)
        if self._tr_state is None:
            self._tr_state = self._mode_impl.create_trust_region(
                self._num_dim, num_arms
            )
            if self._local_only:
                # Clamp the fresh trust region to a small box.
                self._tr_state.length_max = 0.1
                self._tr_state.length = min(self._tr_state.length, 0.1)
                self._tr_state.length_init = min(self._tr_state.length_init, 0.1)
        # Let the mode implementation answer without a model if it can
        # (e.g. pure space-filling modes).
        early_result = self._mode_impl.try_early_ask(
            num_arms,
            self._x_obs_list,
            self._draw_initial,
            self._get_init_lhd_points,
        )
        if early_result is not None:
            self._dt_fit = 0.0
            self._dt_sel = 0.0
            return early_result
        if self._init_idx < self._num_init:
            # Still consuming the initial design; arms beyond it fall back to
            # the model-based path only once observations exist.
            if len(self._x_obs_list) == 0:
                fallback_fn = None
            else:

                def fallback_fn(n: int) -> np.ndarray:
                    return self._ask_normal(n, is_fallback=True)

            self._dt_fit = 0.0
            self._dt_sel = 0.0
            return self._get_init_lhd_points(num_arms, fallback_fn=fallback_fn)
        if len(self._x_obs_list) == 0:
            self._dt_fit = 0.0
            self._dt_sel = 0.0
            return self._draw_initial(num_arms)
        return self._ask_normal(num_arms)

    def _ask_normal(self, num_arms: int, *, is_fallback: bool = False) -> np.ndarray:
        """Model-based proposal path: fit, generate candidates, select.

        ``is_fallback`` relaxes the batch-size check so a partial batch may
        be requested when topping up initial-design points.
        """
        import numpy as np

        if self._tr_state.needs_restart():
            # Region collapsed below its minimum length: restart and let the
            # mode impl decide whether to re-seed the initial design.
            self._tr_state.restart()
            should_reset_init, new_init_idx = self._mode_impl.handle_restart(
                self._x_obs_list,
                self._y_obs_list,
                self._yvar_obs_list,
                self._init_idx,
                self._num_init,
            )
            if should_reset_init:
                self._init_idx = new_init_idx
                self._init_lhd = from_unit(
                    latin_hypercube(self._num_init, self._num_dim, rng=self._rng),
                    self._bounds,
                )
                return self._get_init_lhd_points(num_arms)

        def from_unit_fn(x):
            return from_unit(x, self._bounds)

        if self._mode_impl.needs_tr_list() and len(self._x_obs_list) == 0:
            return self._get_init_lhd_points(num_arms)

        import time

        t0_fit = time.perf_counter()
        _gp_model, _gp_y_mean_fitted, _gp_y_std_fitted, weights = (
            self._mode_impl.prepare_ask(
                self._x_obs_list,
                self._y_obs_list,
                self._yvar_obs_list,
                self._num_dim,
                self._gp_num_steps,
                rng=self._rng,
            )
        )
        self._dt_fit = time.perf_counter() - t0_fit

        x_center = self._mode_impl.get_x_center(
            self._x_obs_list, self._y_obs_list, self._rng
        )
        if x_center is None:
            if len(self._y_obs_list) == 0:
                raise RuntimeError("no observations")
            # No center from the mode impl: default to the cube midpoint.
            x_center = np.full(self._num_dim, 0.5)

        x_cand = self._tr_state.generate_candidates(
            x_center,
            weights,
            self._num_candidates,
            self._rng,
            self._sobol_engine,
        )

        def fallback_fn(x, n):
            return select_uniform(x, n, self._num_dim, self._rng, from_unit_fn)

        self._tr_state.validate_request(num_arms, is_fallback=is_fallback)

        t0_sel = time.perf_counter()
        selected = self._mode_impl.select_candidates(
            x_cand,
            num_arms,
            self._num_dim,
            self._rng,
            fallback_fn,
            from_unit_fn,
        )
        self._dt_sel = time.perf_counter() - t0_sel

        self._mode_impl.update_trust_region(
            self._tr_state, self._y_obs_list, x_center=x_center, k=self._k
        )
        return selected

    def _trim_trailing_obs(self) -> None:
        """Drop all but the most recent ``_trailing_obs`` observations.

        The incumbent (max y, ties broken at random) is always kept: when it
        falls outside the trailing window it replaces the oldest kept entry.
        """
        import numpy as np

        from .turbo_utils import argmax_random_tie

        if len(self._x_obs_list) <= self._trailing_obs:
            return
        y_array = np.asarray(self._y_obs_list, dtype=float)
        incumbent_idx = argmax_random_tie(y_array, rng=self._rng)
        num_total = len(self._x_obs_list)
        start_idx = max(0, num_total - self._trailing_obs)
        if incumbent_idx < start_idx:
            # Keep the incumbent plus the (trailing_obs - 1) newest points.
            indices = np.array(
                [incumbent_idx]
                + list(range(num_total - (self._trailing_obs - 1), num_total)),
                dtype=int,
            )
        else:
            indices = np.arange(start_idx, num_total, dtype=int)
        if incumbent_idx not in indices:
            raise RuntimeError("Incumbent must be included in trimmed list")
        x_array = np.asarray(self._x_obs_list, dtype=float)
        incumbent_value = y_array[incumbent_idx]
        self._x_obs_list = x_array[indices].tolist()
        self._y_obs_list = y_array[indices].tolist()
        # y-variances are only trimmed when they were provided for every
        # observation; otherwise the (partial) list is left untouched.
        if len(self._yvar_obs_list) == len(y_array):
            yvar_array = np.asarray(self._yvar_obs_list, dtype=float)
            self._yvar_obs_list = yvar_array[indices].tolist()
        y_trimmed = np.asarray(self._y_obs_list, dtype=float)
        if not np.any(np.abs(y_trimmed - incumbent_value) < 1e-10):
            raise RuntimeError("Incumbent value must be preserved in trimmed list")

    def tell(
        self,
        x: np.ndarray | Any,
        y: np.ndarray | Any,
        y_var: np.ndarray | Any | None = None,
    ) -> np.ndarray:
        """Record evaluated points and their objective values.

        Args:
            x: ``(n, num_dim)`` points in original coordinates.
            y: ``(n,)`` objective values.
            y_var: optional ``(n,)`` observation variances.

        Returns:
            The mode implementation's y estimate for the batch.

        NOTE(review): if tell() is called before the first ask(),
        ``self._tr_state`` is still None here — assumed the mode impl's
        ``update_trust_region`` tolerates that; confirm against the impls.
        """
        import numpy as np

        x = np.asarray(x, dtype=float)
        y = np.asarray(y, dtype=float)
        if x.ndim != 2 or x.shape[1] != self._num_dim:
            raise ValueError(x.shape)
        if y.ndim != 1 or y.shape[0] != x.shape[0]:
            raise ValueError((x.shape, y.shape))
        if y_var is not None:
            y_var = np.asarray(y_var, dtype=float)
            if y_var.shape != y.shape:
                raise ValueError((y.shape, y_var.shape))
        if x.shape[0] == 0:
            return np.array([], dtype=float)
        x_unit = to_unit(x, self._bounds)
        y_estimate = self._mode_impl.estimate_y(x_unit, y)
        self._x_obs_list.extend(x_unit.tolist())
        self._y_obs_list.extend(y.tolist())
        if y_var is not None:
            self._yvar_obs_list.extend(y_var.tolist())
        if self._trailing_obs is not None:
            self._trim_trailing_obs()
        self._mode_impl.update_trust_region(self._tr_state, self._y_obs_list)
        return y_estimate

    def _draw_initial(self, num_arms: int) -> np.ndarray:
        """Draw a fresh Latin-hypercube batch in original coordinates."""
        unit = latin_hypercube(num_arms, self._num_dim, rng=self._rng)
        return from_unit(unit, self._bounds)

    def _get_init_lhd_points(
        self, num_arms: int, fallback_fn: Callable[[int], np.ndarray] | None = None
    ) -> np.ndarray:
        """Serve points from the precomputed initial design.

        When the design has fewer than ``num_arms`` points left, the shortfall
        is filled by ``fallback_fn`` (if given) or a fresh LHD draw.
        """
        import numpy as np

        remaining_init = self._num_init - self._init_idx
        num_to_return = min(num_arms, remaining_init)
        result = self._init_lhd[self._init_idx : self._init_idx + num_to_return]
        self._init_idx += num_to_return
        if num_to_return < num_arms:
            num_remaining = num_arms - num_to_return
            if fallback_fn is not None:
                result = np.vstack([result, fallback_fn(num_remaining)])
            else:
                result = np.vstack([result, self._draw_initial(num_remaining)])
        return result
@@ -0,0 +1,123 @@
1
+ from __future__ import annotations
2
+
3
+ from dataclasses import dataclass
4
+ from typing import TYPE_CHECKING, Any
5
+
6
+ if TYPE_CHECKING:
7
+ import numpy as np
8
+ from numpy.random import Generator
9
+ from scipy.stats._qmc import QMCEngine
10
+
11
+
12
@dataclass
class TurboTrustRegion:
    """Adaptive hyper-rectangular trust region in TuRBO style.

    The region expands after a streak of improving batches and contracts
    after a streak of non-improving ones; once its edge length drops below
    ``length_min`` the caller should restart it.
    """

    num_dim: int
    num_arms: int
    length: float = 0.8
    length_init: float = 0.8
    length_min: float = 0.5**7
    length_max: float = 1.6
    failure_counter: int = 0
    success_counter: int = 0
    best_value: float = -float("inf")
    prev_num_obs: int = 0

    def __post_init__(self) -> None:
        import numpy as np

        # Tolerances follow the TuRBO heuristic: scale with dimension and
        # shrink with batch size.
        batch = float(self.num_arms)
        self.failure_tolerance = int(
            np.ceil(max(4.0 / batch, float(self.num_dim) / batch))
        )
        self.success_tolerance = 3

    def update(self, values: np.ndarray | Any) -> None:
        """Fold new observations into the success/failure bookkeeping.

        Only entries past ``prev_num_obs`` count as new. An improvement is a
        new value exceeding the incumbent by a 0.1% relative margin.
        """
        import numpy as np

        if values.ndim != 1:
            raise ValueError(values.shape)
        if values.size == 0:
            return
        fresh = values[self.prev_num_obs :]
        if fresh.size == 0:
            return
        fresh_best = float(np.max(fresh))
        if not np.isfinite(self.best_value):
            # First real observation: just seed the incumbent.
            self.best_value = fresh_best
            self.prev_num_obs = values.size
            return
        if fresh_best > self.best_value + 1e-3 * np.abs(self.best_value):
            self.success_counter += 1
            self.failure_counter = 0
        else:
            self.failure_counter += 1
            self.success_counter = 0
        if self.success_counter >= self.success_tolerance:
            # Improvement streak: double the region (capped at length_max).
            self.length = min(2.0 * self.length, self.length_max)
            self.success_counter = 0
        elif self.failure_counter >= self.failure_tolerance:
            # Failure streak: halve the region.
            self.length = 0.5 * self.length
            self.failure_counter = 0

        self.best_value = max(self.best_value, fresh_best)
        self.prev_num_obs = values.size

    def needs_restart(self) -> bool:
        """True once the region has shrunk below its minimum edge length."""
        return self.length < self.length_min

    def restart(self) -> None:
        """Reset all adaptive state back to the initial configuration."""
        self.length = self.length_init
        self.failure_counter = 0
        self.success_counter = 0
        self.best_value = -float("inf")
        self.prev_num_obs = 0

    def validate_request(self, num_arms: int, *, is_fallback: bool = False) -> None:
        """Reject batch sizes inconsistent with the configured ``num_arms``.

        Fallback requests may be smaller than the configured batch; regular
        requests must match it exactly.
        """
        if is_fallback:
            if num_arms > self.num_arms:
                raise ValueError(
                    f"num_arms {num_arms} > configured num_arms {self.num_arms}"
                )
        elif num_arms != self.num_arms:
            raise ValueError(
                f"num_arms {num_arms} != configured num_arms {self.num_arms}"
            )

    def compute_bounds_1d(
        self, x_center: np.ndarray | Any, weights: np.ndarray | None = None
    ) -> tuple[np.ndarray, np.ndarray]:
        """Per-dimension region bounds around ``x_center``, clipped to [0, 1].

        ``weights`` optionally stretches the region anisotropically.
        """
        import numpy as np

        radius = 0.5 * self.length if weights is None else weights * self.length / 2.0
        lower = np.clip(x_center - radius, 0.0, 1.0)
        upper = np.clip(x_center + radius, 0.0, 1.0)
        return lower, upper

    def generate_candidates(
        self,
        x_center: np.ndarray,
        weights: np.ndarray | None,
        num_candidates: int,
        rng: Generator,
        sobol_engine: QMCEngine,
    ) -> np.ndarray:
        """Draw perturbation-based candidates inside the current region."""
        from .turbo_utils import raasp

        lower, upper = self.compute_bounds_1d(x_center, weights)
        return raasp(
            x_center,
            lower,
            upper,
            num_candidates,
            num_pert=20,
            rng=rng,
            sobol_engine=sobol_engine,
        )
@@ -0,0 +1,248 @@
1
+ from __future__ import annotations
2
+
3
+ import contextlib
4
+ from typing import TYPE_CHECKING, Any, Iterator
5
+
6
+ if TYPE_CHECKING:
7
+ import numpy as np
8
+ import torch
9
+ from gpytorch.likelihoods import GaussianLikelihood
10
+ from numpy.random import Generator
11
+ from scipy.stats._qmc import QMCEngine
12
+
13
+ from .turbo_gp import TurboGP
14
+ from .turbo_gp_noisy import TurboGPNoisy
15
+
16
+
17
+ from enn.enn.enn_util import standardize_y
18
+
19
+
20
def _next_power_of_2(n: int) -> int:
    """Return the smallest power of two >= ``n`` (1 for ``n <= 0``)."""
    if n > 0:
        return 2 ** (n - 1).bit_length()
    return 1
24
+
25
+
26
@contextlib.contextmanager
def torch_rng_context(generator: torch.Generator | Any) -> Iterator[None]:
    """Temporarily run torch's global RNG from ``generator``'s state.

    The previous global state is restored on exit, even if the body raises.
    """
    import torch

    saved_state = torch.get_rng_state()
    try:
        torch.set_rng_state(generator.get_state())
        yield
    finally:
        torch.set_rng_state(saved_state)
36
+
37
+
38
def fit_gp(
    x_obs_list: list[float] | list[list[float]],
    y_obs_list: list[float] | list[list[float]],
    num_dim: int,
    *,
    yvar_obs_list: list[float] | None = None,
    num_steps: int = 50,
) -> tuple[
    "TurboGP | TurboGPNoisy | None",
    "GaussianLikelihood | None",
    float,
    float,
]:
    """Fit an exact GP surrogate to the observations.

    Args:
        x_obs_list: observed inputs (expected shape ``(n, num_dim)``).
        y_obs_list: observed objective values.
        num_dim: input dimension (used for ARD lengthscales).
        yvar_obs_list: optional per-observation noise variances; when given
            a fixed-noise model (``TurboGPNoisy``) is used instead.
        num_steps: number of Adam steps on the exact marginal log likelihood.

    Returns:
        ``(model, likelihood, y_mean, y_std)``. With fewer than two
        observations no model is fitted and ``(None, None, ...)`` is
        returned; the GP itself is trained on z-scored targets, so callers
        must un-standardize predictions with ``y_mean``/``y_std``.

    Raises:
        ValueError: if ``yvar_obs_list`` length differs from ``y_obs_list``.
    """
    import numpy as np
    import torch
    from gpytorch.constraints import Interval
    from gpytorch.likelihoods import GaussianLikelihood
    from gpytorch.mlls import ExactMarginalLogLikelihood

    from .turbo_gp import TurboGP
    from .turbo_gp_noisy import TurboGPNoisy

    x = np.asarray(x_obs_list, dtype=float)
    y = np.asarray(y_obs_list, dtype=float)
    n = x.shape[0]
    if yvar_obs_list is not None:
        if len(yvar_obs_list) != len(y_obs_list):
            raise ValueError(
                f"yvar_obs_list length {len(yvar_obs_list)} != y_obs_list length {len(y_obs_list)}"
            )
    if n == 0:
        # No data: neutral standardization constants.
        return None, None, 0.0, 1.0
    if n == 1:
        # A single point cannot support a GP fit; report it as the mean.
        gp_y_mean = float(y[0])
        gp_y_std = 1.0
        return None, None, gp_y_mean, gp_y_std
    # standardize_y yields the (mean, scale) pair used to z-score y below.
    gp_y_mean, gp_y_std = standardize_y(y)
    y_centered = y - gp_y_mean
    z = y_centered / gp_y_std
    train_x = torch.as_tensor(x, dtype=torch.float64)
    train_y = torch.as_tensor(z, dtype=torch.float64)
    # Hyperparameter boxes in standardized space.
    lengthscale_constraint = Interval(0.005, 2.0)
    outputscale_constraint = Interval(0.05, 20.0)
    if yvar_obs_list is not None:
        # Known observation noise: rescale variances to z-score units and use
        # the fixed-noise model (its likelihood is owned by the model).
        y_var = np.asarray(yvar_obs_list, dtype=float)
        train_y_var = torch.as_tensor(y_var / (gp_y_std**2), dtype=torch.float64)
        model = TurboGPNoisy(
            train_x=train_x,
            train_y=train_y,
            train_y_var=train_y_var,
            lengthscale_constraint=lengthscale_constraint,
            outputscale_constraint=outputscale_constraint,
            ard_dims=num_dim,
        ).to(dtype=train_x.dtype)
        likelihood = model.likelihood
    else:
        # Unknown noise: learn it, bounded away from zero for stability.
        noise_constraint = Interval(5e-4, 0.2)
        likelihood = GaussianLikelihood(noise_constraint=noise_constraint).to(
            dtype=train_y.dtype
        )
        model = TurboGP(
            train_x=train_x,
            train_y=train_y,
            likelihood=likelihood,
            lengthscale_constraint=lengthscale_constraint,
            outputscale_constraint=outputscale_constraint,
            ard_dims=num_dim,
        ).to(dtype=train_y.dtype)
        likelihood.noise = torch.tensor(0.005, dtype=train_y.dtype)
    # Common hyperparameter initialization for both model variants.
    model.covar_module.outputscale = torch.tensor(1.0, dtype=train_x.dtype)
    model.covar_module.base_kernel.lengthscale = torch.full(
        (num_dim,), 0.5, dtype=train_x.dtype
    )
    model.train()
    likelihood.train()
    mll = ExactMarginalLogLikelihood(likelihood, model)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
    for _ in range(num_steps):
        optimizer.zero_grad()
        output = model(train_x)
        loss = -mll(output, train_y)
        loss.backward()
        optimizer.step()
    model.eval()
    likelihood.eval()
    return model, likelihood, gp_y_mean, gp_y_std
124
+
125
+
126
def latin_hypercube(
    num_points: int, num_dim: int, *, rng: Generator | Any
) -> np.ndarray:
    """Sample a jittered Latin hypercube design in the unit cube.

    Each column is a random permutation of the stratum centers, perturbed
    uniformly within its stratum, so every dimension has exactly one point
    per stratum. Returns an array of shape ``(num_points, num_dim)``.
    """
    import numpy as np

    half_cells = float(2 * num_points)
    strata_centers = (1.0 + 2.0 * np.arange(0.0, num_points)) / half_cells
    design = np.zeros((num_points, num_dim))
    for dim in range(num_dim):
        design[:, dim] = strata_centers[rng.permutation(num_points)]
    jitter = rng.uniform(-1.0, 1.0, size=(num_points, num_dim)) / half_cells
    design += jitter
    return design
138
+
139
+
140
def argmax_random_tie(values: np.ndarray | Any, *, rng: Generator | Any) -> int:
    """Index of the maximum of a 1-D array, breaking ties uniformly at random.

    If no element compares >= the max (possible only with NaN-contaminated
    input, where comparisons are False), a uniformly random index is returned.
    """
    import numpy as np

    if values.ndim != 1:
        raise ValueError(values.shape)
    best = float(np.max(values))
    winners = np.nonzero(values >= best)[0]
    if winners.size == 0:
        return int(rng.integers(values.size))
    if winners.size == 1:
        return int(winners[0])
    return int(winners[int(rng.integers(winners.size))])
153
+
154
+
155
def sobol_perturb_np(
    x_center: np.ndarray | Any,
    lb: np.ndarray | list[float] | Any,
    ub: np.ndarray | list[float] | Any,
    num_candidates: int,
    mask: np.ndarray | Any,
    *,
    sobol_engine: QMCEngine | Any,
) -> np.ndarray:
    """Copy ``x_center`` per candidate, replacing masked coords with Sobol draws.

    The engine is asked for the next power of two >= ``num_candidates``
    points (Sobol sequences are balanced at power-of-two lengths) and the
    surplus rows are discarded. Draws are affinely mapped into [lb, ub].
    """
    import numpy as np

    # Inline next-power-of-two so the Sobol draw count stays balanced.
    draw_count = 1 if num_candidates <= 0 else 1 << (num_candidates - 1).bit_length()
    unit_samples = sobol_engine.random(draw_count)[:num_candidates]
    lower = np.asarray(lb)
    upper = np.asarray(ub)
    scaled = lower + (upper - lower) * unit_samples
    out = np.tile(x_center, (num_candidates, 1))
    if np.any(mask):
        out[mask] = scaled[mask]
    return out
175
+
176
+
177
def raasp(
    x_center: np.ndarray | Any,
    lb: np.ndarray | list[float] | Any,
    ub: np.ndarray | list[float] | Any,
    num_candidates: int,
    *,
    num_pert: int = 20,
    rng: Generator | Any,
    sobol_engine: QMCEngine | Any,
) -> np.ndarray:
    """Random-axis-subset Sobol perturbation of ``x_center``.

    Each candidate perturbs a random subset of coordinates — every coordinate
    independently with probability ``min(num_pert / num_dim, 1)``. Rows that
    would perturb nothing get one randomly chosen coordinate forced on, so
    every candidate differs from the center.
    """
    import numpy as np

    dim = x_center.shape[-1]
    per_coord_prob = min(num_pert / dim, 1.0)
    perturb_mask = rng.random((num_candidates, dim)) <= per_coord_prob
    empty_rows = np.nonzero(~perturb_mask.any(axis=1))[0]
    if len(empty_rows) > 0:
        forced_cols = rng.integers(0, dim, size=len(empty_rows))
        perturb_mask[empty_rows, forced_cols] = True
    return sobol_perturb_np(
        x_center, lb, ub, num_candidates, perturb_mask, sobol_engine=sobol_engine
    )
198
+
199
+
200
def to_unit(x: np.ndarray | Any, bounds: np.ndarray | Any) -> np.ndarray:
    """Map points from the box defined by ``bounds`` into the unit cube.

    ``bounds`` is ``(num_dim, 2)`` of (lower, upper) pairs; raises
    ValueError unless every upper bound exceeds its lower bound.
    """
    import numpy as np

    lower, upper = bounds[:, 0], bounds[:, 1]
    if np.any(upper <= lower):
        raise ValueError(bounds)
    return (x - lower) / (upper - lower)
208
+
209
+
210
def from_unit(x_unit: np.ndarray | Any, bounds: np.ndarray | Any) -> np.ndarray:
    """Map unit-cube points back into the box defined by ``bounds``.

    Inverse of ``to_unit`` (no validation of the bounds is performed here).
    """
    import numpy as np

    lower = np.asarray(bounds[:, 0])
    upper = np.asarray(bounds[:, 1])
    return lower + x_unit * (upper - lower)
216
+
217
+
218
def gp_thompson_sample(
    model: Any,
    x_cand: np.ndarray | Any,
    num_arms: int,
    rng: Generator | Any,
    gp_y_mean: float,
    gp_y_std: float,
) -> np.ndarray:
    """Pick ``num_arms`` candidate indices via Thompson sampling from a GP.

    Draws one joint posterior sample over ``x_cand`` (model expected to
    expose a BoTorch-style ``posterior``), un-standardizes it with
    ``gp_y_mean``/``gp_y_std`` and returns the indices of the ``num_arms``
    highest-scoring candidates. Torch randomness is seeded from ``rng`` and
    scoped via ``torch_rng_context`` so the global torch RNG is unaffected.
    """
    import gpytorch
    import numpy as np
    import torch

    x_torch = torch.as_tensor(x_cand, dtype=torch.float64)
    # Derive the torch seed from the caller's rng for reproducibility.
    seed = int(rng.integers(2**31 - 1))
    gen = torch.Generator(device=x_torch.device)
    gen.manual_seed(seed)
    with (
        torch.no_grad(),
        gpytorch.settings.fast_pred_var(),
        torch_rng_context(gen),
    ):
        posterior = model.posterior(x_torch)
        samples = posterior.sample(sample_shape=torch.Size([1]))
        ts = samples[0].reshape(-1)
    scores = ts.detach().cpu().numpy().reshape(-1)
    # Back to the original objective scale.
    scores = gp_y_mean + gp_y_std * scores
    # Shuffle before argpartition so ties are broken at random rather than
    # by candidate order.
    shuffled_indices = rng.permutation(len(scores))
    shuffled_scores = scores[shuffled_indices]
    top_k_in_shuffled = np.argpartition(-shuffled_scores, num_arms - 1)[:num_arms]
    idx = shuffled_indices[top_k_in_shuffled]
    return idx