ennbo 0.1.0__py3-none-any.whl → 0.1.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123)
  1. enn/__init__.py +25 -13
  2. enn/benchmarks/__init__.py +3 -0
  3. enn/benchmarks/ackley.py +5 -0
  4. enn/benchmarks/ackley_class.py +17 -0
  5. enn/benchmarks/ackley_core.py +12 -0
  6. enn/benchmarks/double_ackley.py +24 -0
  7. enn/enn/candidates.py +14 -0
  8. enn/enn/conditional_posterior_draw_internals.py +15 -0
  9. enn/enn/draw_internals.py +15 -0
  10. enn/enn/enn.py +16 -229
  11. enn/enn/enn_class.py +423 -0
  12. enn/enn/enn_conditional.py +325 -0
  13. enn/enn/enn_fit.py +77 -76
  14. enn/enn/enn_hash.py +79 -0
  15. enn/enn/enn_index.py +92 -0
  16. enn/enn/enn_like_protocol.py +35 -0
  17. enn/enn/enn_normal.py +3 -3
  18. enn/enn/enn_params.py +3 -9
  19. enn/enn/enn_params_class.py +24 -0
  20. enn/enn/enn_util.py +79 -37
  21. enn/enn/neighbor_data.py +14 -0
  22. enn/enn/neighbors.py +14 -0
  23. enn/enn/posterior_flags.py +8 -0
  24. enn/enn/weighted_stats.py +14 -0
  25. enn/turbo/components/__init__.py +41 -0
  26. enn/turbo/components/acquisition.py +13 -0
  27. enn/turbo/components/acquisition_optimizer_protocol.py +19 -0
  28. enn/turbo/components/builder.py +22 -0
  29. enn/turbo/components/chebyshev_incumbent_selector.py +76 -0
  30. enn/turbo/components/enn_surrogate.py +115 -0
  31. enn/turbo/components/gp_surrogate.py +144 -0
  32. enn/turbo/components/hnr_acq_optimizer.py +83 -0
  33. enn/turbo/components/incumbent_selector.py +11 -0
  34. enn/turbo/components/incumbent_selector_protocol.py +16 -0
  35. enn/turbo/components/no_incumbent_selector.py +21 -0
  36. enn/turbo/components/no_surrogate.py +49 -0
  37. enn/turbo/components/pareto_acq_optimizer.py +49 -0
  38. enn/turbo/components/posterior_result.py +12 -0
  39. enn/turbo/components/protocols.py +13 -0
  40. enn/turbo/components/random_acq_optimizer.py +21 -0
  41. enn/turbo/components/scalar_incumbent_selector.py +39 -0
  42. enn/turbo/components/surrogate_protocol.py +32 -0
  43. enn/turbo/components/surrogate_result.py +12 -0
  44. enn/turbo/components/surrogates.py +5 -0
  45. enn/turbo/components/thompson_acq_optimizer.py +49 -0
  46. enn/turbo/components/trust_region_protocol.py +24 -0
  47. enn/turbo/components/ucb_acq_optimizer.py +49 -0
  48. enn/turbo/config/__init__.py +87 -0
  49. enn/turbo/config/acq_type.py +8 -0
  50. enn/turbo/config/acquisition.py +26 -0
  51. enn/turbo/config/base.py +4 -0
  52. enn/turbo/config/candidate_gen_config.py +49 -0
  53. enn/turbo/config/candidate_rv.py +7 -0
  54. enn/turbo/config/draw_acquisition_config.py +14 -0
  55. enn/turbo/config/enn_index_driver.py +6 -0
  56. enn/turbo/config/enn_surrogate_config.py +42 -0
  57. enn/turbo/config/enums.py +7 -0
  58. enn/turbo/config/factory.py +118 -0
  59. enn/turbo/config/gp_surrogate_config.py +14 -0
  60. enn/turbo/config/hnr_optimizer_config.py +7 -0
  61. enn/turbo/config/init_config.py +17 -0
  62. enn/turbo/config/init_strategies/__init__.py +9 -0
  63. enn/turbo/config/init_strategies/hybrid_init.py +23 -0
  64. enn/turbo/config/init_strategies/init_strategy.py +19 -0
  65. enn/turbo/config/init_strategies/lhd_only_init.py +24 -0
  66. enn/turbo/config/morbo_tr_config.py +82 -0
  67. enn/turbo/config/nds_optimizer_config.py +7 -0
  68. enn/turbo/config/no_surrogate_config.py +14 -0
  69. enn/turbo/config/no_tr_config.py +31 -0
  70. enn/turbo/config/optimizer_config.py +72 -0
  71. enn/turbo/config/pareto_acquisition_config.py +14 -0
  72. enn/turbo/config/raasp_driver.py +6 -0
  73. enn/turbo/config/raasp_optimizer_config.py +7 -0
  74. enn/turbo/config/random_acquisition_config.py +14 -0
  75. enn/turbo/config/rescalarize.py +7 -0
  76. enn/turbo/config/surrogate.py +12 -0
  77. enn/turbo/config/trust_region.py +34 -0
  78. enn/turbo/config/turbo_tr_config.py +71 -0
  79. enn/turbo/config/ucb_acquisition_config.py +14 -0
  80. enn/turbo/config/validation.py +45 -0
  81. enn/turbo/hypervolume.py +30 -0
  82. enn/turbo/impl_helpers.py +68 -0
  83. enn/turbo/morbo_trust_region.py +250 -0
  84. enn/turbo/no_trust_region.py +58 -0
  85. enn/turbo/optimizer.py +300 -0
  86. enn/turbo/optimizer_config.py +8 -0
  87. enn/turbo/proposal.py +46 -39
  88. enn/turbo/sampling.py +21 -0
  89. enn/turbo/strategies/__init__.py +9 -0
  90. enn/turbo/strategies/lhd_only_strategy.py +36 -0
  91. enn/turbo/strategies/optimization_strategy.py +19 -0
  92. enn/turbo/strategies/turbo_hybrid_strategy.py +124 -0
  93. enn/turbo/tr_helpers.py +202 -0
  94. enn/turbo/turbo_gp.py +9 -2
  95. enn/turbo/turbo_gp_base.py +0 -1
  96. enn/turbo/turbo_gp_fit.py +187 -0
  97. enn/turbo/turbo_gp_noisy.py +0 -1
  98. enn/turbo/turbo_optimizer_utils.py +98 -0
  99. enn/turbo/turbo_trust_region.py +129 -63
  100. enn/turbo/turbo_utils.py +144 -117
  101. enn/turbo/types/__init__.py +7 -0
  102. enn/turbo/types/appendable_array.py +85 -0
  103. enn/turbo/types/gp_data_prep.py +13 -0
  104. enn/turbo/types/gp_fit_result.py +11 -0
  105. enn/turbo/types/obs_lists.py +10 -0
  106. enn/turbo/types/prepare_ask_result.py +14 -0
  107. enn/turbo/types/tell_inputs.py +14 -0
  108. {ennbo-0.1.0.dist-info → ennbo-0.1.7.dist-info}/METADATA +22 -14
  109. ennbo-0.1.7.dist-info/RECORD +111 -0
  110. enn/enn/__init__.py +0 -4
  111. enn/turbo/__init__.py +0 -11
  112. enn/turbo/base_turbo_impl.py +0 -98
  113. enn/turbo/lhd_only_impl.py +0 -42
  114. enn/turbo/turbo_config.py +0 -28
  115. enn/turbo/turbo_enn_impl.py +0 -176
  116. enn/turbo/turbo_mode.py +0 -10
  117. enn/turbo/turbo_mode_impl.py +0 -67
  118. enn/turbo/turbo_one_impl.py +0 -163
  119. enn/turbo/turbo_optimizer.py +0 -337
  120. enn/turbo/turbo_zero_impl.py +0 -24
  121. ennbo-0.1.0.dist-info/RECORD +0 -27
  122. {ennbo-0.1.0.dist-info → ennbo-0.1.7.dist-info}/WHEEL +0 -0
  123. {ennbo-0.1.0.dist-info → ennbo-0.1.7.dist-info}/licenses/LICENSE +0 -0
enn/turbo/turbo_trust_region.py CHANGED
@@ -1,55 +1,107 @@
  from __future__ import annotations
-
- from dataclasses import dataclass
+ from dataclasses import dataclass, field
  from typing import TYPE_CHECKING, Any
+ from .tr_helpers import ScalarIncumbentMixin

  if TYPE_CHECKING:
      import numpy as np
      from numpy.random import Generator
-     from scipy.stats._qmc import QMCEngine
+     from .components.incumbent_selector import IncumbentSelector
+     from .config.turbo_tr_config import TurboTRConfig


  @dataclass
- class TurboTrustRegion:
+ class TurboTrustRegion(ScalarIncumbentMixin):
+     config: TurboTRConfig
      num_dim: int
-     num_arms: int
-     length: float = 0.8
-     length_init: float = 0.8
-     length_min: float = 0.5**7
-     length_max: float = 1.6
+     length: float = field(init=False)
      failure_counter: int = 0
      success_counter: int = 0
      best_value: float = -float("inf")
      prev_num_obs: int = 0
+     incumbent_selector: IncumbentSelector = field(default=None, repr=False)
+     _num_arms: int | None = field(default=None, repr=False)
+     _failure_tolerance: int | None = field(default=None, repr=False)

      def __post_init__(self) -> None:
+         from .components.incumbent_selector import ScalarIncumbentSelector
+
+         self.length = self.config.length_init
+         self.success_tolerance = 3
+         if self.incumbent_selector is None:
+             self.incumbent_selector = ScalarIncumbentSelector(noise_aware=False)
+
+     @property
+     def length_init(self) -> float:
+         return self.config.length_init
+
+     @property
+     def length_min(self) -> float:
+         return self.config.length_min
+
+     @property
+     def length_max(self) -> float:
+         return self.config.length_max
+
+     @property
+     def num_metrics(self) -> int:
+         return 1
+
+     def _ensure_initialized(self, num_arms: int) -> None:
          import numpy as np

-         self.failure_tolerance = int(
-             np.ceil(
-                 max(
-                     4.0 / float(self.num_arms),
-                     float(self.num_dim) / float(self.num_arms),
+         if self._num_arms is None:
+             self._num_arms = num_arms
+             self._failure_tolerance = int(
+                 np.ceil(
+                     max(
+                         4.0 / float(num_arms),
+                         float(self.num_dim) / float(num_arms),
+                     )
                  )
              )
-         )
-         self.success_tolerance = 3
+         elif num_arms != self._num_arms:
+             raise ValueError(
+                 f"num_arms changed from {self._num_arms} to {num_arms}; "
+                 "must be consistent across ask() calls"
+             )
+
+     @property
+     def failure_tolerance(self) -> int:
+         if self._failure_tolerance is None:
+             raise RuntimeError("failure_tolerance not initialized; call ask() first")
+         return self._failure_tolerance

-     def update(self, values: np.ndarray | Any) -> None:
+     def _coerce_y_obs_1d(self, y_obs: np.ndarray | Any) -> np.ndarray:
          import numpy as np

-         if values.ndim != 1:
-             raise ValueError(values.shape)
-         if values.size == 0:
-             return
-         new_values = values[self.prev_num_obs :]
-         if new_values.size == 0:
-             return
-         if not np.isfinite(self.best_value):
-             self.best_value = float(np.max(new_values))
-             self.prev_num_obs = values.size
-             return
-         improved = np.max(new_values) > self.best_value + 1e-3 * np.abs(self.best_value)
+         y_obs = np.asarray(y_obs, dtype=float)
+         if y_obs.ndim == 2:
+             if y_obs.shape[1] != 1:
+                 raise ValueError(f"TurboTrustRegion expects m=1, got {y_obs.shape}")
+             return y_obs[:, 0]
+         if y_obs.ndim != 1:
+             raise ValueError(y_obs.shape)
+         return y_obs
+
+     def _coerce_y_incumbent_value(self, y_incumbent: np.ndarray | Any) -> float:
+         import numpy as np
+
+         y_incumbent = np.asarray(y_incumbent, dtype=float).reshape(-1)
+         if y_incumbent.shape != (self.num_metrics,):
+             raise ValueError(
+                 f"y_incumbent must have shape ({self.num_metrics},), got {y_incumbent.shape}"
+             )
+         return float(y_incumbent[0])
+
+     def _improvement_scale(self, prev_values: np.ndarray) -> float:
+         import numpy as np
+
+         if prev_values.size == 0:
+             return 0.0
+         return float(np.max(prev_values) - np.min(prev_values))
+
+     def _update_counters_and_length(self, *, improved: bool) -> None:
          if improved:
              self.success_counter += 1
              self.failure_counter = 0
@@ -59,65 +111,79 @@ class TurboTrustRegion:
          if self.success_counter >= self.success_tolerance:
              self.length = min(2.0 * self.length, self.length_max)
              self.success_counter = 0
-         elif self.failure_counter >= self.failure_tolerance:
+         elif (
+             self._failure_tolerance is not None
+             and self.failure_counter >= self._failure_tolerance
+         ):
              self.length = 0.5 * self.length
              self.failure_counter = 0

-         self.best_value = max(self.best_value, float(np.max(new_values)))
-         self.prev_num_obs = values.size
+     def update(self, y_obs: np.ndarray | Any, y_incumbent: np.ndarray | Any) -> None:
+         if self._failure_tolerance is None:
+             return
+         y_obs = self._coerce_y_obs_1d(y_obs)
+         n = int(y_obs.size)
+         if n <= 0:
+             return
+         if n < self.prev_num_obs:
+             raise ValueError((n, self.prev_num_obs))
+         if n == self.prev_num_obs:
+             return
+         y_incumbent_value = self._coerce_y_incumbent_value(y_incumbent)
+         import math
+
+         if not math.isfinite(self.best_value):
+             self.best_value = y_incumbent_value
+             self.prev_num_obs = n
+             return
+         prev_values = y_obs[: self.prev_num_obs]
+         scale = self._improvement_scale(prev_values)
+         improved = y_incumbent_value > self.best_value + 1e-3 * scale
+         self._update_counters_and_length(improved=improved)
+         self.best_value = max(self.best_value, y_incumbent_value)
+         self.prev_num_obs = n

      def needs_restart(self) -> bool:
          return self.length < self.length_min

-     def restart(self) -> None:
+     def restart(self, rng: Any | None = None) -> None:
          self.length = self.length_init
          self.failure_counter = 0
          self.success_counter = 0
          self.best_value = -float("inf")
          self.prev_num_obs = 0
+         self._num_arms = None
+         self._failure_tolerance = None

      def validate_request(self, num_arms: int, *, is_fallback: bool = False) -> None:
-         if is_fallback:
-             if num_arms > self.num_arms:
-                 raise ValueError(
-                     f"num_arms {num_arms} > configured num_arms {self.num_arms}"
-                 )
-         else:
-             if num_arms != self.num_arms:
-                 raise ValueError(
-                     f"num_arms {num_arms} != configured num_arms {self.num_arms}"
-                 )
+         self._ensure_initialized(num_arms)

      def compute_bounds_1d(
-         self, x_center: np.ndarray | Any, weights: np.ndarray | None = None
+         self, x_center: np.ndarray | Any, lengthscales: np.ndarray | None = None
      ) -> tuple[np.ndarray, np.ndarray]:
          import numpy as np

-         if weights is None:
+         if lengthscales is None:
              half_length = 0.5 * self.length
          else:
-             half_length = weights * self.length / 2.0
+             lengthscales = np.asarray(lengthscales, dtype=float).reshape(-1)
+             if lengthscales.shape != (self.num_dim,):
+                 raise ValueError(
+                     f"lengthscales must have shape ({self.num_dim},), got {lengthscales.shape}"
+                 )
+             if not np.all(np.isfinite(lengthscales)):
+                 raise ValueError("lengthscales must be finite")
+             half_length = lengthscales * self.length / 2.0
          lb = np.clip(x_center - half_length, 0.0, 1.0)
          ub = np.clip(x_center + half_length, 0.0, 1.0)
          return lb, ub

-     def generate_candidates(
+     def get_incumbent_indices(
          self,
-         x_center: np.ndarray,
-         weights: np.ndarray | None,
-         num_candidates: int,
+         y: np.ndarray | Any,
          rng: Generator,
-         sobol_engine: QMCEngine,
+         mu: np.ndarray | None = None,
      ) -> np.ndarray:
-         from .turbo_utils import raasp
-
-         lb, ub = self.compute_bounds_1d(x_center, weights)
-         return raasp(
-             x_center,
-             lb,
-             ub,
-             num_candidates,
-             num_pert=20,
-             rng=rng,
-             sobol_engine=sobol_engine,
-         )
+         import numpy as np
+
+         return np.array([self.get_incumbent_index(y, rng, mu=mu)])
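In the hunks above, the 0.1.0 length constants (length_init 0.8, length_min 0.5**7, length_max 1.6) move onto TurboTRConfig, and failure_tolerance is now computed lazily from the first requested batch size as ceil(max(4 / num_arms, num_dim / num_arms)). The length-adaptation rule itself is unchanged: double the region after success_tolerance (3) consecutive improvements, halve it after failure_tolerance consecutive failures, and signal a restart once length drops below length_min. A minimal standalone sketch of that rule, using hypothetical names rather than the package's classes:

import math


def failure_tolerance(num_dim: int, num_arms: int) -> int:
    # Mirrors the lazy computation in _ensure_initialized() above.
    return int(math.ceil(max(4.0 / num_arms, num_dim / num_arms)))


def adapt_length(
    length: float,
    successes: int,
    failures: int,
    *,
    fail_tol: int,
    success_tol: int = 3,
    length_max: float = 1.6,  # 0.1.0 default, now carried by the config object
) -> tuple[float, int, int]:
    # Double the trust region after enough successes, halve it after enough failures.
    if successes >= success_tol:
        return min(2.0 * length, length_max), 0, failures
    if failures >= fail_tol:
        return 0.5 * length, successes, 0
    return length, successes, failures


# Example: a 20-dimensional problem asked with 4 arms per batch.
tol = failure_tolerance(num_dim=20, num_arms=4)  # ceil(max(1.0, 5.0)) == 5
length, s, f = adapt_length(0.8, successes=3, failures=0, fail_tol=tol)
print(tol, length)  # 5 1.6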
enn/turbo/turbo_utils.py CHANGED
@@ -1,20 +1,35 @@
  from __future__ import annotations
-
  import contextlib
- from typing import TYPE_CHECKING, Any, Iterator
+ from dataclasses import dataclass
+ from typing import TYPE_CHECKING, Any, Callable, Iterator
+ import numpy as np

  if TYPE_CHECKING:
-     import numpy as np
      import torch
-     from gpytorch.likelihoods import GaussianLikelihood
      from numpy.random import Generator
      from scipy.stats._qmc import QMCEngine
+ __all__ = [
+     "Telemetry",
+ ]
+

-     from .turbo_gp import TurboGP
-     from .turbo_gp_noisy import TurboGPNoisy
+ @dataclass(frozen=True)
+ class Telemetry:
+     dt_fit: float
+     dt_sel: float
+     dt_gen: float = 0.0
+     dt_tell: float = 0.0


-     from enn.enn.enn_util import standardize_y
+ @contextlib.contextmanager
+ def record_duration(set_dt: Callable[[float], None]) -> Iterator[None]:
+     import time
+
+     t0 = time.perf_counter()
+     try:
+         yield
+     finally:
+         set_dt(time.perf_counter() - t0)


  def _next_power_of_2(n: int) -> int:
@@ -24,110 +39,46 @@ def _next_power_of_2(n: int) -> int:


  @contextlib.contextmanager
- def torch_rng_context(generator: torch.Generator | Any) -> Iterator[None]:
+ def torch_seed_context(
+     seed: int, device: torch.device | Any | None = None
+ ) -> Iterator[None]:
      import torch

-     old_state = torch.get_rng_state()
-     try:
-         torch.set_rng_state(generator.get_state())
+     devices: list[int] | None = None
+     if device is not None and getattr(device, "type", None) == "cuda":
+         idx = 0 if getattr(device, "index", None) is None else int(device.index)
+         devices = [idx]
+     with torch.random.fork_rng(devices=devices, enabled=True):
+         torch.manual_seed(int(seed))
+         if device is not None and getattr(device, "type", None) == "cuda":
+             torch.cuda.manual_seed_all(int(seed))
+         if device is not None and getattr(device, "type", None) == "mps":
+             if hasattr(torch, "mps") and hasattr(torch.mps, "manual_seed"):
+                 torch.mps.manual_seed(int(seed))
          yield
-     finally:
-         torch.set_rng_state(old_state)


- def fit_gp(
-     x_obs_list: list[float] | list[list[float]],
-     y_obs_list: list[float] | list[list[float]],
-     num_dim: int,
-     *,
-     yvar_obs_list: list[float] | None = None,
-     num_steps: int = 50,
- ) -> tuple[
-     "TurboGP | TurboGPNoisy | None",
-     "GaussianLikelihood | None",
-     float,
-     float,
- ]:
-     import numpy as np
-     import torch
-     from gpytorch.constraints import Interval
-     from gpytorch.likelihoods import GaussianLikelihood
-     from gpytorch.mlls import ExactMarginalLogLikelihood
-
-     from .turbo_gp import TurboGP
-     from .turbo_gp_noisy import TurboGPNoisy
-
-     x = np.asarray(x_obs_list, dtype=float)
-     y = np.asarray(y_obs_list, dtype=float)
-     n = x.shape[0]
-     if yvar_obs_list is not None:
-         if len(yvar_obs_list) != len(y_obs_list):
-             raise ValueError(
-                 f"yvar_obs_list length {len(yvar_obs_list)} != y_obs_list length {len(y_obs_list)}"
-             )
-     if n == 0:
-         return None, None, 0.0, 1.0
-     if n == 1:
-         gp_y_mean = float(y[0])
-         gp_y_std = 1.0
-         return None, None, gp_y_mean, gp_y_std
-     gp_y_mean, gp_y_std = standardize_y(y)
-     y_centered = y - gp_y_mean
-     z = y_centered / gp_y_std
-     train_x = torch.as_tensor(x, dtype=torch.float64)
-     train_y = torch.as_tensor(z, dtype=torch.float64)
-     lengthscale_constraint = Interval(0.005, 2.0)
-     outputscale_constraint = Interval(0.05, 20.0)
-     if yvar_obs_list is not None:
-         y_var = np.asarray(yvar_obs_list, dtype=float)
-         train_y_var = torch.as_tensor(y_var / (gp_y_std**2), dtype=torch.float64)
-         model = TurboGPNoisy(
-             train_x=train_x,
-             train_y=train_y,
-             train_y_var=train_y_var,
-             lengthscale_constraint=lengthscale_constraint,
-             outputscale_constraint=outputscale_constraint,
-             ard_dims=num_dim,
-         ).to(dtype=train_x.dtype)
-         likelihood = model.likelihood
-     else:
-         noise_constraint = Interval(5e-4, 0.2)
-         likelihood = GaussianLikelihood(noise_constraint=noise_constraint).to(
-             dtype=train_y.dtype
+ def get_gp_posterior_suppress_warning(model: Any, x_torch: Any) -> Any:
+     import warnings
+
+     try:
+         from gpytorch.utils.warnings import GPInputWarning
+     except Exception:
+         GPInputWarning = None
+     if GPInputWarning is None:
+         return model.posterior(x_torch)
+     with warnings.catch_warnings():
+         warnings.filterwarnings(
+             "ignore",
+             message=r"The input matches the stored training data\..*",
+             category=GPInputWarning,
          )
-         model = TurboGP(
-             train_x=train_x,
-             train_y=train_y,
-             likelihood=likelihood,
-             lengthscale_constraint=lengthscale_constraint,
-             outputscale_constraint=outputscale_constraint,
-             ard_dims=num_dim,
-         ).to(dtype=train_x.dtype)
-     likelihood.noise = torch.tensor(0.005, dtype=train_y.dtype)
-     model.covar_module.outputscale = torch.tensor(1.0, dtype=train_x.dtype)
-     model.covar_module.base_kernel.lengthscale = torch.full(
-         (num_dim,), 0.5, dtype=train_x.dtype
-     )
-     model.train()
-     likelihood.train()
-     mll = ExactMarginalLogLikelihood(likelihood, model)
-     optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
-     for _ in range(num_steps):
-         optimizer.zero_grad()
-         output = model(train_x)
-         loss = -mll(output, train_y)
-         loss.backward()
-         optimizer.step()
-     model.eval()
-     likelihood.eval()
-     return model, likelihood, gp_y_mean, gp_y_std
+         return model.posterior(x_torch)


  def latin_hypercube(
      num_points: int, num_dim: int, *, rng: Generator | Any
  ) -> np.ndarray:
-     import numpy as np
-
      x = np.zeros((num_points, num_dim))
      centers = (1.0 + 2.0 * np.arange(0.0, num_points)) / float(2 * num_points)
      for j in range(num_dim):
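The hunk above replaces the state-swapping torch_rng_context with torch_seed_context, which forks the global RNG state (plus CUDA or MPS state when a device is given) via torch.random.fork_rng and reseeds inside the context, so GP posterior draws are reproducible without disturbing the caller's RNG stream. A small usage sketch, assuming the helper is imported from enn.turbo.turbo_utils as defined in this file:

import torch

from enn.turbo.turbo_utils import torch_seed_context  # path as shown in this diff

before = torch.rand(2)  # advances the caller's global RNG
with torch_seed_context(1234):
    a = torch.randn(3)  # drawn from a freshly seeded, forked RNG state
with torch_seed_context(1234):
    b = torch.randn(3)
after = torch.rand(2)

assert torch.equal(a, b)  # same seed inside the context, identical draws
# `before` and `after` come from the caller's own RNG stream, which
# torch.random.fork_rng restores on exiting the context.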
@@ -138,8 +89,6 @@ def latin_hypercube(


  def argmax_random_tie(values: np.ndarray | Any, *, rng: Generator | Any) -> int:
-     import numpy as np
-
      if values.ndim != 1:
          raise ValueError(values.shape)
      max_val = float(np.max(values))
@@ -161,8 +110,6 @@ def sobol_perturb_np(
      *,
      sobol_engine: QMCEngine | Any,
  ) -> np.ndarray:
-     import numpy as np
-
      n_sobol = _next_power_of_2(num_candidates)
      sobol_samples = sobol_engine.random(n_sobol)[:num_candidates]
      lb_array = np.asarray(lb)
@@ -174,32 +121,114 @@
      return candidates


- def raasp(
+ def uniform_perturb_np(
      x_center: np.ndarray | Any,
      lb: np.ndarray | list[float] | Any,
      ub: np.ndarray | list[float] | Any,
      num_candidates: int,
+     mask: np.ndarray | Any,
      *,
-     num_pert: int = 20,
      rng: Generator | Any,
-     sobol_engine: QMCEngine | Any,
  ) -> np.ndarray:
-     import numpy as np
+     lb_array = np.asarray(lb)
+     ub_array = np.asarray(ub)
+     pert = lb_array + (ub_array - lb_array) * rng.uniform(
+         0.0, 1.0, size=(num_candidates, x_center.shape[-1])
+     )
+     candidates = np.tile(x_center, (num_candidates, 1))
+     if np.any(mask):
+         candidates[mask] = pert[mask]
+     return candidates

-     num_dim = x_center.shape[-1]
+
+ def _raasp_mask(
+     *,
+     num_candidates: int,
+     num_dim: int,
+     num_pert: int,
+     rng: Generator | Any,
+ ) -> np.ndarray:
      prob_perturb = min(num_pert / num_dim, 1.0)
      mask = rng.random((num_candidates, num_dim)) <= prob_perturb
      ind = np.nonzero(~mask.any(axis=1))[0]
      if len(ind) > 0:
          mask[ind, rng.integers(0, num_dim, size=len(ind))] = True
+     return mask
+
+
+ def raasp(
+     x_center: np.ndarray | Any,
+     lb: np.ndarray | list[float] | Any,
+     ub: np.ndarray | list[float] | Any,
+     num_candidates: int,
+     *,
+     num_pert: int = 20,
+     rng: Generator | Any,
+     sobol_engine: QMCEngine | Any,
+ ) -> np.ndarray:
+     num_dim = x_center.shape[-1]
+     mask = _raasp_mask(
+         num_candidates=num_candidates, num_dim=num_dim, num_pert=num_pert, rng=rng
+     )
      return sobol_perturb_np(
          x_center, lb, ub, num_candidates, mask, sobol_engine=sobol_engine
      )


- def to_unit(x: np.ndarray | Any, bounds: np.ndarray | Any) -> np.ndarray:
-     import numpy as np
+ def raasp_uniform(
+     x_center: np.ndarray | Any,
+     lb: np.ndarray | list[float] | Any,
+     ub: np.ndarray | list[float] | Any,
+     num_candidates: int,
+     *,
+     num_pert: int = 20,
+     rng: Generator | Any,
+ ) -> np.ndarray:
+     num_dim = x_center.shape[-1]
+     mask = _raasp_mask(
+         num_candidates=num_candidates, num_dim=num_dim, num_pert=num_pert, rng=rng
+     )
+     return uniform_perturb_np(x_center, lb, ub, num_candidates, mask, rng=rng)
+
+
+ def generate_raasp_candidates(
+     center: np.ndarray | Any,
+     lb: np.ndarray | list[float] | Any,
+     ub: np.ndarray | list[float] | Any,
+     num_candidates: int,
+     *,
+     rng: Generator | Any,
+     sobol_engine: QMCEngine | Any,
+     num_pert: int = 20,
+ ) -> np.ndarray:
+     if num_candidates <= 0:
+         raise ValueError(num_candidates)
+     return raasp(
+         center,
+         lb,
+         ub,
+         num_candidates,
+         num_pert=num_pert,
+         rng=rng,
+         sobol_engine=sobol_engine,
+     )
+

+ def generate_raasp_candidates_uniform(
+     center: np.ndarray | Any,
+     lb: np.ndarray | list[float] | Any,
+     ub: np.ndarray | list[float] | Any,
+     num_candidates: int,
+     *,
+     rng: Generator | Any,
+     num_pert: int = 20,
+ ) -> np.ndarray:
+     if num_candidates <= 0:
+         raise ValueError(num_candidates)
+     return raasp_uniform(center, lb, ub, num_candidates, num_pert=num_pert, rng=rng)
+
+
+ def to_unit(x: np.ndarray | Any, bounds: np.ndarray | Any) -> np.ndarray:
      lb = bounds[:, 0]
      ub = bounds[:, 1]
      if np.any(ub <= lb):
@@ -208,8 +237,6 @@ def to_unit(x: np.ndarray | Any, bounds: np.ndarray | Any) -> np.ndarray:


  def from_unit(x_unit: np.ndarray | Any, bounds: np.ndarray | Any) -> np.ndarray:
-     import numpy as np
-
      lb = np.asarray(bounds[:, 0])
      ub = np.asarray(bounds[:, 1])
      return lb + x_unit * (ub - lb)
@@ -220,24 +247,24 @@ def gp_thompson_sample(
      x_cand: np.ndarray | Any,
      num_arms: int,
      rng: Generator | Any,
+     *,
      gp_y_mean: float,
      gp_y_std: float,
  ) -> np.ndarray:
      import gpytorch
-     import numpy as np
      import torch

      x_torch = torch.as_tensor(x_cand, dtype=torch.float64)
      seed = int(rng.integers(2**31 - 1))
-     gen = torch.Generator(device=x_torch.device)
-     gen.manual_seed(seed)
      with (
          torch.no_grad(),
          gpytorch.settings.fast_pred_var(),
-         torch_rng_context(gen),
+         torch_seed_context(seed, device=x_torch.device),
      ):
          posterior = model.posterior(x_torch)
          samples = posterior.sample(sample_shape=torch.Size([1]))
+     if samples.ndim != 2:
+         raise ValueError(samples.shape)
      ts = samples[0].reshape(-1)
      scores = ts.detach().cpu().numpy().reshape(-1)
      scores = gp_y_mean + gp_y_std * scores
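The raasp refactor above separates the perturbation mask (_raasp_mask) from the fill step, so the same mask logic backs both the Sobol and the uniform variants: every candidate starts at the trust-region center, each coordinate is selected for perturbation with probability min(num_pert / num_dim, 1) with at least one coordinate forced per row, and only the selected coordinates are resampled inside [lb, ub]. A self-contained NumPy sketch of the uniform variant, with hypothetical standalone names rather than the package's functions:

import numpy as np


def raasp_uniform_sketch(x_center, lb, ub, num_candidates, *, num_pert=20, rng):
    """Perturb a random subset of coordinates of x_center inside [lb, ub]."""
    num_dim = x_center.shape[-1]
    prob_perturb = min(num_pert / num_dim, 1.0)
    mask = rng.random((num_candidates, num_dim)) <= prob_perturb
    # Guarantee at least one perturbed coordinate per candidate row.
    empty = np.nonzero(~mask.any(axis=1))[0]
    if len(empty) > 0:
        mask[empty, rng.integers(0, num_dim, size=len(empty))] = True
    pert = lb + (ub - lb) * rng.uniform(0.0, 1.0, size=(num_candidates, num_dim))
    candidates = np.tile(x_center, (num_candidates, 1))
    candidates[mask] = pert[mask]
    return candidates


rng = np.random.default_rng(0)
x_center = np.full(10, 0.5)
cands = raasp_uniform_sketch(
    x_center, np.zeros(10), np.ones(10), num_candidates=8, num_pert=3, rng=rng
)
print(cands.shape)  # (8, 10)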
enn/turbo/types/__init__.py ADDED
@@ -0,0 +1,7 @@
+ from .gp_data_prep import GPDataPrep
+ from .gp_fit_result import GPFitResult
+ from .obs_lists import ObsLists
+ from .prepare_ask_result import PrepareAskResult
+ from .tell_inputs import TellInputs
+
+ __all__ = ["GPDataPrep", "GPFitResult", "ObsLists", "PrepareAskResult", "TellInputs"]