ennbo 0.1.2__py3-none-any.whl → 0.1.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- enn/__init__.py +25 -13
- enn/benchmarks/__init__.py +3 -0
- enn/benchmarks/ackley.py +5 -0
- enn/benchmarks/ackley_class.py +17 -0
- enn/benchmarks/ackley_core.py +12 -0
- enn/benchmarks/double_ackley.py +24 -0
- enn/enn/candidates.py +14 -0
- enn/enn/conditional_posterior_draw_internals.py +15 -0
- enn/enn/draw_internals.py +15 -0
- enn/enn/enn.py +16 -269
- enn/enn/enn_class.py +423 -0
- enn/enn/enn_conditional.py +325 -0
- enn/enn/enn_fit.py +69 -70
- enn/enn/enn_hash.py +79 -0
- enn/enn/enn_index.py +92 -0
- enn/enn/enn_like_protocol.py +35 -0
- enn/enn/enn_normal.py +0 -1
- enn/enn/enn_params.py +3 -22
- enn/enn/enn_params_class.py +24 -0
- enn/enn/enn_util.py +60 -46
- enn/enn/neighbor_data.py +14 -0
- enn/enn/neighbors.py +14 -0
- enn/enn/posterior_flags.py +8 -0
- enn/enn/weighted_stats.py +14 -0
- enn/turbo/components/__init__.py +41 -0
- enn/turbo/components/acquisition.py +13 -0
- enn/turbo/components/acquisition_optimizer_protocol.py +19 -0
- enn/turbo/components/builder.py +22 -0
- enn/turbo/components/chebyshev_incumbent_selector.py +76 -0
- enn/turbo/components/enn_surrogate.py +115 -0
- enn/turbo/components/gp_surrogate.py +144 -0
- enn/turbo/components/hnr_acq_optimizer.py +83 -0
- enn/turbo/components/incumbent_selector.py +11 -0
- enn/turbo/components/incumbent_selector_protocol.py +16 -0
- enn/turbo/components/no_incumbent_selector.py +21 -0
- enn/turbo/components/no_surrogate.py +49 -0
- enn/turbo/components/pareto_acq_optimizer.py +49 -0
- enn/turbo/components/posterior_result.py +12 -0
- enn/turbo/components/protocols.py +13 -0
- enn/turbo/components/random_acq_optimizer.py +21 -0
- enn/turbo/components/scalar_incumbent_selector.py +39 -0
- enn/turbo/components/surrogate_protocol.py +32 -0
- enn/turbo/components/surrogate_result.py +12 -0
- enn/turbo/components/surrogates.py +5 -0
- enn/turbo/components/thompson_acq_optimizer.py +49 -0
- enn/turbo/components/trust_region_protocol.py +24 -0
- enn/turbo/components/ucb_acq_optimizer.py +49 -0
- enn/turbo/config/__init__.py +87 -0
- enn/turbo/config/acq_type.py +8 -0
- enn/turbo/config/acquisition.py +26 -0
- enn/turbo/config/base.py +4 -0
- enn/turbo/config/candidate_gen_config.py +49 -0
- enn/turbo/config/candidate_rv.py +7 -0
- enn/turbo/config/draw_acquisition_config.py +14 -0
- enn/turbo/config/enn_index_driver.py +6 -0
- enn/turbo/config/enn_surrogate_config.py +42 -0
- enn/turbo/config/enums.py +7 -0
- enn/turbo/config/factory.py +118 -0
- enn/turbo/config/gp_surrogate_config.py +14 -0
- enn/turbo/config/hnr_optimizer_config.py +7 -0
- enn/turbo/config/init_config.py +17 -0
- enn/turbo/config/init_strategies/__init__.py +9 -0
- enn/turbo/config/init_strategies/hybrid_init.py +23 -0
- enn/turbo/config/init_strategies/init_strategy.py +19 -0
- enn/turbo/config/init_strategies/lhd_only_init.py +24 -0
- enn/turbo/config/morbo_tr_config.py +82 -0
- enn/turbo/config/nds_optimizer_config.py +7 -0
- enn/turbo/config/no_surrogate_config.py +14 -0
- enn/turbo/config/no_tr_config.py +31 -0
- enn/turbo/config/optimizer_config.py +72 -0
- enn/turbo/config/pareto_acquisition_config.py +14 -0
- enn/turbo/config/raasp_driver.py +6 -0
- enn/turbo/config/raasp_optimizer_config.py +7 -0
- enn/turbo/config/random_acquisition_config.py +14 -0
- enn/turbo/config/rescalarize.py +7 -0
- enn/turbo/config/surrogate.py +12 -0
- enn/turbo/config/trust_region.py +34 -0
- enn/turbo/config/turbo_tr_config.py +71 -0
- enn/turbo/config/ucb_acquisition_config.py +14 -0
- enn/turbo/config/validation.py +45 -0
- enn/turbo/hypervolume.py +30 -0
- enn/turbo/impl_helpers.py +68 -0
- enn/turbo/morbo_trust_region.py +131 -70
- enn/turbo/no_trust_region.py +32 -39
- enn/turbo/optimizer.py +300 -0
- enn/turbo/optimizer_config.py +8 -0
- enn/turbo/proposal.py +36 -38
- enn/turbo/sampling.py +21 -0
- enn/turbo/strategies/__init__.py +9 -0
- enn/turbo/strategies/lhd_only_strategy.py +36 -0
- enn/turbo/strategies/optimization_strategy.py +19 -0
- enn/turbo/strategies/turbo_hybrid_strategy.py +124 -0
- enn/turbo/tr_helpers.py +202 -0
- enn/turbo/turbo_gp.py +0 -1
- enn/turbo/turbo_gp_base.py +0 -1
- enn/turbo/turbo_gp_fit.py +187 -0
- enn/turbo/turbo_gp_noisy.py +0 -1
- enn/turbo/turbo_optimizer_utils.py +98 -0
- enn/turbo/turbo_trust_region.py +126 -58
- enn/turbo/turbo_utils.py +98 -161
- enn/turbo/types/__init__.py +7 -0
- enn/turbo/types/appendable_array.py +85 -0
- enn/turbo/types/gp_data_prep.py +13 -0
- enn/turbo/types/gp_fit_result.py +11 -0
- enn/turbo/types/obs_lists.py +10 -0
- enn/turbo/types/prepare_ask_result.py +14 -0
- enn/turbo/types/tell_inputs.py +14 -0
- {ennbo-0.1.2.dist-info → ennbo-0.1.7.dist-info}/METADATA +18 -11
- ennbo-0.1.7.dist-info/RECORD +111 -0
- enn/enn/__init__.py +0 -4
- enn/turbo/__init__.py +0 -11
- enn/turbo/base_turbo_impl.py +0 -144
- enn/turbo/lhd_only_impl.py +0 -49
- enn/turbo/turbo_config.py +0 -72
- enn/turbo/turbo_enn_impl.py +0 -201
- enn/turbo/turbo_mode.py +0 -10
- enn/turbo/turbo_mode_impl.py +0 -76
- enn/turbo/turbo_one_impl.py +0 -302
- enn/turbo/turbo_optimizer.py +0 -525
- enn/turbo/turbo_zero_impl.py +0 -29
- ennbo-0.1.2.dist-info/RECORD +0 -29
- {ennbo-0.1.2.dist-info → ennbo-0.1.7.dist-info}/WHEEL +0 -0
- {ennbo-0.1.2.dist-info → ennbo-0.1.7.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,144 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
from typing import TYPE_CHECKING, Any
|
|
3
|
+
import numpy as np
|
|
4
|
+
from .posterior_result import PosteriorResult
|
|
5
|
+
from .surrogate_result import SurrogateResult
|
|
6
|
+
|
|
7
|
+
if TYPE_CHECKING:
|
|
8
|
+
from numpy.random import Generator
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class GPSurrogate:
    """GP-backed surrogate implementing the fit / predict / sample protocol.

    Wraps the project's ``fit_gp`` helper (GPyTorch/BoTorch under the hood).
    Targets are standardized during fitting; ``_y_mean`` / ``_y_std`` map
    posterior quantities back to the observation scale.
    """

    def __init__(self) -> None:
        # Fitted GP model; None until fit() produces one.
        self._model: Any | None = None
        # Target-standardization constants (scalar or per-metric array-like).
        self._y_mean: float | Any = 0.0
        self._y_std: float | Any = 1.0
        # Normalized kernel lengthscales from the most recent fit().
        self._lengthscales: np.ndarray | None = None

    @property
    def lengthscales(self) -> np.ndarray | None:
        """Geometric-mean-normalized lengthscales, or None before fit()."""
        return self._lengthscales

    def fit(
        self,
        x_obs: np.ndarray,
        y_obs: np.ndarray,
        y_var: np.ndarray | None = None,
        *,
        num_steps: int = 0,
        rng: Generator | None = None,  # unused by this implementation
    ) -> SurrogateResult:
        """Fit the GP to (x_obs, y_obs) and cache normalized lengthscales.

        Args:
            x_obs: Inputs, shape (n, num_dim).
            y_obs: Targets, shape (n,) or (n, 1); a single-column 2-D array
                is flattened before fitting.
            y_var: Optional observation noise variances, flattened if given.
            num_steps: Optimizer steps forwarded to fit_gp.
            rng: Accepted for Surrogate-protocol parity; not used here.
        """
        from ..turbo_gp_fit import fit_gp

        x_obs = np.asarray(x_obs, dtype=float)
        y_obs = np.asarray(y_obs, dtype=float)
        num_dim = x_obs.shape[1]
        if y_obs.ndim == 2 and y_obs.shape[1] == 1:
            y_obs = y_obs.ravel()
        gp_result = fit_gp(
            x_obs.tolist(),
            y_obs.tolist(),
            num_dim,
            yvar_obs_list=y_var.ravel().tolist() if y_var is not None else None,
            num_steps=num_steps,
        )
        self._model = gp_result.model
        # Keep previous standardization constants when fit_gp reports none.
        if gp_result.y_mean is not None:
            self._y_mean = gp_result.y_mean
        if gp_result.y_std is not None:
            self._y_std = gp_result.y_std
        lengthscales = None
        if self._model is not None:
            lengthscale = (
                self._model.covar_module.base_kernel.lengthscale.cpu().detach().numpy()
            )
            # NOTE(review): a 3-D lengthscale presumably means a batched
            # (multi-output) kernel; averaged over the batch axis — confirm.
            if lengthscale.ndim == 3:
                lengthscale = lengthscale.mean(axis=0)
            lengthscales = lengthscale.ravel()
            # Stabilize by the arithmetic mean, then rescale so the geometric
            # mean of the result is 1 (np.prod of the n-th roots is the
            # geometric mean).
            lengthscales_stabilized = lengthscales / lengthscales.mean()
            del lengthscales
            lengthscales_geom_normed = lengthscales_stabilized / np.prod(
                np.power(lengthscales_stabilized, 1.0 / len(lengthscales_stabilized))
            )
            self._lengthscales = lengthscales_geom_normed
        return SurrogateResult(model=self._model, lengthscales=self._lengthscales)

    def _as_2d(self, a: np.ndarray) -> np.ndarray:
        """Coerce a posterior array to shape (n, num_metrics).

        1-D input becomes a single-metric column. 2-D input is transposed —
        assumes the posterior returns metric-major (num_metrics, n) arrays;
        TODO confirm against get_gp_posterior_suppress_warning.
        """
        a = np.asarray(a, dtype=float)
        if a.ndim == 1:
            return a.reshape(-1, 1)
        if a.ndim == 2:
            return a.T
        raise ValueError(a.shape)

    def _unstandardize(self, y_std_2d: np.ndarray) -> np.ndarray:
        """Map a standardized (n, num_metrics) array back to observation scale.

        Scalar mean/std constants are broadcast across all metrics.
        """
        y_std_2d = np.asarray(y_std_2d, dtype=float)
        if y_std_2d.ndim != 2:
            raise ValueError(y_std_2d.shape)
        y_mean = np.asarray(self._y_mean, dtype=float).reshape(-1)
        y_std = np.asarray(self._y_std, dtype=float).reshape(-1)
        num_metrics = y_std_2d.shape[1]
        if y_mean.size == 1 and num_metrics != 1:
            y_mean = np.full(num_metrics, float(y_mean[0]), dtype=float)
        if y_std.size == 1 and num_metrics != 1:
            y_std = np.full(num_metrics, float(y_std[0]), dtype=float)
        return y_mean.reshape(1, -1) + y_std.reshape(1, -1) * y_std_2d

    def predict(self, x: np.ndarray) -> PosteriorResult:
        """Posterior mean and standard deviation at x, on observation scale.

        Raises:
            RuntimeError: if called before a successful fit().
        """
        import torch
        from ..turbo_utils import get_gp_posterior_suppress_warning

        if self._model is None:
            raise RuntimeError("GPSurrogate.predict requires a fitted model")
        x_torch = torch.as_tensor(x, dtype=torch.float64)
        with torch.no_grad():
            posterior = get_gp_posterior_suppress_warning(self._model, x_torch)
            mu_std = posterior.mean.cpu().numpy()
            var_std = posterior.variance.cpu().numpy()
        mu = self._unstandardize(self._as_2d(mu_std))
        # Standard deviation scales by y_std only (no mean shift).
        sigma_std_2d = self._as_2d(var_std**0.5)
        y_std = np.asarray(self._y_std, dtype=float).reshape(-1)
        if y_std.size == 1 and sigma_std_2d.shape[1] != 1:
            y_std = np.full(sigma_std_2d.shape[1], float(y_std[0]), dtype=float)
        sigma = y_std.reshape(1, -1) * sigma_std_2d
        return PosteriorResult(mu=mu, sigma=sigma)

    def get_incumbent_candidate_indices(self, y_obs: np.ndarray) -> np.ndarray:
        """Every observation is an incumbent candidate for a GP surrogate."""
        return np.arange(len(y_obs), dtype=int)

    def sample(self, x: np.ndarray, num_samples: int, rng: Generator) -> np.ndarray:
        """Draw joint posterior samples at x, on observation scale.

        Returns:
            Array of shape (num_samples, len(x), num_metrics). Torch's seed
            is derived from ``rng`` so draws are reproducible.

        Raises:
            RuntimeError: if called before a successful fit().
        """
        import gpytorch
        import torch
        from ..turbo_utils import torch_seed_context

        if self._model is None:
            raise RuntimeError("GPSurrogate.sample requires a fitted model")
        x_torch = torch.as_tensor(x, dtype=torch.float64)
        # Bridge numpy RNG -> torch seeding for deterministic sampling.
        seed = int(rng.integers(2**31 - 1))
        with (
            torch.no_grad(),
            gpytorch.settings.fast_pred_var(),
            torch_seed_context(seed, device=x_torch.device),
        ):
            posterior = self._model.posterior(x_torch)
            samples = posterior.sample(sample_shape=torch.Size([num_samples]))
        samples_np = samples.detach().cpu().numpy()
        num_candidates = len(x)
        num_metrics = len(self._y_mean) if hasattr(self._y_mean, "__len__") else 1
        if samples_np.ndim == 2:
            # Single-metric draw: (num_samples, n) -> (num_samples, n, 1).
            samples_np = samples_np[:, :, np.newaxis]
        else:
            # Multi-metric draw comes back metric-major; move metrics last.
            assert samples_np.shape == (num_samples, num_metrics, num_candidates), (
                f"GP raw samples shape mismatch: got {samples_np.shape}, "
                f"expected ({num_samples}, {num_metrics}, {num_candidates})"
            )
            samples_np = np.transpose(samples_np, (0, 2, 1))
        assert samples_np.shape == (num_samples, num_candidates, num_metrics), (
            f"GP samples shape after transpose: got {samples_np.shape}, "
            f"expected ({num_samples}, {num_candidates}, {num_metrics})"
        )
        # Undo target standardization (broadcast over samples and candidates).
        y_mean = np.asarray(self._y_mean, dtype=float).reshape(1, 1, -1)
        y_std = np.asarray(self._y_std, dtype=float).reshape(1, 1, -1)
        result = y_mean + y_std * samples_np
        assert result.shape == (num_samples, num_candidates, num_metrics)
        return result
|
@@ -0,0 +1,83 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
from typing import TYPE_CHECKING, Any
|
|
3
|
+
import numpy as np
|
|
4
|
+
from .thompson_acq_optimizer import ThompsonAcqOptimizer
|
|
5
|
+
from .ucb_acq_optimizer import UCBAcqOptimizer
|
|
6
|
+
|
|
7
|
+
if TYPE_CHECKING:
|
|
8
|
+
from numpy.random import Generator
|
|
9
|
+
from .protocols import Surrogate
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class HnRAcqOptimizer:
    """Hit-and-run style local refinement around a base acquisition.

    Each arm starts from a random candidate and is improved by greedy
    random-direction hill climbing in the unit cube, scored either by a UCB
    value or by a fixed-seed Thompson draw, depending on the wrapped base
    optimizer type.
    """

    def __init__(
        self,
        base_optimizer: ThompsonAcqOptimizer | UCBAcqOptimizer,
        num_iterations: int = 100,
    ) -> None:
        # Base optimizer determines the scoring rule (UCB vs. Thompson).
        self._base = base_optimizer
        # Hill-climbing proposals evaluated per arm.
        self._num_iterations = num_iterations

    def _score_fn_ucb(
        self, x_pt: np.ndarray, surrogate: Surrogate, beta: float = 1.0
    ) -> float:
        """UCB score mu + beta * sigma at a single point (first metric)."""
        posterior = surrogate.predict(x_pt.reshape(1, -1))
        mu = float(posterior.mu[0, 0])
        sigma = float(posterior.sigma[0, 0]) if posterior.sigma is not None else 0.0
        return mu + beta * sigma

    def _score_fn_thompson(
        self, x_pt: np.ndarray, surrogate: Surrogate, seed: int
    ) -> float:
        """Thompson score: one posterior draw with a fixed seed, so the
        sampled function stays coherent across a whole hill climb."""
        fixed_rng = np.random.default_rng(seed)
        samples = surrogate.sample(x_pt.reshape(1, -1), 1, fixed_rng)
        return float(samples[0, 0, 0])

    def _optimize_one_arm(
        self,
        x_start: np.ndarray,
        num_dim: int,
        rng: Generator,
        score_fn,
    ) -> np.ndarray:
        """Greedy random-direction ascent from x_start, clipped to [0, 1]^d.

        Accepts a proposal only when it strictly improves the score.
        """
        x_current = x_start.copy()
        current_score = score_fn(x_current)
        for _ in range(self._num_iterations):
            direction = rng.standard_normal(num_dim)
            direction = direction / np.linalg.norm(direction)
            step_size = rng.uniform(0.01, 0.1)
            x_proposed = np.clip(x_current + step_size * direction, 0.0, 1.0)
            proposed_score = score_fn(x_proposed)
            if proposed_score > current_score:
                x_current = x_proposed
                current_score = proposed_score
        return x_current

    def select(
        self,
        x_cand: np.ndarray,
        num_arms: int,
        surrogate: Surrogate,
        rng: Generator,
        *,
        tr_state: Any | None = None,
    ) -> np.ndarray:
        """Return num_arms locally refined points, one per hill climb.

        Args:
            x_cand: Candidate pool used only for random starting points.
            num_arms: Number of arms to return.
            surrogate: Fitted surrogate providing predict()/sample().
            rng: Source of all randomness (starts, directions, seeds).
            tr_state: Accepted for protocol parity; unused here.
        """
        num_dim = x_cand.shape[1]
        x_arms = np.zeros((num_arms, num_dim), dtype=float)
        is_ucb = isinstance(self._base, UCBAcqOptimizer)
        # Hoisted out of the arm loop: beta does not change between arms.
        beta = getattr(self._base, "_beta", 1.0) if is_ucb else 1.0
        for arm_idx in range(num_arms):
            start_idx = rng.integers(0, len(x_cand))
            x_start = x_cand[start_idx]
            if is_ucb:
                # Bind loop-invariant values as default arguments to avoid
                # late-binding closure capture (mirrors the seed branch).
                def score_fn(x_pt, b=beta):
                    return self._score_fn_ucb(x_pt, surrogate, b)
            else:
                # Fresh seed per arm => a distinct Thompson sample per arm.
                seed = int(rng.integers(0, 2**31))

                def score_fn(x_pt, s=seed):
                    return self._score_fn_thompson(x_pt, surrogate, s)

            x_arms[arm_idx] = self._optimize_one_arm(x_start, num_dim, rng, score_fn)
        return x_arms
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
from .chebyshev_incumbent_selector import ChebyshevIncumbentSelector
|
|
2
|
+
from .incumbent_selector_protocol import IncumbentSelector
|
|
3
|
+
from .no_incumbent_selector import NoIncumbentSelector
|
|
4
|
+
from .scalar_incumbent_selector import ScalarIncumbentSelector
|
|
5
|
+
|
|
6
|
+
__all__ = [
|
|
7
|
+
"ChebyshevIncumbentSelector",
|
|
8
|
+
"IncumbentSelector",
|
|
9
|
+
"NoIncumbentSelector",
|
|
10
|
+
"ScalarIncumbentSelector",
|
|
11
|
+
]
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
from typing import TYPE_CHECKING, Protocol
|
|
3
|
+
|
|
4
|
+
if TYPE_CHECKING:
|
|
5
|
+
import numpy as np
|
|
6
|
+
from numpy.random import Generator
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class IncumbentSelector(Protocol):
    """Structural interface for choosing the incumbent observation index."""

    def select(
        self,
        y_obs: np.ndarray,
        mu_obs: np.ndarray | None,
        rng: Generator,
    ) -> int:
        """Return the index of the incumbent among the observations.

        Args:
            y_obs: Observed objective values.
            mu_obs: Optional posterior means at the observations (for
                noise-aware selection); may be None.
            rng: Random generator, e.g. for tie-breaking.
        """
        ...

    def reset(self, rng: Generator) -> None:
        """Reset any internal selector state (e.g. on restart)."""
        ...
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
from dataclasses import dataclass
|
|
3
|
+
from typing import TYPE_CHECKING
|
|
4
|
+
|
|
5
|
+
if TYPE_CHECKING:
|
|
6
|
+
import numpy as np
|
|
7
|
+
from numpy.random import Generator
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
@dataclass
class NoIncumbentSelector:
    """Degenerate incumbent selector: the incumbent is always index 0.

    Useful when the optimizer configuration has no meaningful notion of an
    incumbent. Stateless, so reset() is a no-op.
    """

    def select(
        self,
        y_obs: np.ndarray,
        mu_obs: np.ndarray | None,
        rng: Generator,
    ) -> int:
        """Ignore all inputs and return index 0."""
        return 0

    def reset(self, rng: Generator) -> None:
        """Nothing to reset."""
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
from typing import TYPE_CHECKING
|
|
3
|
+
import numpy as np
|
|
4
|
+
from .posterior_result import PosteriorResult
|
|
5
|
+
from .surrogate_result import SurrogateResult
|
|
6
|
+
|
|
7
|
+
if TYPE_CHECKING:
|
|
8
|
+
from numpy.random import Generator
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class NoSurrogate:
    """Fallback surrogate that memorizes training data and models nothing.

    ``predict`` only answers for the exact training inputs (returning the
    stored observations with no uncertainty); ``sample`` returns standard
    normal noise of the right shape.
    """

    def __init__(self) -> None:
        # Stored training data; None until fit() is called.
        self._x_obs: np.ndarray | None = None
        self._y_obs: np.ndarray | None = None

    @property
    def lengthscales(self) -> np.ndarray | None:
        # Never set by this class; kept for Surrogate-protocol parity.
        return getattr(self, "_lengthscales", None)

    def fit(
        self,
        x_obs: np.ndarray,
        y_obs: np.ndarray,
        y_var: np.ndarray | None = None,
        *,
        num_steps: int = 0,
        rng: Generator | None = None,
    ) -> SurrogateResult:
        """Store the observations; targets are coerced to 2-D (n, metrics)."""
        self._x_obs = np.asarray(x_obs, dtype=float)
        self._y_obs = np.asarray(y_obs, dtype=float)
        if self._y_obs.ndim == 1:
            self._y_obs = self._y_obs.reshape(-1, 1)
        return SurrogateResult(model=None, lengthscales=None)

    def predict(self, x: np.ndarray) -> PosteriorResult:
        """Return the stored observations when x equals the training inputs.

        Raises:
            RuntimeError: if fit() was not called, or x differs from the
                training inputs (this surrogate cannot interpolate).
        """
        if self._x_obs is None or self._y_obs is None:
            raise RuntimeError("NoSurrogate.predict requires fit() first")
        x = np.asarray(x, dtype=float)
        if np.array_equal(x, self._x_obs):
            return PosteriorResult(mu=self._y_obs.copy(), sigma=None)
        raise RuntimeError("NoSurrogate.predict only works for training points")

    def get_incumbent_candidate_indices(self, y_obs: np.ndarray) -> np.ndarray:
        """All observations are incumbent candidates."""
        return np.arange(len(y_obs), dtype=int)

    def sample(self, x: np.ndarray, num_samples: int, rng: Generator) -> np.ndarray:
        """Return iid standard-normal draws, shape (num_samples, len(x), m).

        Bug fix: the metric count previously used ``hasattr(self, "_y_obs")``,
        which is always True because __init__ sets the attribute to None, so
        sampling before fit() crashed with AttributeError on ``.shape``.
        Checking for None makes the unfitted case default to one metric.
        """
        n = len(x)
        num_metrics = self._y_obs.shape[1] if self._y_obs is not None else 1
        return rng.standard_normal((num_samples, n, num_metrics))
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
from typing import TYPE_CHECKING, Any
|
|
3
|
+
import numpy as np
|
|
4
|
+
|
|
5
|
+
if TYPE_CHECKING:
|
|
6
|
+
from numpy.random import Generator
|
|
7
|
+
from .protocols import Surrogate
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class ParetoAcqOptimizer:
    """Selects arms by non-dominated sorting of posterior means.

    Multi-metric: peels successive Pareto fronts of mu until num_arms
    candidates are collected. Single-metric: delegates to
    arms_from_pareto_fronts on (mu, se).
    """

    def select(
        self,
        x_cand: np.ndarray,
        num_arms: int,
        surrogate: Surrogate,
        rng: Generator,
        *,
        tr_state: Any | None = None,
    ) -> np.ndarray:
        """Return num_arms rows of x_cand chosen by Pareto dominance of mu.

        Args:
            x_cand: Candidate points, shape (n, num_dim).
            num_arms: Number of arms to return.
            surrogate: Fitted surrogate providing predict().
            rng: Used for random fill when a front overflows num_arms.
            tr_state: Accepted for protocol parity; unused here.
        """
        from enn.enn.enn_util import arms_from_pareto_fronts

        posterior = surrogate.predict(x_cand)
        mu = posterior.mu
        # Missing uncertainty is treated as zero standard error.
        se = posterior.sigma if posterior.sigma is not None else np.zeros_like(mu)
        if mu.ndim == 2 and mu.shape[1] > 1:
            from nds import ndomsort

            n = mu.shape[0]
            i_keep: list[int] = []
            remaining_mask = np.ones(n, dtype=bool)
            while len(i_keep) < num_arms and np.any(remaining_mask):
                remaining_indices = np.where(remaining_mask)[0]
                # mu is negated because non_domin_sort minimizes while we
                # maximize. NOTE(review): assumes only_front_indices=True
                # yields per-point front ids comparable to 0 — confirm
                # against the nds package documentation.
                fronts = ndomsort.non_domin_sort(
                    -mu[remaining_indices], only_front_indices=True
                )
                front_indices = remaining_indices[np.where(fronts == 0)[0]]
                if len(i_keep) + len(front_indices) <= num_arms:
                    # Whole first front fits: keep it and peel it off.
                    i_keep.extend(front_indices.tolist())
                    remaining_mask[front_indices] = False
                else:
                    # Front overflows: fill the remainder uniformly at random.
                    needed = num_arms - len(i_keep)
                    selected = rng.choice(front_indices, size=needed, replace=False)
                    i_keep.extend(selected.tolist())
                    break
            return x_cand[np.array(i_keep, dtype=int)]
        else:
            # Single metric: fall back to the scalar Pareto-front arm picker.
            mu_1d = mu[:, 0] if mu.ndim == 2 else mu
            se_1d = se[:, 0] if se.ndim == 2 else se
            return arms_from_pareto_fronts(x_cand, mu_1d, se_1d, num_arms, rng)
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
from .acquisition_optimizer_protocol import AcquisitionOptimizer
|
|
2
|
+
from .posterior_result import PosteriorResult
|
|
3
|
+
from .surrogate_protocol import Surrogate
|
|
4
|
+
from .surrogate_result import SurrogateResult
|
|
5
|
+
from .trust_region_protocol import TrustRegion
|
|
6
|
+
|
|
7
|
+
__all__ = [
|
|
8
|
+
"AcquisitionOptimizer",
|
|
9
|
+
"PosteriorResult",
|
|
10
|
+
"Surrogate",
|
|
11
|
+
"SurrogateResult",
|
|
12
|
+
"TrustRegion",
|
|
13
|
+
]
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
from typing import TYPE_CHECKING, Any
|
|
3
|
+
import numpy as np
|
|
4
|
+
|
|
5
|
+
if TYPE_CHECKING:
|
|
6
|
+
from numpy.random import Generator
|
|
7
|
+
from .protocols import Surrogate
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class RandomAcqOptimizer:
|
|
11
|
+
def select(
|
|
12
|
+
self,
|
|
13
|
+
x_cand: np.ndarray,
|
|
14
|
+
num_arms: int,
|
|
15
|
+
surrogate: Surrogate,
|
|
16
|
+
rng: Generator,
|
|
17
|
+
*,
|
|
18
|
+
tr_state: Any | None = None,
|
|
19
|
+
) -> np.ndarray:
|
|
20
|
+
idx = rng.choice(len(x_cand), size=num_arms, replace=False)
|
|
21
|
+
return x_cand[idx]
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
from dataclasses import dataclass
|
|
3
|
+
from typing import TYPE_CHECKING
|
|
4
|
+
|
|
5
|
+
if TYPE_CHECKING:
|
|
6
|
+
import numpy as np
|
|
7
|
+
from numpy.random import Generator
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
@dataclass
class ScalarIncumbentSelector:
    """Incumbent selector for a scalar objective: argmax with random ties.

    With noise_aware=True the posterior means (mu_obs) are ranked instead of
    the raw, possibly noisy, observations.
    """

    # Rank posterior means rather than raw observations when True.
    noise_aware: bool

    def select(
        self,
        y_obs: np.ndarray,
        mu_obs: np.ndarray | None,
        rng: Generator,
    ) -> int:
        """Return the incumbent index; ties broken at random via rng.

        Multi-metric inputs are reduced to their first column.
        """
        import numpy as np
        from ..turbo_utils import argmax_random_tie

        y = np.asarray(y_obs, dtype=float)
        y = y[:, 0] if y.ndim == 2 else y
        if not self.noise_aware:
            return int(argmax_random_tie(y, rng=rng))
        if mu_obs is None:
            raise ValueError(
                "noise_aware=True requires a surrogate that provides mu. "
                "Either use a GP/ENN surrogate or set noise_aware=False."
            )
        mu = np.asarray(mu_obs, dtype=float)
        mu = mu[:, 0] if mu.ndim == 2 else mu
        return int(argmax_random_tie(mu, rng=rng))

    def reset(self, rng: Generator) -> None:
        """Stateless selector; nothing to reset."""
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
from typing import TYPE_CHECKING, Any, Protocol
|
|
3
|
+
from .posterior_result import PosteriorResult
|
|
4
|
+
from .surrogate_result import SurrogateResult
|
|
5
|
+
|
|
6
|
+
if TYPE_CHECKING:
|
|
7
|
+
import numpy as np
|
|
8
|
+
from numpy.random import Generator
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class Surrogate(Protocol):
    """Structural interface for surrogate models used by the optimizer."""

    @property
    def lengthscales(self) -> np.ndarray | None:
        """Per-dimension lengthscales from the last fit, or None."""
        ...

    def fit(
        self,
        x_obs: np.ndarray,
        y_obs: np.ndarray,
        y_var: np.ndarray | None = None,
        *,
        num_steps: int = 0,
        rng: Generator | None = None,
    ) -> SurrogateResult:
        """Fit to observations; y_var optionally carries noise variances."""
        ...

    def predict(self, x: np.ndarray) -> PosteriorResult:
        """Posterior mean (and uncertainty, where available) at x."""
        ...

    def sample(self, x: np.ndarray, num_samples: int, rng: Generator) -> np.ndarray:
        """Joint posterior draws; implementations in this package return
        shape (num_samples, len(x), num_metrics)."""
        ...

    def find_x_center(
        self,
        x_obs: np.ndarray,
        y_obs: np.ndarray,
        tr_state: Any,
        rng: Generator,
    ) -> np.ndarray | None:
        """Pick a trust-region center from the observations, or None."""
        ...

    def get_incumbent_candidate_indices(self, y_obs: np.ndarray) -> np.ndarray:
        """Indices of observations eligible to be the incumbent."""
        ...
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
from typing import TYPE_CHECKING, Any
|
|
3
|
+
import numpy as np
|
|
4
|
+
|
|
5
|
+
if TYPE_CHECKING:
|
|
6
|
+
from numpy.random import Generator
|
|
7
|
+
from .protocols import Surrogate
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class ThompsonAcqOptimizer:
    """Thompson-sampling arm selection over a fixed candidate set.

    Draws num_arms joint posterior samples and, per sample, picks the
    maximizing candidate. When the trust-region state can scalarize
    multi-metric values, picks are made greedily without replacement.
    """

    def select(
        self,
        x_cand: np.ndarray,
        num_arms: int,
        surrogate: Surrogate,
        rng: Generator,
        *,
        tr_state: Any | None = None,
    ) -> np.ndarray:
        """Return num_arms rows of x_cand chosen by Thompson sampling."""
        num_candidates = len(x_cand)
        samples = surrogate.sample(x_cand, num_arms, rng)
        # Shape contract: (num_arms, num_candidates, num_metrics).
        assert samples.ndim == 3, f"samples.ndim={samples.ndim}, expected 3"
        assert (
            samples.shape[0] == num_arms
        ), f"samples.shape[0]={samples.shape[0]}, expected num_arms={num_arms}"
        assert (
            samples.shape[1] == num_candidates
        ), f"samples.shape[1]={samples.shape[1]}, expected num_candidates={num_candidates}"
        num_metrics = samples.shape[2]
        if tr_state is None or not hasattr(tr_state, "scalarize"):
            # Plain Thompson: per sample, take the best candidate on the
            # first metric (duplicates across arms allowed).
            return x_cand[np.argmax(samples[:, :, 0], axis=1)]
        # Scalarized multi-metric path: greedy, without replacement.
        indices: list[int] = []
        for sample_i in samples:
            assert sample_i.shape == (num_candidates, num_metrics), (
                f"sample_i.shape={sample_i.shape}, "
                f"expected ({num_candidates}, {num_metrics})"
            )
            scores = tr_state.scalarize(sample_i, clip=False)
            assert scores.shape == (
                num_candidates,
            ), f"scores.shape={scores.shape}, expected ({num_candidates},)"
            # Mask out previously chosen candidates before taking the max.
            for prev_idx in indices:
                scores[prev_idx] = -np.inf
            indices.append(np.argmax(scores))
        return x_cand[indices]
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
from typing import TYPE_CHECKING, Protocol
|
|
3
|
+
|
|
4
|
+
if TYPE_CHECKING:
|
|
5
|
+
import numpy as np
|
|
6
|
+
from numpy.random import Generator
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class TrustRegion(Protocol):
    """Structural interface for trust-region state used by the optimizer."""

    @property
    def length(self) -> float:
        """Current trust-region side length."""
        ...

    def compute_bounds(
        self, x_center: np.ndarray, lengthscales: np.ndarray | None = None
    ) -> tuple[np.ndarray, np.ndarray]:
        """Lower/upper bounds around x_center, optionally lengthscale-shaped."""
        ...

    def update(self, y_obs: np.ndarray, y_incumbent: np.ndarray) -> None:
        """Record new observations against the incumbent (grow/shrink)."""
        ...

    def needs_restart(self) -> bool:
        """True when the region has collapsed and should be restarted."""
        ...

    def restart(self) -> None:
        """Reset the trust-region state."""
        ...

    def get_incumbent_indices(self, y: np.ndarray, rng: Generator) -> np.ndarray:
        """Indices of incumbent candidates among y."""
        ...

    def get_incumbent_index(
        self, y: np.ndarray, rng: Generator, mu: np.ndarray | None = None
    ) -> int:
        """Single incumbent index; mu enables noise-aware selection."""
        ...

    def get_incumbent_value(
        self, y_obs: np.ndarray, rng: Generator, mu_obs: np.ndarray | None = None
    ) -> np.ndarray:
        """Objective value(s) of the incumbent observation."""
        ...
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
from typing import TYPE_CHECKING, Any
|
|
3
|
+
import numpy as np
|
|
4
|
+
|
|
5
|
+
if TYPE_CHECKING:
|
|
6
|
+
from numpy.random import Generator
|
|
7
|
+
from .protocols import Surrogate
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class UCBAcqOptimizer:
    """Upper-confidence-bound arm selection over a fixed candidate set."""

    def __init__(self, beta: float = 1.0) -> None:
        # Exploration weight on the posterior standard deviation.
        self._beta = beta

    def select(
        self,
        x_cand: np.ndarray,
        num_arms: int,
        surrogate: Surrogate,
        rng: Generator,
        *,
        tr_state: Any | None = None,
    ) -> np.ndarray:
        """Return the num_arms candidates with highest mu + beta * sigma.

        Multi-metric posteriors are scalarized via tr_state when available;
        otherwise only the first metric is scored. Ties are broken randomly.
        """
        num_candidates = len(x_cand)
        posterior = surrogate.predict(x_cand)
        mu = posterior.mu
        # Missing uncertainty degrades UCB to a pure-mean ranking.
        sigma = posterior.sigma if posterior.sigma is not None else np.zeros_like(mu)
        assert mu.ndim == 2, f"mu.ndim={mu.ndim}, expected 2"
        assert (
            mu.shape[0] == num_candidates
        ), f"mu.shape[0]={mu.shape[0]}, expected {num_candidates}"
        num_metrics = mu.shape[1]
        if tr_state is not None and hasattr(tr_state, "scalarize"):
            ucb = mu + self._beta * sigma
            assert ucb.shape == (
                num_candidates,
                num_metrics,
            ), f"ucb.shape={ucb.shape}, expected ({num_candidates}, {num_metrics})"
            scores = tr_state.scalarize(ucb, clip=False)
            assert scores.shape == (
                num_candidates,
            ), f"scores.shape={scores.shape}, expected ({num_candidates},)"
        else:
            scores = mu[:, 0] + self._beta * sigma[:, 0]
            assert scores.shape == (num_candidates,)
        # Permuting before argpartition gives random tie-breaking among equal
        # scores while still selecting an exact top-k.
        order = rng.permutation(len(scores))
        permuted = scores[order]
        top_slots = np.argpartition(-permuted, num_arms - 1)[:num_arms]
        winners = order[top_slots]
        return x_cand[winners]
|