ennbo 0.1.2__py3-none-any.whl → 0.1.7__py3-none-any.whl
This diff shows the content of publicly released versions of this package as published to a supported public registry; it is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
- enn/__init__.py +25 -13
- enn/benchmarks/__init__.py +3 -0
- enn/benchmarks/ackley.py +5 -0
- enn/benchmarks/ackley_class.py +17 -0
- enn/benchmarks/ackley_core.py +12 -0
- enn/benchmarks/double_ackley.py +24 -0
- enn/enn/candidates.py +14 -0
- enn/enn/conditional_posterior_draw_internals.py +15 -0
- enn/enn/draw_internals.py +15 -0
- enn/enn/enn.py +16 -269
- enn/enn/enn_class.py +423 -0
- enn/enn/enn_conditional.py +325 -0
- enn/enn/enn_fit.py +69 -70
- enn/enn/enn_hash.py +79 -0
- enn/enn/enn_index.py +92 -0
- enn/enn/enn_like_protocol.py +35 -0
- enn/enn/enn_normal.py +0 -1
- enn/enn/enn_params.py +3 -22
- enn/enn/enn_params_class.py +24 -0
- enn/enn/enn_util.py +60 -46
- enn/enn/neighbor_data.py +14 -0
- enn/enn/neighbors.py +14 -0
- enn/enn/posterior_flags.py +8 -0
- enn/enn/weighted_stats.py +14 -0
- enn/turbo/components/__init__.py +41 -0
- enn/turbo/components/acquisition.py +13 -0
- enn/turbo/components/acquisition_optimizer_protocol.py +19 -0
- enn/turbo/components/builder.py +22 -0
- enn/turbo/components/chebyshev_incumbent_selector.py +76 -0
- enn/turbo/components/enn_surrogate.py +115 -0
- enn/turbo/components/gp_surrogate.py +144 -0
- enn/turbo/components/hnr_acq_optimizer.py +83 -0
- enn/turbo/components/incumbent_selector.py +11 -0
- enn/turbo/components/incumbent_selector_protocol.py +16 -0
- enn/turbo/components/no_incumbent_selector.py +21 -0
- enn/turbo/components/no_surrogate.py +49 -0
- enn/turbo/components/pareto_acq_optimizer.py +49 -0
- enn/turbo/components/posterior_result.py +12 -0
- enn/turbo/components/protocols.py +13 -0
- enn/turbo/components/random_acq_optimizer.py +21 -0
- enn/turbo/components/scalar_incumbent_selector.py +39 -0
- enn/turbo/components/surrogate_protocol.py +32 -0
- enn/turbo/components/surrogate_result.py +12 -0
- enn/turbo/components/surrogates.py +5 -0
- enn/turbo/components/thompson_acq_optimizer.py +49 -0
- enn/turbo/components/trust_region_protocol.py +24 -0
- enn/turbo/components/ucb_acq_optimizer.py +49 -0
- enn/turbo/config/__init__.py +87 -0
- enn/turbo/config/acq_type.py +8 -0
- enn/turbo/config/acquisition.py +26 -0
- enn/turbo/config/base.py +4 -0
- enn/turbo/config/candidate_gen_config.py +49 -0
- enn/turbo/config/candidate_rv.py +7 -0
- enn/turbo/config/draw_acquisition_config.py +14 -0
- enn/turbo/config/enn_index_driver.py +6 -0
- enn/turbo/config/enn_surrogate_config.py +42 -0
- enn/turbo/config/enums.py +7 -0
- enn/turbo/config/factory.py +118 -0
- enn/turbo/config/gp_surrogate_config.py +14 -0
- enn/turbo/config/hnr_optimizer_config.py +7 -0
- enn/turbo/config/init_config.py +17 -0
- enn/turbo/config/init_strategies/__init__.py +9 -0
- enn/turbo/config/init_strategies/hybrid_init.py +23 -0
- enn/turbo/config/init_strategies/init_strategy.py +19 -0
- enn/turbo/config/init_strategies/lhd_only_init.py +24 -0
- enn/turbo/config/morbo_tr_config.py +82 -0
- enn/turbo/config/nds_optimizer_config.py +7 -0
- enn/turbo/config/no_surrogate_config.py +14 -0
- enn/turbo/config/no_tr_config.py +31 -0
- enn/turbo/config/optimizer_config.py +72 -0
- enn/turbo/config/pareto_acquisition_config.py +14 -0
- enn/turbo/config/raasp_driver.py +6 -0
- enn/turbo/config/raasp_optimizer_config.py +7 -0
- enn/turbo/config/random_acquisition_config.py +14 -0
- enn/turbo/config/rescalarize.py +7 -0
- enn/turbo/config/surrogate.py +12 -0
- enn/turbo/config/trust_region.py +34 -0
- enn/turbo/config/turbo_tr_config.py +71 -0
- enn/turbo/config/ucb_acquisition_config.py +14 -0
- enn/turbo/config/validation.py +45 -0
- enn/turbo/hypervolume.py +30 -0
- enn/turbo/impl_helpers.py +68 -0
- enn/turbo/morbo_trust_region.py +131 -70
- enn/turbo/no_trust_region.py +32 -39
- enn/turbo/optimizer.py +300 -0
- enn/turbo/optimizer_config.py +8 -0
- enn/turbo/proposal.py +36 -38
- enn/turbo/sampling.py +21 -0
- enn/turbo/strategies/__init__.py +9 -0
- enn/turbo/strategies/lhd_only_strategy.py +36 -0
- enn/turbo/strategies/optimization_strategy.py +19 -0
- enn/turbo/strategies/turbo_hybrid_strategy.py +124 -0
- enn/turbo/tr_helpers.py +202 -0
- enn/turbo/turbo_gp.py +0 -1
- enn/turbo/turbo_gp_base.py +0 -1
- enn/turbo/turbo_gp_fit.py +187 -0
- enn/turbo/turbo_gp_noisy.py +0 -1
- enn/turbo/turbo_optimizer_utils.py +98 -0
- enn/turbo/turbo_trust_region.py +126 -58
- enn/turbo/turbo_utils.py +98 -161
- enn/turbo/types/__init__.py +7 -0
- enn/turbo/types/appendable_array.py +85 -0
- enn/turbo/types/gp_data_prep.py +13 -0
- enn/turbo/types/gp_fit_result.py +11 -0
- enn/turbo/types/obs_lists.py +10 -0
- enn/turbo/types/prepare_ask_result.py +14 -0
- enn/turbo/types/tell_inputs.py +14 -0
- {ennbo-0.1.2.dist-info → ennbo-0.1.7.dist-info}/METADATA +18 -11
- ennbo-0.1.7.dist-info/RECORD +111 -0
- enn/enn/__init__.py +0 -4
- enn/turbo/__init__.py +0 -11
- enn/turbo/base_turbo_impl.py +0 -144
- enn/turbo/lhd_only_impl.py +0 -49
- enn/turbo/turbo_config.py +0 -72
- enn/turbo/turbo_enn_impl.py +0 -201
- enn/turbo/turbo_mode.py +0 -10
- enn/turbo/turbo_mode_impl.py +0 -76
- enn/turbo/turbo_one_impl.py +0 -302
- enn/turbo/turbo_optimizer.py +0 -525
- enn/turbo/turbo_zero_impl.py +0 -29
- ennbo-0.1.2.dist-info/RECORD +0 -29
- {ennbo-0.1.2.dist-info → ennbo-0.1.7.dist-info}/WHEEL +0 -0
- {ennbo-0.1.2.dist-info → ennbo-0.1.7.dist-info}/licenses/LICENSE +0 -0
enn/turbo/turbo_one_impl.py
DELETED
@@ -1,302 +0,0 @@
-from __future__ import annotations
-
-from typing import TYPE_CHECKING, Any, Callable
-
-if TYPE_CHECKING:
-    import numpy as np
-    from numpy.random import Generator
-
-from .base_turbo_impl import BaseTurboImpl
-from .turbo_config import TurboOneConfig
-from .turbo_utils import gp_thompson_sample
-
-
-class TurboOneImpl(BaseTurboImpl):
-    def __init__(self, config: TurboOneConfig) -> None:
-        super().__init__(config)
-        self._gp_model: Any | None = None
-        self._gp_y_mean: float | Any = 0.0
-        self._gp_y_std: float | Any = 1.0
-        self._fitted_n_obs: int = 0
-
-    def _as_2d(self, a: np.ndarray) -> np.ndarray:
-        import numpy as np
-
-        a = np.asarray(a, dtype=float)
-        if a.ndim == 1:
-            return a.reshape(-1, 1)
-        if a.ndim == 2:
-            return a.T
-        raise ValueError(a.shape)
-
-    def _broadcast_gp_mean_std(self, num_metrics: int) -> tuple[np.ndarray, np.ndarray]:
-        import numpy as np
-
-        num_metrics = int(num_metrics)
-        if num_metrics <= 0:
-            raise ValueError(num_metrics)
-        gp_y_mean = np.asarray(self._gp_y_mean, dtype=float).reshape(-1)
-        gp_y_std = np.asarray(self._gp_y_std, dtype=float).reshape(-1)
-        if gp_y_mean.size == 1 and num_metrics != 1:
-            gp_y_mean = np.full(num_metrics, float(gp_y_mean[0]), dtype=float)
-        if gp_y_std.size == 1 and num_metrics != 1:
-            gp_y_std = np.full(num_metrics, float(gp_y_std[0]), dtype=float)
-        if gp_y_mean.shape != (num_metrics,) or gp_y_std.shape != (num_metrics,):
-            raise ValueError((gp_y_mean.shape, gp_y_std.shape, num_metrics))
-        return gp_y_mean, gp_y_std
-
-    def _unstandardize(self, y_std_2d: np.ndarray) -> np.ndarray:
-        import numpy as np
-
-        y_std_2d = np.asarray(y_std_2d, dtype=float)
-        if y_std_2d.ndim != 2:
-            raise ValueError(y_std_2d.shape)
-        num_metrics = int(y_std_2d.shape[1])
-        gp_y_mean, gp_y_std = self._broadcast_gp_mean_std(num_metrics)
-        return gp_y_mean.reshape(1, -1) + gp_y_std.reshape(1, -1) * y_std_2d
-
-    def get_x_center(
-        self,
-        x_obs_list: list,
-        y_obs_list: list,
-        rng: Generator,
-        tr_state: Any = None,
-    ) -> np.ndarray | None:
-        import numpy as np
-        import torch
-        import warnings
-
-        from .turbo_utils import argmax_random_tie
-
-        if len(y_obs_list) == 0:
-            return None
-        if self._gp_model is None:
-            if len(y_obs_list) <= 1:
-                x_array = np.asarray(x_obs_list, dtype=float)
-                y_array = np.asarray(y_obs_list, dtype=float)
-                if y_array.ndim == 2:
-                    if self._config.tr_type == "morbo" and tr_state is not None:
-                        scores = tr_state.scalarize(y_array, clip=True)
-                    else:
-                        scores = y_array[:, 0]
-                    best_idx = argmax_random_tie(scores, rng=rng)
-                    return x_array[best_idx]
-                return super().get_x_center(x_obs_list, y_obs_list, rng, tr_state)
-            raise RuntimeError(
-                "TurboOneImpl.get_x_center requires a fitted GP model for 2+ observations; "
-                "call prepare_ask() first."
-            )
-        if self._fitted_n_obs != len(x_obs_list):
-            raise RuntimeError(
-                f"GP fitted on {self._fitted_n_obs} obs but get_x_center called with {len(x_obs_list)}"
-            )
-
-        x_array = np.asarray(x_obs_list, dtype=float)
-        x_torch = torch.as_tensor(x_array, dtype=torch.float64)
-        try:
-            from gpytorch.utils.warnings import GPInputWarning
-        except Exception:  # pragma: no cover
-            GPInputWarning = None
-
-        with torch.no_grad():
-            if GPInputWarning is None:
-                posterior = self._gp_model.posterior(x_torch)
-            else:
-                # We intentionally evaluate the GP posterior at the training inputs
-                # (observed points) when choosing the center. GPyTorch warns about this
-                # in debug mode, but it's expected for our usage.
-                with warnings.catch_warnings():
-                    warnings.filterwarnings(
-                        "ignore",
-                        message=r"The input matches the stored training data\..*",
-                        category=GPInputWarning,
-                    )
-                    posterior = self._gp_model.posterior(x_torch)
-            mu_std = posterior.mean.cpu().numpy()
-
-        mu = self._unstandardize(self._as_2d(mu_std))
-
-        # For morbo: scalarize mu values
-        if self._config.tr_type == "morbo" and tr_state is not None:
-            scalarized = tr_state.scalarize(mu, clip=False)
-            best_idx = argmax_random_tie(scalarized, rng=rng)
-        else:
-            if mu.shape[1] != 1:
-                raise ValueError(mu.shape)
-            best_idx = argmax_random_tie(mu[:, 0], rng=rng)
-
-        return x_array[best_idx]
-
-    def needs_tr_list(self) -> bool:
-        return True
-
-    def try_early_ask(
-        self,
-        num_arms: int,
-        x_obs_list: list,
-        draw_initial_fn: Callable[[int], np.ndarray],
-        get_init_lhd_points_fn: Callable[[int], np.ndarray],
-    ) -> np.ndarray | None:
-        if len(x_obs_list) == 0:
-            return get_init_lhd_points_fn(num_arms)
-        return None
-
-    def handle_restart(
-        self,
-        x_obs_list: list,
-        y_obs_list: list,
-        yvar_obs_list: list,
-        init_idx: int,
-        num_init: int,
-    ) -> tuple[bool, int]:
-        x_obs_list.clear()
-        y_obs_list.clear()
-        yvar_obs_list.clear()
-        return True, 0
-
-    def prepare_ask(
-        self,
-        x_obs_list: list,
-        y_obs_list: list,
-        yvar_obs_list: list,
-        num_dim: int,
-        gp_num_steps: int,
-        rng: Any | None = None,
-    ) -> tuple[Any, float | None, float | None, np.ndarray | None]:
-        import numpy as np
-
-        from .turbo_utils import fit_gp
-
-        if len(x_obs_list) == 0:
-            return None, None, None, None
-        self._gp_model, _likelihood, gp_y_mean_fitted, gp_y_std_fitted = fit_gp(
-            x_obs_list,
-            y_obs_list,
-            num_dim,
-            yvar_obs_list=yvar_obs_list if yvar_obs_list else None,
-            num_steps=gp_num_steps,
-        )
-        self._fitted_n_obs = len(x_obs_list)
-        if gp_y_mean_fitted is not None:
-            self._gp_y_mean = gp_y_mean_fitted
-        if gp_y_std_fitted is not None:
-            self._gp_y_std = gp_y_std_fitted
-        lengthscales = None
-        if self._gp_model is not None:
-            lengthscale = (
-                self._gp_model.covar_module.base_kernel.lengthscale.cpu()
-                .detach()
-                .numpy()
-            )
-            if lengthscale.ndim == 3:
-                lengthscale = lengthscale.mean(axis=0)
-            lengthscales = lengthscale.ravel()
-            # First line helps stabilize second line.
-            lengthscales = lengthscales / lengthscales.mean()
-            lengthscales = lengthscales / np.prod(
-                np.power(lengthscales, 1.0 / len(lengthscales))
-            )
-        return self._gp_model, gp_y_mean_fitted, gp_y_std_fitted, lengthscales
-
-    def select_candidates(
-        self,
-        x_cand: np.ndarray,
-        num_arms: int,
-        num_dim: int,
-        rng: Generator,
-        fallback_fn: Callable[[np.ndarray, int], np.ndarray],
-        from_unit_fn: Callable[[np.ndarray], np.ndarray],
-        tr_state: Any = None,
-    ) -> np.ndarray:
-        import numpy as np
-
-        if self._gp_model is None:
-            if self._fitted_n_obs >= 2:
-                raise RuntimeError(
-                    "TurboOneImpl.select_candidates requires a fitted GP model for 2+ observations; "
-                    "call prepare_ask() first."
-                )
-            return fallback_fn(x_cand, num_arms)
-
-        if self._config.tr_type == "morbo" and tr_state is not None:
-            import gpytorch
-            import torch
-
-            from .turbo_utils import torch_seed_context
-
-            x_torch = torch.as_tensor(x_cand, dtype=torch.float64)
-            seed = int(rng.integers(2**31 - 1))
-            with (
-                torch.no_grad(),
-                gpytorch.settings.fast_pred_var(),
-                torch_seed_context(seed, device=x_torch.device),
-            ):
-                posterior = self._gp_model.posterior(x_torch)
-                samples = posterior.sample(sample_shape=torch.Size([1]))
-
-            if samples.ndim == 2:
-                samples_std = samples[0].detach().cpu().numpy().reshape(-1, 1)
-            elif samples.ndim == 3:
-                samples_std = samples[0].detach().cpu().numpy().T
-            else:
-                raise ValueError(samples.shape)
-
-            y_samples = self._unstandardize(samples_std)
-            scores = tr_state.scalarize(y_samples, clip=False)
-            shuffled_indices = rng.permutation(len(scores))
-            shuffled_scores = scores[shuffled_indices]
-            top_k_in_shuffled = np.argpartition(-shuffled_scores, num_arms - 1)[
-                :num_arms
-            ]
-            idx = shuffled_indices[top_k_in_shuffled]
-            return from_unit_fn(x_cand[idx])
-
-        if (
-            np.asarray(self._gp_y_mean).ndim != 0
-            or np.asarray(self._gp_y_std).ndim != 0
-        ):
-            raise ValueError("multi-output GP requires tr_type='morbo'")
-        idx = gp_thompson_sample(
-            self._gp_model,
-            x_cand,
-            num_arms,
-            rng,
-            float(self._gp_y_mean),
-            float(self._gp_y_std),
-        )
-        return from_unit_fn(x_cand[idx])
-
-    def estimate_y(self, x_unit: np.ndarray, y_observed: np.ndarray) -> np.ndarray:
-        import torch
-
-        if self._gp_model is None:
-            raise RuntimeError(
-                "TurboOneImpl.estimate_y requires a fitted GP model; call prepare_ask() first."
-            )
-        x_torch = torch.as_tensor(x_unit, dtype=torch.float64)
-        with torch.no_grad():
-            posterior = self._gp_model.posterior(x_torch)
-            mu_std = posterior.mean.cpu().numpy()
-
-        mu = self._unstandardize(self._as_2d(mu_std))
-        if mu.shape[1] == 1:
-            return mu[:, 0]
-        return mu
-
-    def get_mu_sigma(self, x_unit: np.ndarray) -> tuple[np.ndarray, np.ndarray] | None:
-        import torch
-
-        if self._gp_model is None:
-            return None
-        x_torch = torch.as_tensor(x_unit, dtype=torch.float64)
-        with torch.no_grad():
-            posterior = self._gp_model.posterior(x_torch)
-            mu_std = posterior.mean.cpu().numpy()
-            sigma_std = posterior.variance.cpu().numpy() ** 0.5
-
-        mu_std_2d = self._as_2d(mu_std)
-        sigma_std_2d = self._as_2d(sigma_std)
-        mu = self._unstandardize(mu_std_2d)
-        _gp_y_mean, gp_y_std = self._broadcast_gp_mean_std(int(mu_std_2d.shape[1]))
-        sigma = gp_y_std.reshape(1, -1) * sigma_std_2d
-        return mu, sigma
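For context on the deleted file above: `select_candidates` chose arms by Thompson sampling from the fitted GP posterior (via `gp_thompson_sample` in the single-metric case, or joint posterior draws scalarized by `tr_state.scalarize` for morbo). The following is a minimal, self-contained sketch of that selection idea, assuming a posterior mean vector and covariance matrix over a finite candidate set are already available; `thompson_select` and its inputs are illustrative and not part of the ennbo API.

```python
# Illustrative only: Thompson-sampling arm selection over a finite candidate set.
# `mu` and `cov` stand in for a GP posterior evaluated at the candidates.
import numpy as np


def thompson_select(
    mu: np.ndarray,      # posterior mean at each candidate, shape (n_cand,)
    cov: np.ndarray,     # posterior covariance between candidates, shape (n_cand, n_cand)
    num_arms: int,
    rng: np.random.Generator,
) -> np.ndarray:
    """Return one candidate index per joint posterior draw."""
    picks = np.empty(num_arms, dtype=int)
    for k in range(num_arms):
        draw = rng.multivariate_normal(mu, cov)  # one plausible objective surface
        picks[k] = int(np.argmax(draw))          # maximizer under this draw
    return picks


# Toy usage: 5 candidates, choose 2 arms.
rng = np.random.default_rng(0)
mu = np.array([0.1, 0.4, 0.2, 0.0, 0.3])
cov = 0.05 * np.eye(5)
print(thompson_select(mu, cov, num_arms=2, rng=rng))
```

This sketch may pick the same candidate more than once; the removed implementation instead took the top-`num_arms` indices of a single scalarized posterior sample (with a random shuffle for tie-breaking) in the morbo branch.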