pyfemtet 0.5.3__py3-none-any.whl → 0.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pyfemtet might be problematic.

Files changed (62)
  1. pyfemtet/__init__.py +1 -1
  2. pyfemtet/{message → _message}/locales/ja/LC_MESSAGES/messages.po +89 -77
  3. pyfemtet/{message → _message}/locales/messages.pot +88 -76
  4. pyfemtet/{message → _message}/messages.py +1 -1
  5. pyfemtet/_warning.py +23 -0
  6. pyfemtet/dispatch_extensions/__init__.py +12 -0
  7. pyfemtet/{dispatch_extensions.py → dispatch_extensions/_impl.py} +45 -43
  8. pyfemtet/logger/__init__.py +3 -0
  9. pyfemtet/{logger.py → logger/_impl.py} +12 -6
  10. pyfemtet/opt/__init__.py +3 -0
  11. pyfemtet/opt/_femopt.py +265 -68
  12. pyfemtet/opt/_femopt_core.py +111 -68
  13. pyfemtet/opt/_test_utils/record_history.py +1 -1
  14. pyfemtet/opt/interface/__init__.py +0 -1
  15. pyfemtet/opt/interface/_base.py +3 -3
  16. pyfemtet/opt/interface/_femtet.py +116 -59
  17. pyfemtet/opt/interface/_femtet_with_nx/_interface.py +35 -12
  18. pyfemtet/opt/interface/_femtet_with_sldworks.py +22 -2
  19. pyfemtet/opt/optimizer/__init__.py +5 -1
  20. pyfemtet/opt/optimizer/_base.py +81 -55
  21. pyfemtet/opt/optimizer/{_optuna_botorchsampler_parameter_constraint_helper.py → _optuna/_botorch_patch/enable_nonlinear_constraint.py} +10 -127
  22. pyfemtet/opt/optimizer/{_optuna.py → _optuna/_optuna.py} +122 -19
  23. pyfemtet/opt/optimizer/_optuna/_pof_botorch.py +1833 -0
  24. pyfemtet/opt/optimizer/_scipy.py +20 -5
  25. pyfemtet/opt/optimizer/_scipy_scalar.py +20 -5
  26. pyfemtet/opt/prediction/{base.py → _base.py} +3 -2
  27. pyfemtet/opt/prediction/single_task_gp.py +10 -5
  28. pyfemtet/opt/samples/femprj_sample/constrained_pipe.py +2 -2
  29. pyfemtet/opt/samples/femprj_sample/her_ex40_parametric.py +2 -2
  30. pyfemtet/opt/visualization/{base.py → _base.py} +1 -1
  31. pyfemtet/opt/visualization/{complex_components → _complex_components}/alert_region.py +2 -2
  32. pyfemtet/opt/visualization/{complex_components → _complex_components}/control_femtet.py +3 -3
  33. pyfemtet/opt/visualization/{complex_components → _complex_components}/main_figure_creator.py +1 -1
  34. pyfemtet/opt/visualization/{complex_components → _complex_components}/main_graph.py +5 -5
  35. pyfemtet/opt/visualization/{complex_components → _complex_components}/pm_graph.py +5 -5
  36. pyfemtet/opt/visualization/{complex_components → _complex_components}/pm_graph_creator.py +2 -2
  37. pyfemtet/opt/visualization/_create_wrapped_components.py +2 -2
  38. pyfemtet/opt/visualization/_process_monitor/__init__.py +0 -0
  39. pyfemtet/opt/visualization/{process_monitor → _process_monitor}/application.py +3 -3
  40. pyfemtet/opt/visualization/{process_monitor → _process_monitor}/pages.py +10 -10
  41. pyfemtet/opt/visualization/_wrapped_components/__init__.py +0 -0
  42. pyfemtet/opt/visualization/{wrapped_components → _wrapped_components}/dbc.py +1 -1
  43. pyfemtet/opt/visualization/{wrapped_components → _wrapped_components}/dcc.py +1 -1
  44. pyfemtet/opt/visualization/{wrapped_components → _wrapped_components}/html.py +1 -1
  45. pyfemtet/opt/visualization/result_viewer/application.py +4 -4
  46. pyfemtet/opt/visualization/result_viewer/pages.py +9 -9
  47. {pyfemtet-0.5.3.dist-info → pyfemtet-0.6.0.dist-info}/METADATA +2 -2
  48. {pyfemtet-0.5.3.dist-info → pyfemtet-0.6.0.dist-info}/RECORD +60 -56
  49. {pyfemtet-0.5.3.dist-info → pyfemtet-0.6.0.dist-info}/WHEEL +1 -1
  50. pyfemtet/message/locales/ja/LC_MESSAGES/messages.mo +0 -0
  51. pyfemtet/opt/samples/femprj_sample/.gitignore +0 -2
  52. /pyfemtet/{message → _message}/1. make_pot.bat +0 -0
  53. /pyfemtet/{message → _message}/2. make_mo.bat +0 -0
  54. /pyfemtet/{message → _message}/__init__.py +0 -0
  55. /pyfemtet/{message → _message}/babel.cfg +0 -0
  56. /pyfemtet/opt/{visualization/complex_components → optimizer/_optuna}/__init__.py +0 -0
  57. /pyfemtet/opt/{visualization/process_monitor → optimizer/_optuna/_botorch_patch}/__init__.py +0 -0
  58. /pyfemtet/opt/{parameter.py → optimizer/parameter.py} +0 -0
  59. /pyfemtet/opt/visualization/{wrapped_components → _complex_components}/__init__.py +0 -0
  60. /pyfemtet/opt/visualization/{wrapped_components → _wrapped_components}/str_enum.py +0 -0
  61. {pyfemtet-0.5.3.dist-info → pyfemtet-0.6.0.dist-info}/LICENSE +0 -0
  62. {pyfemtet-0.5.3.dist-info → pyfemtet-0.6.0.dist-info}/entry_points.txt +0 -0
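
The bulk of this release is the new module pyfemtet/opt/optimizer/_optuna/_pof_botorch.py (entry 23 above, +1833 lines), shown in the hunk below. It forks Optuna's BoTorchSampler so that candidate proposals are weighted by a Probability of Feasibility (PoF) learned by a separate feasibility GP, with optional symlog scaling and a repeat penalty. As orientation before reading the diff, here is a minimal usage sketch; it is an assumption-laden illustration (inside pyfemtet the sampler is normally wired up by OptunaOptimizer rather than instantiated by hand), with the class and field names taken from the diff itself and the objective being a hypothetical stand-in:

    import optuna
    from pyfemtet.opt.optimizer._optuna._pof_botorch import PoFBoTorchSampler, PoFConfig

    # PoFConfig toggles the PoF weighting and related heuristics; the values
    # shown are the dataclass defaults visible further down in this diff.
    sampler = PoFBoTorchSampler(
        n_startup_trials=10,
        seed=42,
        pof_config=PoFConfig(enable_pof=True, gamma=1.0, threshold=0.5),
    )

    # Hypothetical toy objective; any Optuna objective works here.
    def objective(trial: optuna.Trial) -> float:
        x = trial.suggest_float("x", -1.0, 1.0)
        return x ** 2

    study = optuna.create_study(sampler=sampler)
    study.optimize(objective, n_trials=30)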
pyfemtet/opt/optimizer/_optuna/_pof_botorch.py (new file)
@@ -0,0 +1,1833 @@
+ from __future__ import annotations
+
+ # ===== constant =====
+ _USE_FIXED_NOISE = True
+ def _get_use_fixed_noise() -> bool:
+     return _USE_FIXED_NOISE
+ def _set_use_fixed_noise(value: bool):
+     global _USE_FIXED_NOISE
+     _USE_FIXED_NOISE = value
+
+ # ignore warnings
+ import warnings
+ from botorch.exceptions.warnings import InputDataWarning
+ from optuna.exceptions import ExperimentalWarning
+
+ warnings.filterwarnings('ignore', category=InputDataWarning)
+ warnings.filterwarnings('ignore', category=ExperimentalWarning)
+
+ from pyfemtet.opt.optimizer._optuna._botorch_patch.enable_nonlinear_constraint import NonlinearInequalityConstraints
+
+ from collections.abc import Callable
+ from collections.abc import Sequence
+ from typing import Any
+ import random
+
+ from dataclasses import dataclass
+
+ import numpy
+ from optuna import logging
+ from optuna._experimental import experimental_class
+ from optuna._experimental import experimental_func
+ from optuna._imports import try_import
+ from optuna._transform import _SearchSpaceTransform
+ from optuna.distributions import BaseDistribution
+ from optuna.samplers import BaseSampler
+ from optuna.samplers import RandomSampler
+ from optuna.samplers._base import _CONSTRAINTS_KEY
+ from optuna.samplers._base import _process_constraints_after_trial
+ from optuna.search_space import IntersectionSearchSpace
+ from optuna.study import Study
+ from optuna.study import StudyDirection
+ from optuna.trial import FrozenTrial
+ from optuna.trial import TrialState
+ from packaging import version
+
+ with try_import() as _imports:
+     from botorch.acquisition.knowledge_gradient import qKnowledgeGradient
+     from botorch.acquisition.monte_carlo import qExpectedImprovement
+     from botorch.acquisition.monte_carlo import qNoisyExpectedImprovement
+     from botorch.acquisition.multi_objective import monte_carlo
+     from botorch.acquisition.multi_objective.analytic import ExpectedHypervolumeImprovement
+     from botorch.acquisition.multi_objective.objective import (
+         FeasibilityWeightedMCMultiOutputObjective,
+     )
+     from botorch.acquisition.multi_objective.objective import IdentityMCMultiOutputObjective
+     from botorch.acquisition.objective import ConstrainedMCObjective
+     from botorch.acquisition.objective import GenericMCObjective
+     from botorch.models import ModelListGP
+     from botorch.models import SingleTaskGP
+     from botorch.models.transforms.outcome import Standardize
+     from botorch.optim import optimize_acqf
+     from botorch.sampling import SobolQMCNormalSampler
+     from botorch.sampling.list_sampler import ListSampler
+     import botorch.version
+
+     if version.parse(botorch.version.version) < version.parse("0.8.0"):
+         from botorch.fit import fit_gpytorch_model as fit_gpytorch_mll
+
+
+         def _get_sobol_qmc_normal_sampler(num_samples: int) -> SobolQMCNormalSampler:
+             return SobolQMCNormalSampler(num_samples)
+
+     else:
+         from botorch.fit import fit_gpytorch_mll
+
+
+         def _get_sobol_qmc_normal_sampler(num_samples: int) -> SobolQMCNormalSampler:
+             return SobolQMCNormalSampler(torch.Size((num_samples,)))
+
+     from gpytorch.mlls import ExactMarginalLogLikelihood
+     from gpytorch.mlls.sum_marginal_log_likelihood import SumMarginalLogLikelihood
+     import torch
+
+     from botorch.utils.multi_objective.box_decompositions import NondominatedPartitioning
+     from botorch.utils.multi_objective.scalarization import get_chebyshev_scalarization
+     from botorch.utils.sampling import manual_seed
+     from botorch.utils.sampling import sample_simplex
+     from botorch.utils.transforms import normalize
+     from botorch.utils.transforms import unnormalize
+
+ _logger = logging.get_logger(__name__)
+
+ with try_import() as _imports_logei:
+     from botorch.acquisition.analytic import LogConstrainedExpectedImprovement
+     from botorch.acquisition.analytic import LogExpectedImprovement
+
+ with try_import() as _imports_qhvkg:
+     from botorch.acquisition.multi_objective.hypervolume_knowledge_gradient import (
+         qHypervolumeKnowledgeGradient,
+     )
+
+
+ def _validate_botorch_version_for_constrained_opt(func_name: str) -> None:
+     if version.parse(botorch.version.version) < version.parse("0.9.0"):
+         raise ImportError(
+             f"{func_name} requires botorch>=0.9.0 for constrained problems, but got "
+             f"botorch={botorch.version.version}.\n"
+             "Please run ``pip install botorch --upgrade``."
+         )
+
+
+ def _get_constraint_funcs(n_constraints: int) -> list[Callable[["torch.Tensor"], "torch.Tensor"]]:
+     return [lambda Z: Z[..., -n_constraints + i] for i in range(n_constraints)]
+
+
+ # helper function
+ def symlog(x):
+     """Symmetric logarithm function.
+
+     Args:
+         x (torch.Tensor): Input tensor.
+
+     Returns:
+         torch.Tensor: The symlog of the input tensor.
+     """
+     # Apply the symlog transformation
+     return torch.where(
+         x >= 0,
+         torch.log(x + 1),
+         -torch.log(1 - x)
+     )
+
+
+ # Factory that builds a class adding a PoF coefficient to a base acquisition function class
+ def acqf_patch_factory(acqf_class, pof_config=None):
+     """Creates a class that adds the PoF coefficient calculation to the base acqf class.
+
+     The returned class requires a trained SingleTaskGP object for
+     evaluating feasibility to be supplied via its set_model_c()
+     method.
+     """
+     from torch.distributions import Normal
+
+     if pof_config is None:
+         pof_config = PoFConfig()
+
+     # optuna_integration.botorch.botorch.qExpectedImprovement
+     class ACQFWithPOF(acqf_class):
+         """Introduces PoF coefficients for a given class of acquisition functions."""
+         model_c: SingleTaskGP
+
+         enable_pof: bool = pof_config.enable_pof  # Whether to consider PoF.
+         gamma: float | torch.Tensor = pof_config.gamma  # Exponent applied to PoF. Larger values emphasize feasibility more; 0 ignores PoF.
+         threshold: float | torch.Tensor = pof_config.threshold  # Boundary value when computing PoF via the CDF. Typically 0 to 1, with 0.5 recommended. Larger values emphasize feasibility more.
+
+         enable_log: bool = pof_config.enable_log  # Apply symlog to the base acquisition function value.
+         enable_positive_only_pof: bool = pof_config.enable_positive_only_pof  # Multiply by PoF only where the base acquisition function is positive.
+
+         enable_dynamic_pof: bool = pof_config.enable_dynamic_pof  # Change gamma dynamically. When True, gamma is ignored.
+         enable_dynamic_threshold: bool = pof_config.enable_dynamic_threshold  # Change threshold dynamically. When True, threshold is ignored.
+
+         enable_repeat_penalty: bool = pof_config.enable_repeat_penalty  # Apply a penalty coefficient to the base acquisition function value near already-sampled points.
+         _repeat_penalty: float | torch.Tensor = pof_config._repeat_penalty  # Internal variable used when enable_repeat_penalty is True.
+
+         enable_dynamic_repeat_penalty: bool = pof_config.enable_dynamic_repeat_penalty  # Strengthen the penalty coefficient when the same values are proposed repeatedly. When True, enable_repeat_penalty behaves as if it were True.
+         repeat_watch_window: int = pof_config.repeat_watch_window  # When enable_dynamic_repeat_penalty is True, how many recent proposals are consulted to decide the penalty magnitude.
+         repeat_watch_norm_distance: float = pof_config.repeat_watch_norm_distance  # Norm distance between proposals in the [0, 1]-normalized parameter space below which the penalty is strengthened. Extreme values may cause numerical instability.
+         _repeat_penalty_gamma: float | torch.Tensor = pof_config._repeat_penalty_gamma  # Exponent of _repeat_penalty; internal variable.
+
+
+         def set_model_c(self, model_c: SingleTaskGP):
+             self.model_c = model_c
+
+         def pof(self, X: torch.Tensor):
+             # Build a normal distribution from the predicted mean and standard deviation
+             _X = X.squeeze(1)
+             posterior = self.model_c.posterior(_X)
+             mean = posterior.mean
+             sigma = posterior.variance.sqrt()
+
+             # Integrate it
+             normal = Normal(mean, sigma)
+             # The closer this threshold is to the "true" label value, the
+             # stricter the criterion; raising it far beyond the "true" value
+             # probably erases the difference between true and false.
+             if isinstance(self.threshold, float):
+                 cdf = 1. - normal.cdf(torch.tensor(self.threshold, device='cpu').double())
+             else:
+                 cdf = 1. - normal.cdf(self.threshold)
+
+             return cdf.squeeze(1)
+
+         def forward(self, X: torch.Tensor) -> torch.Tensor:
+             # ===== base acquisition function =====
+             base_acqf = super().forward(X)
+
+             # ===== processing shared by the dynamic features =====
+             if (
+                 self.enable_dynamic_pof
+                 or self.enable_dynamic_threshold
+                 or self.enable_repeat_penalty
+                 or self.enable_dynamic_repeat_penalty
+             ):
+                 # ===== compute the normalized uncertainty =====
+                 _X = X.squeeze(1)  # batch x 1 x dim -> batch x dim
+                 # Get the predictive standard deviation at X
+                 post = self.model_c.posterior(_X)
+                 current_stddev = post.variance.sqrt()  # batch x dim
+                 # Get the standard deviation at the known points
+                 post = self.model_c.posterior(self.model_c.train_inputs[0])
+                 known_stddev = post.variance.sqrt().mean(dim=0)
+                 # known_stddev: the standard deviation at already-sampled points, so it should be small.
+                 # current_stddev: the standard deviation at unknown points, so it should be large;
+                 # conversely, a small value means the point is close to a known one.
+                 # Normalize by the known points' standard deviation and average down to one dimension
+                 buff = current_stddev / known_stddev
+                 norm_stddev = buff.mean(dim=1)  # (batch,), values roughly 1 to 100
+
+                 # ===== dynamic gamma =====
+                 if self.enable_dynamic_pof:
+                     buff = 1000. / norm_stddev  # values roughly 1 to 100
+                     buff = symlog(buff)  # values roughly 1 to 4?
+                     self.gamma = buff
+
+                 # ===== dynamic threshold =====
+                 if self.enable_dynamic_threshold:
+                     # Tends to be too aggressive?
+                     self.threshold = (1 - torch.sigmoid(norm_stddev - 1 - 4) / 2).unsqueeze(1)
+
+                 # ===== repeat penalty =====
+                 if self.enable_repeat_penalty:
+                     # The base penalty is the uncertainty:
+                     # a small stddev
+                     # = near an already-sampled point
+                     # = we want a smaller acquisition value
+                     # = use the stddev itself as the coefficient
+                     self._repeat_penalty = norm_stddev
+
+                 # ===== dynamic repeat penalty =====
+                 if self.enable_dynamic_repeat_penalty:
+                     # (Re)define the base penalty just in case; its computational cost is small
+                     self._repeat_penalty = norm_stddev
+                     # Nothing can be done while the number of samples is at most repeat_watch_window
+                     if len(self.model_c.train_inputs[0]) > self.repeat_watch_window:
+                         # The smaller the spread of x over the most recent N samples,
+                         # the stronger the penalty for the entire optimize_scipy run
+                         monitor_window = self.model_c.train_inputs[0][-self.repeat_watch_window:]
+                         g = monitor_window.mean(dim=0)
+                         distance = torch.norm(monitor_window - g, dim=1).mean()
+                         self._repeat_penalty_gamma = self.repeat_watch_norm_distance / distance
+
+             # ===== PoF =====
+             if self.enable_pof:
+                 pof = self.pof(X)
+             else:
+                 pof = 1.
+
+             # ===== miscellaneous =====
+             if self.enable_log:
+                 base_acqf = symlog(base_acqf)
+
+             if self.enable_positive_only_pof:
+                 pof = torch.where(
+                     base_acqf >= 0,
+                     pof,
+                     torch.ones_like(pof)
+                 )
+
+             ret = -torch.log(1 - torch.sigmoid(base_acqf)) * pof ** self.gamma * self._repeat_penalty ** self._repeat_penalty_gamma
+             return ret
+
+     return ACQFWithPOF
+
+
+ # noinspection PyIncorrectDocstring
+ @experimental_func("3.3.0")
+ def logei_candidates_func(
+     train_x: "torch.Tensor",
+     train_obj: "torch.Tensor",
+     train_con: "torch.Tensor" | None,
+     bounds: "torch.Tensor",
+     pending_x: "torch.Tensor" | None,
+     model_c: "SingleTaskGP",
+     _constraints,
+     _study,
+     _opt,
+     pof_config,
+ ) -> "torch.Tensor":
+     """Log Expected Improvement (LogEI).
+
+     The default value of ``candidates_func`` in :class:`~optuna_integration.BoTorchSampler`
+     with single-objective optimization.
+
+     Args:
+         train_x:
+             Previous parameter configurations. A ``torch.Tensor`` of shape
+             ``(n_trials, n_params)``. ``n_trials`` is the number of already observed trials
+             and ``n_params`` is the number of parameters. ``n_params`` may be larger than the
+             actual number of parameters if categorical parameters are included in the search
+             space, since these parameters are one-hot encoded.
+             Values are not normalized.
+         train_obj:
+             Previously observed objectives. A ``torch.Tensor`` of shape
+             ``(n_trials, n_objectives)``. ``n_trials`` is identical to that of ``train_x``.
+             ``n_objectives`` is the number of objectives. Observations are not normalized.
+         train_con:
+             Objective constraints. A ``torch.Tensor`` of shape ``(n_trials, n_constraints)``.
+             ``n_trials`` is identical to that of ``train_x``. ``n_constraints`` is the number of
+             constraints. A constraint is violated if strictly larger than 0. If no constraints are
+             involved in the optimization, this argument will be :obj:`None`.
+         bounds:
+             Search space bounds. A ``torch.Tensor`` of shape ``(2, n_params)``. ``n_params`` is
+             identical to that of ``train_x``. The first and the second rows correspond to the
+             lower and upper bounds for each parameter respectively.
+         pending_x:
+             Pending parameter configurations. A ``torch.Tensor`` of shape
+             ``(n_pending, n_params)``. ``n_pending`` is the number of trials whose parameters
+             have already been suggested but whose evaluation has not completed, and
+             ``n_params`` is identical to that of ``train_x``.
+         model_c:
+             Feasibility model.
+
+     Returns:
+         Next set of candidates. Usually the return value of BoTorch's ``optimize_acqf``.
+
+     """
+
+     # We need botorch >=0.8.1 for LogExpectedImprovement.
+     if not _imports_logei.is_successful():
+         raise ImportError(
+             "logei_candidates_func requires botorch >=0.8.1. "
+             "Please upgrade botorch or use qei_candidates_func as candidates_func instead."
+         )
+
+     if train_obj.size(-1) != 1:
+         raise ValueError("Objective may only contain single values with logEI.")
+     n_constraints = train_con.size(1) if train_con is not None else 0
+     if n_constraints > 0:
+         assert train_con is not None
+         train_y = torch.cat([train_obj, train_con], dim=-1)
+
+         is_feas = (train_con <= 0).all(dim=-1)
+         train_obj_feas = train_obj[is_feas]
+
+         if train_obj_feas.numel() == 0:
+             _logger.warning(
+                 "No objective values are feasible. Using 0 as the best objective in logEI."
+             )
+             best_f = train_obj.min()
+         else:
+             best_f = train_obj_feas.max()
+
+     else:
+         train_y = train_obj
+         best_f = train_obj.max()
+
+     train_x = normalize(train_x, bounds=bounds)
+
+     model = SingleTaskGP(
+         train_x,
+         train_y,
+         train_Yvar=1e-4 * torch.ones_like(train_y) if _get_use_fixed_noise() else None,
+         outcome_transform=Standardize(m=train_y.size(-1))
+     )
+     mll = ExactMarginalLogLikelihood(model.likelihood, model)
+     fit_gpytorch_mll(mll)
+     if n_constraints > 0:
+         ACQF = acqf_patch_factory(LogConstrainedExpectedImprovement, pof_config)
+         acqf = ACQF(
+             model=model,
+             best_f=best_f,
+             objective_index=0,
+             constraints={i: (None, 0.0) for i in range(1, n_constraints + 1)},
+         )
+     else:
+         ACQF = acqf_patch_factory(LogExpectedImprovement, pof_config)
+         acqf = ACQF(
+             model=model,
+             best_f=best_f,
+         )
+     acqf.set_model_c(model_c)
+
+     standard_bounds = torch.zeros_like(bounds)
+     standard_bounds[1] = 1
+
+     # Add the parameter constraints to the optimize_acqf search.
+     if len(_constraints) > 0:
+         nc = NonlinearInequalityConstraints(_study, _constraints, _opt)
+
+         # 1, batch_limit, nonlinear_..., ic_generator
+         kwargs = nc.create_kwargs()
+         q = kwargs.pop('q')
+         batch_limit = kwargs.pop('options')["batch_limit"]
+
+         candidates, _ = optimize_acqf(
+             acq_function=acqf,
+             bounds=standard_bounds,
+             q=q,
+             num_restarts=10,
+             raw_samples=512,
+             options={"batch_limit": batch_limit, "maxiter": 200},
+             sequential=True,
+             **kwargs
+         )
+
+     else:
+         candidates, _ = optimize_acqf(
+             acq_function=acqf,
+             bounds=standard_bounds,
+             q=1,
+             num_restarts=10,
+             raw_samples=512,
+             options={"batch_limit": 5, "maxiter": 200},
+             sequential=True,
+         )
+
+     candidates = unnormalize(candidates.detach(), bounds=bounds)
+
+     return candidates
+
+
+ # noinspection PyIncorrectDocstring
+ @experimental_func("2.4.0")
+ def qei_candidates_func(
+     train_x: "torch.Tensor",
+     train_obj: "torch.Tensor",
+     train_con: "torch.Tensor" | None,
+     bounds: "torch.Tensor",
+     pending_x: "torch.Tensor" | None,
+     model_c: "SingleTaskGP",
+     _constraints,
+     _study,
+     _opt,
+     pof_config,
+ ) -> "torch.Tensor":
+     """Quasi MC-based batch Expected Improvement (qEI).
+
+     Args:
+         train_x:
+             Previous parameter configurations. A ``torch.Tensor`` of shape
+             ``(n_trials, n_params)``. ``n_trials`` is the number of already observed trials
+             and ``n_params`` is the number of parameters. ``n_params`` may be larger than the
+             actual number of parameters if categorical parameters are included in the search
+             space, since these parameters are one-hot encoded.
+             Values are not normalized.
+         train_obj:
+             Previously observed objectives. A ``torch.Tensor`` of shape
+             ``(n_trials, n_objectives)``. ``n_trials`` is identical to that of ``train_x``.
+             ``n_objectives`` is the number of objectives. Observations are not normalized.
+         train_con:
+             Objective constraints. A ``torch.Tensor`` of shape ``(n_trials, n_constraints)``.
+             ``n_trials`` is identical to that of ``train_x``. ``n_constraints`` is the number of
+             constraints. A constraint is violated if strictly larger than 0. If no constraints are
+             involved in the optimization, this argument will be :obj:`None`.
+         bounds:
+             Search space bounds. A ``torch.Tensor`` of shape ``(2, n_params)``. ``n_params`` is
+             identical to that of ``train_x``. The first and the second rows correspond to the
+             lower and upper bounds for each parameter respectively.
+         pending_x:
+             Pending parameter configurations. A ``torch.Tensor`` of shape
+             ``(n_pending, n_params)``. ``n_pending`` is the number of trials whose parameters
+             have already been suggested but whose evaluation has not completed, and
+             ``n_params`` is identical to that of ``train_x``.
+         model_c:
+             Feasibility model.
+     Returns:
+         Next set of candidates. Usually the return value of BoTorch's ``optimize_acqf``.
+
+     """
+
+     if train_obj.size(-1) != 1:
+         raise ValueError("Objective may only contain single values with qEI.")
+     if train_con is not None:
+         _validate_botorch_version_for_constrained_opt("qei_candidates_func")
+         train_y = torch.cat([train_obj, train_con], dim=-1)
+
+         is_feas = (train_con <= 0).all(dim=-1)
+         train_obj_feas = train_obj[is_feas]
+
+         if train_obj_feas.numel() == 0:
+             # TODO(hvy): Do not use 0 as the best observation.
+             _logger.warning(
+                 "No objective values are feasible. Using 0 as the best objective in qEI."
+             )
+             best_f = torch.zeros(())
+         else:
+             best_f = train_obj_feas.max()
+
+         n_constraints = train_con.size(1)
+         additional_qei_kwargs = {
+             "objective": GenericMCObjective(lambda Z, X: Z[..., 0]),
+             "constraints": _get_constraint_funcs(n_constraints),
+         }
+     else:
+         train_y = train_obj
+
+         best_f = train_obj.max()
+
+         additional_qei_kwargs = {}
+
+     train_x = normalize(train_x, bounds=bounds)
+     if pending_x is not None:
+         pending_x = normalize(pending_x, bounds=bounds)
+
+     model = SingleTaskGP(
+         train_x,
+         train_y,
+         train_Yvar=1e-4 * torch.ones_like(train_y) if _get_use_fixed_noise() else None,
+         outcome_transform=Standardize(m=train_y.size(-1))
+     )
+     mll = ExactMarginalLogLikelihood(model.likelihood, model)
+     fit_gpytorch_mll(mll)
+
+     ACQF = acqf_patch_factory(qExpectedImprovement, pof_config)
+     acqf = ACQF(
+         model=model,
+         best_f=best_f,
+         sampler=_get_sobol_qmc_normal_sampler(256),
+         X_pending=pending_x,
+         **additional_qei_kwargs,
+     )
+     acqf.set_model_c(model_c)
+
+     standard_bounds = torch.zeros_like(bounds)
+     standard_bounds[1] = 1
+
+     if len(_constraints) > 0:
+         nc = NonlinearInequalityConstraints(_study, _constraints, _opt)
+
+         # 1, batch_limit, nonlinear_..., ic_generator
+         kwargs = nc.create_kwargs()
+         q = kwargs.pop('q')
+         batch_limit = kwargs.pop('options')["batch_limit"]
+
+         candidates, _ = optimize_acqf(
+             acq_function=acqf,
+             bounds=standard_bounds,
+             q=q,
+             num_restarts=10,
+             raw_samples=512,
+             options={"batch_limit": batch_limit, "maxiter": 200},
+             sequential=True,
+             **kwargs
+         )
+
+     else:
+
+         candidates, _ = optimize_acqf(
+             acq_function=acqf,
+             bounds=standard_bounds,
+             q=1,
+             num_restarts=10,
+             raw_samples=512,
+             options={"batch_limit": 5, "maxiter": 200},
+             sequential=True,
+         )
+
+     candidates = unnormalize(candidates.detach(), bounds=bounds)
+
+     return candidates
+
+
+ # noinspection PyIncorrectDocstring
+ @experimental_func("3.3.0")
+ def qnei_candidates_func(
+     train_x: "torch.Tensor",
+     train_obj: "torch.Tensor",
+     train_con: "torch.Tensor" | None,
+     bounds: "torch.Tensor",
+     pending_x: "torch.Tensor" | None,
+     model_c: "SingleTaskGP",
+     _constraints,
+     _study,
+     _opt,
+     pof_config,
+ ) -> "torch.Tensor":
+     """Quasi MC-based batch Noisy Expected Improvement (qNEI).
+
+     This function may perform better than qEI (`qei_candidates_func`) when
+     the evaluated values of the objective function are noisy.
+
+     .. seealso::
+         :func:`~optuna_integration.botorch.qei_candidates_func` for argument and return value
+         descriptions.
+     """
+     if train_obj.size(-1) != 1:
+         raise ValueError("Objective may only contain single values with qNEI.")
+     if train_con is not None:
+         _validate_botorch_version_for_constrained_opt("qnei_candidates_func")
+         train_y = torch.cat([train_obj, train_con], dim=-1)
+
+         n_constraints = train_con.size(1)
+         additional_qnei_kwargs = {
+             "objective": GenericMCObjective(lambda Z, X: Z[..., 0]),
+             "constraints": _get_constraint_funcs(n_constraints),
+         }
+     else:
+         train_y = train_obj
+
+         additional_qnei_kwargs = {}
+
+     train_x = normalize(train_x, bounds=bounds)
+     if pending_x is not None:
+         pending_x = normalize(pending_x, bounds=bounds)
+
+     model = SingleTaskGP(
+         train_x,
+         train_y,
+         train_Yvar=1e-4 * torch.ones_like(train_y) if _get_use_fixed_noise() else None,
+         outcome_transform=Standardize(m=train_y.size(-1))
+     )
+     mll = ExactMarginalLogLikelihood(model.likelihood, model)
+     fit_gpytorch_mll(mll)
+
+     ACQF = acqf_patch_factory(qNoisyExpectedImprovement, pof_config)
+     acqf = ACQF(
+         model=model,
+         X_baseline=train_x,
+         sampler=_get_sobol_qmc_normal_sampler(256),
+         X_pending=pending_x,
+         **additional_qnei_kwargs,
+     )
+     acqf.set_model_c(model_c)
+
+     standard_bounds = torch.zeros_like(bounds)
+     standard_bounds[1] = 1
+
+     # Add the parameter constraints to the optimize_acqf search.
+     if len(_constraints) > 0:
+         nc = NonlinearInequalityConstraints(_study, _constraints, _opt)
+
+         # 1, batch_limit, nonlinear_..., ic_generator
+         kwargs = nc.create_kwargs()
+         q = kwargs.pop('q')
+         batch_limit = kwargs.pop('options')["batch_limit"]
+
+         candidates, _ = optimize_acqf(
+             acq_function=acqf,
+             bounds=standard_bounds,
+             q=q,
+             num_restarts=10,
+             raw_samples=512,
+             options={"batch_limit": batch_limit, "maxiter": 200},
+             sequential=True,
+             **kwargs
+         )
+
+     else:
+         candidates, _ = optimize_acqf(
+             acq_function=acqf,
+             bounds=standard_bounds,
+             q=1,
+             num_restarts=10,
+             raw_samples=512,
+             options={"batch_limit": 5, "maxiter": 200},
+             sequential=True,
+         )
+
+     candidates = unnormalize(candidates.detach(), bounds=bounds)
+
+     return candidates
+
+
+
+ # noinspection PyIncorrectDocstring
+ @experimental_func("2.4.0")
+ def qehvi_candidates_func(
+     train_x: "torch.Tensor",
+     train_obj: "torch.Tensor",
+     train_con: "torch.Tensor" | None,
+     bounds: "torch.Tensor",
+     pending_x: "torch.Tensor" | None,
+     model_c: "SingleTaskGP",
+     _constraints,
+     _study,
+     _opt,
+     pof_config,
+ ) -> "torch.Tensor":
+     """Quasi MC-based batch Expected Hypervolume Improvement (qEHVI).
+
+     The default value of ``candidates_func`` in :class:`~optuna_integration.BoTorchSampler`
+     with multi-objective optimization when the number of objectives is three or less.
+
+     .. seealso::
+         :func:`~optuna_integration.botorch.qei_candidates_func` for argument and return value
+         descriptions.
+     """
+
+     n_objectives = train_obj.size(-1)
+
+     if train_con is not None:
+         train_y = torch.cat([train_obj, train_con], dim=-1)
+
+         is_feas = (train_con <= 0).all(dim=-1)
+         train_obj_feas = train_obj[is_feas]
+
+         n_constraints = train_con.size(1)
+         additional_qehvi_kwargs = {
+             "objective": IdentityMCMultiOutputObjective(outcomes=list(range(n_objectives))),
+             "constraints": _get_constraint_funcs(n_constraints),
+         }
+     else:
+         train_y = train_obj
+
+         train_obj_feas = train_obj
+
+         additional_qehvi_kwargs = {}
+
+     train_x = normalize(train_x, bounds=bounds)
+     if pending_x is not None:
+         pending_x = normalize(pending_x, bounds=bounds)
+
+     model = SingleTaskGP(
+         train_x,
+         train_y,
+         train_Yvar=1e-4 * torch.ones_like(train_y) if _get_use_fixed_noise() else None,
+         outcome_transform=Standardize(m=train_y.size(-1))
+     )
+     mll = ExactMarginalLogLikelihood(model.likelihood, model)
+     fit_gpytorch_mll(mll)
+
+     # Approximate box decomposition similar to Ax when the number of objectives is large.
+     # https://github.com/pytorch/botorch/blob/36d09a4297c2a0ff385077b7fcdd5a9d308e40cc/botorch/acquisition/multi_objective/utils.py#L46-L63
+     if n_objectives > 4:
+         alpha = 10 ** (-8 + n_objectives)
+     else:
+         alpha = 0.0
+
+     ref_point = train_obj.min(dim=0).values - 1e-8
+
+     partitioning = NondominatedPartitioning(ref_point=ref_point, Y=train_obj_feas, alpha=alpha)
+
+     ref_point_list = ref_point.tolist()
+
+     ACQF = acqf_patch_factory(monte_carlo.qExpectedHypervolumeImprovement, pof_config)
+     acqf = ACQF(
+         model=model,
+         ref_point=ref_point_list,
+         partitioning=partitioning,
+         sampler=_get_sobol_qmc_normal_sampler(256),
+         X_pending=pending_x,
+         **additional_qehvi_kwargs,
+     )
+     acqf.set_model_c(model_c)
+
+     standard_bounds = torch.zeros_like(bounds)
+     standard_bounds[1] = 1
+
+     # Add the parameter constraints to the optimize_acqf search.
+     if len(_constraints) > 0:
+         nc = NonlinearInequalityConstraints(_study, _constraints, _opt)
+
+         # 1, batch_limit, nonlinear_..., ic_generator
+         kwargs = nc.create_kwargs()
+         q = kwargs.pop('q')
+         batch_limit = kwargs.pop('options')["batch_limit"]
+
+         candidates, _ = optimize_acqf(
+             acq_function=acqf,
+             bounds=standard_bounds,
+             q=q,
+             num_restarts=20,
+             raw_samples=1024,
+             options={"batch_limit": batch_limit, "maxiter": 200, "nonnegative": True},
+             sequential=True,
+             **kwargs
+         )
+
+     else:
+         candidates, _ = optimize_acqf(
+             acq_function=acqf,
+             bounds=standard_bounds,
+             q=1,
+             num_restarts=20,
+             raw_samples=1024,
+             options={"batch_limit": 5, "maxiter": 200, "nonnegative": True},
+             sequential=True,
+         )
+
+     candidates = unnormalize(candidates.detach(), bounds=bounds)
+
+     return candidates
+
+
+ # noinspection PyIncorrectDocstring
+ @experimental_func("3.5.0")
+ def ehvi_candidates_func(
+     train_x: "torch.Tensor",
+     train_obj: "torch.Tensor",
+     train_con: "torch.Tensor" | None,
+     bounds: "torch.Tensor",
+     pending_x: "torch.Tensor" | None,
+     model_c: "SingleTaskGP",
+     _constraints,
+     _study,
+     _opt,
+     pof_config,
+ ) -> "torch.Tensor":
+     """Expected Hypervolume Improvement (EHVI).
+
+     The default value of ``candidates_func`` in :class:`~optuna_integration.BoTorchSampler`
+     with multi-objective optimization without constraints.
+
+     .. seealso::
+         :func:`~optuna_integration.botorch.qei_candidates_func` for argument and return value
+         descriptions.
+     """
+
+     n_objectives = train_obj.size(-1)
+     if train_con is not None:
+         raise ValueError("Constraints are not supported with ehvi_candidates_func.")
+
+     train_y = train_obj
+     train_x = normalize(train_x, bounds=bounds)
+
+     model = SingleTaskGP(
+         train_x,
+         train_y,
+         train_Yvar=1e-4 * torch.ones_like(train_y) if _get_use_fixed_noise() else None,
+         outcome_transform=Standardize(m=train_y.size(-1))
+     )
+     mll = ExactMarginalLogLikelihood(model.likelihood, model)
+     fit_gpytorch_mll(mll)
+
+     # Approximate box decomposition similar to Ax when the number of objectives is large.
+     # https://github.com/pytorch/botorch/blob/36d09a4297c2a0ff385077b7fcdd5a9d308e40cc/botorch/acquisition/multi_objective/utils.py#L46-L63
+     if n_objectives > 4:
+         alpha = 10 ** (-8 + n_objectives)
+     else:
+         alpha = 0.0
+
+     ref_point = train_obj.min(dim=0).values - 1e-8
+
+     partitioning = NondominatedPartitioning(ref_point=ref_point, Y=train_y, alpha=alpha)
+
+     ref_point_list = ref_point.tolist()
+
+     ACQF = acqf_patch_factory(ExpectedHypervolumeImprovement, pof_config)
+     acqf = ACQF(
+         model=model,
+         ref_point=ref_point_list,
+         partitioning=partitioning,
+     )
+     acqf.set_model_c(model_c)
+     standard_bounds = torch.zeros_like(bounds)
+     standard_bounds[1] = 1
+
+     # Add the parameter constraints to the optimize_acqf search.
+     if len(_constraints) > 0:
+         nc = NonlinearInequalityConstraints(_study, _constraints, _opt)
+
+         # 1, batch_limit, nonlinear_..., ic_generator
+         kwargs = nc.create_kwargs()
+         q = kwargs.pop('q')
+         batch_limit = kwargs.pop('options')["batch_limit"]
+
+         candidates, _ = optimize_acqf(
+             acq_function=acqf,
+             bounds=standard_bounds,
+             q=q,
+             num_restarts=10,
+             raw_samples=512,
+             options={"batch_limit": batch_limit, "maxiter": 200},
+             sequential=True,
+             **kwargs
+         )
+
+     else:
+         candidates, _ = optimize_acqf(
+             acq_function=acqf,
+             bounds=standard_bounds,
+             q=1,
+             num_restarts=20,
+             raw_samples=1024,
+             options={"batch_limit": 5, "maxiter": 200},
+             sequential=True,
+         )
+
+     candidates = unnormalize(candidates.detach(), bounds=bounds)
+
+     return candidates
+
+
+ # noinspection PyIncorrectDocstring
+ @experimental_func("3.1.0")
+ def qnehvi_candidates_func(
+     train_x: "torch.Tensor",
+     train_obj: "torch.Tensor",
+     train_con: "torch.Tensor" | None,
+     bounds: "torch.Tensor",
+     pending_x: "torch.Tensor" | None,
+     model_c: "SingleTaskGP",
+     _constraints,
+     _study,
+     _opt,
+     pof_config,
+ ) -> "torch.Tensor":
+     """Quasi MC-based batch Noisy Expected Hypervolume Improvement (qNEHVI).
+
+     According to Botorch/Ax documentation,
+     this function may perform better than qEHVI (`qehvi_candidates_func`).
+     (cf. https://botorch.org/tutorials/constrained_multi_objective_bo )
+
+     .. seealso::
+         :func:`~optuna_integration.botorch.qei_candidates_func` for argument and return value
+         descriptions.
+     """
+
+     n_objectives = train_obj.size(-1)
+
+     if train_con is not None:
+         train_y = torch.cat([train_obj, train_con], dim=-1)
+
+         n_constraints = train_con.size(1)
+         additional_qnehvi_kwargs = {
+             "objective": IdentityMCMultiOutputObjective(outcomes=list(range(n_objectives))),
+             "constraints": _get_constraint_funcs(n_constraints),
+         }
+     else:
+         train_y = train_obj
+
+         additional_qnehvi_kwargs = {}
+
+     train_x = normalize(train_x, bounds=bounds)
+     if pending_x is not None:
+         pending_x = normalize(pending_x, bounds=bounds)
+
+     model = SingleTaskGP(
+         train_x,
+         train_y,
+         train_Yvar=1e-4 * torch.ones_like(train_y) if _get_use_fixed_noise() else None,
+         outcome_transform=Standardize(m=train_y.size(-1))
+     )
+     mll = ExactMarginalLogLikelihood(model.likelihood, model)
+     fit_gpytorch_mll(mll)
+
+     # Approximate box decomposition similar to Ax when the number of objectives is large.
+     # https://github.com/pytorch/botorch/blob/36d09a4297c2a0ff385077b7fcdd5a9d308e40cc/botorch/acquisition/multi_objective/utils.py#L46-L63
+     if n_objectives > 4:
+         alpha = 10 ** (-8 + n_objectives)
+     else:
+         alpha = 0.0
+
+     ref_point = train_obj.min(dim=0).values - 1e-8
+
+     ref_point_list = ref_point.tolist()
+
+     # prune_baseline=True is generally recommended by the documentation of BoTorch.
+     # cf. https://botorch.org/api/acquisition.html (accessed on 2022/11/18)
+     ACQF = acqf_patch_factory(monte_carlo.qNoisyExpectedHypervolumeImprovement, pof_config)
+     acqf = ACQF(
+         model=model,
+         ref_point=ref_point_list,
+         X_baseline=train_x,
+         alpha=alpha,
+         prune_baseline=True,
+         sampler=_get_sobol_qmc_normal_sampler(256),
+         X_pending=pending_x,
+         **additional_qnehvi_kwargs,
+     )
+     acqf.set_model_c(model_c)
+
+     standard_bounds = torch.zeros_like(bounds)
+     standard_bounds[1] = 1
+
+     # Add the parameter constraints to the optimize_acqf search.
+     if len(_constraints) > 0:
+         nc = NonlinearInequalityConstraints(_study, _constraints, _opt)
+
+         # 1, batch_limit, nonlinear_..., ic_generator
+         kwargs = nc.create_kwargs()
+         q = kwargs.pop('q')
+         batch_limit = kwargs.pop('options')["batch_limit"]
+
+         candidates, _ = optimize_acqf(
+             acq_function=acqf,
+             bounds=standard_bounds,
+             q=q,
+             num_restarts=20,
+             raw_samples=1024,
+             options={"batch_limit": batch_limit, "maxiter": 200, "nonnegative": True},
+             sequential=True,
+             **kwargs
+         )
+
+     else:
+         candidates, _ = optimize_acqf(
+             acq_function=acqf,
+             bounds=standard_bounds,
+             q=1,
+             num_restarts=20,
+             raw_samples=1024,
+             options={"batch_limit": 5, "maxiter": 200, "nonnegative": True},
+             sequential=True,
+         )
+
+     candidates = unnormalize(candidates.detach(), bounds=bounds)
+
+     return candidates
+
+
+ # noinspection PyIncorrectDocstring
+ @experimental_func("2.4.0")
+ def qparego_candidates_func(
+     train_x: "torch.Tensor",
+     train_obj: "torch.Tensor",
+     train_con: "torch.Tensor" | None,
+     bounds: "torch.Tensor",
+     pending_x: "torch.Tensor" | None,
+     model_c: "SingleTaskGP",
+     _constraints,
+     _study,
+     _opt,
+     pof_config,
+ ) -> "torch.Tensor":
+     """Quasi MC-based extended ParEGO (qParEGO) for constrained multi-objective optimization.
+
+     The default value of ``candidates_func`` in :class:`~optuna_integration.BoTorchSampler`
+     with multi-objective optimization when the number of objectives is larger than three.
+
+     .. seealso::
+         :func:`~optuna_integration.botorch.qei_candidates_func` for argument and return value
+         descriptions.
+     """
+
+     n_objectives = train_obj.size(-1)
+
+     weights = sample_simplex(n_objectives).squeeze()
+     scalarization = get_chebyshev_scalarization(weights=weights, Y=train_obj)
+
+     if train_con is not None:
+         _validate_botorch_version_for_constrained_opt("qparego_candidates_func")
+         train_y = torch.cat([train_obj, train_con], dim=-1)
+         n_constraints = train_con.size(1)
+         objective = GenericMCObjective(lambda Z, X: scalarization(Z[..., :n_objectives]))
+         additional_qei_kwargs = {
+             "constraints": _get_constraint_funcs(n_constraints),
+         }
+     else:
+         train_y = train_obj
+
+         objective = GenericMCObjective(scalarization)
+         additional_qei_kwargs = {}
+
+     train_x = normalize(train_x, bounds=bounds)
+     if pending_x is not None:
+         pending_x = normalize(pending_x, bounds=bounds)
+
+     model = SingleTaskGP(
+         train_x,
+         train_y,
+         train_Yvar=1e-4 * torch.ones_like(train_y) if _get_use_fixed_noise() else None,
+         outcome_transform=Standardize(m=train_y.size(-1))
+     )
+     mll = ExactMarginalLogLikelihood(model.likelihood, model)
+     fit_gpytorch_mll(mll)
+
+     ACQF = acqf_patch_factory(qExpectedImprovement, pof_config)
+     acqf = ACQF(
+         model=model,
+         best_f=objective(train_y).max(),
+         sampler=_get_sobol_qmc_normal_sampler(256),
+         objective=objective,
+         X_pending=pending_x,
+         **additional_qei_kwargs,
+     )
+     acqf.set_model_c(model_c)
+
+     standard_bounds = torch.zeros_like(bounds)
+     standard_bounds[1] = 1
+
+     # Add the parameter constraints to the optimize_acqf search.
+     if len(_constraints) > 0:
+         nc = NonlinearInequalityConstraints(_study, _constraints, _opt)
+
+         # 1, batch_limit, nonlinear_..., ic_generator
+         kwargs = nc.create_kwargs()
+         q = kwargs.pop('q')
+         batch_limit = kwargs.pop('options')["batch_limit"]
+
+         candidates, _ = optimize_acqf(
+             acq_function=acqf,
+             bounds=standard_bounds,
+             q=q,
+             num_restarts=20,
+             raw_samples=1024,
+             options={"batch_limit": batch_limit, "maxiter": 200},
+             sequential=True,
+             **kwargs
+         )
+
+     else:
+         candidates, _ = optimize_acqf(
+             acq_function=acqf,
+             bounds=standard_bounds,
+             q=1,
+             num_restarts=20,
+             raw_samples=1024,
+             options={"batch_limit": 5, "maxiter": 200},
+             sequential=True,
+         )
+
+     candidates = unnormalize(candidates.detach(), bounds=bounds)
+
+     return candidates
+
+
+ @experimental_func("4.0.0")
+ def qkg_candidates_func(
+     train_x: "torch.Tensor",
+     train_obj: "torch.Tensor",
+     train_con: "torch.Tensor" | None,
+     bounds: "torch.Tensor",
+     pending_x: "torch.Tensor" | None,
+     model_c: "SingleTaskGP",
+     _constraints,
+     _study,
+     _opt,
+     pof_config,
+ ) -> "torch.Tensor":
+     """Quasi MC-based batch Knowledge Gradient (qKG).
+
+     According to Botorch/Ax documentation,
+     this function may perform better than qEI (`qei_candidates_func`).
+     (cf. https://botorch.org/tutorials/one_shot_kg )
+
+     .. seealso::
+         :func:`~optuna_integration.botorch.qei_candidates_func` for argument and return value
+         descriptions.
+
+     """
+
+     if train_obj.size(-1) != 1:
+         raise ValueError("Objective may only contain single values with qKG.")
+     if train_con is not None:
+         train_y = torch.cat([train_obj, train_con], dim=-1)
+         n_constraints = train_con.size(1)
+         objective = ConstrainedMCObjective(
+             objective=lambda Z, X: Z[..., 0],
+             constraints=_get_constraint_funcs(n_constraints),
+         )
+     else:
+         train_y = train_obj
+         objective = None  # Using the default identity objective.
+
+     train_x = normalize(train_x, bounds=bounds)
+     if pending_x is not None:
+         pending_x = normalize(pending_x, bounds=bounds)
+
+     model = SingleTaskGP(
+         train_x,
+         train_y,
+         train_Yvar=1e-4 * torch.ones_like(train_y) if _get_use_fixed_noise() else None,
+         outcome_transform=Standardize(m=train_y.size(-1))
+     )
+     mll = ExactMarginalLogLikelihood(model.likelihood, model)
+     fit_gpytorch_mll(mll)
+
+     ACQF = acqf_patch_factory(qKnowledgeGradient, pof_config)
+     acqf = ACQF(
+         model=model,
+         num_fantasies=256,
+         objective=objective,
+         X_pending=pending_x,
+     )
+     acqf.set_model_c(model_c)
+
+     standard_bounds = torch.zeros_like(bounds)
+     standard_bounds[1] = 1
+
+     # Add the parameter constraints to the optimize_acqf search.
+     if len(_constraints) > 0:
+         nc = NonlinearInequalityConstraints(_study, _constraints, _opt)
+
+         # 1, batch_limit, nonlinear_..., ic_generator
+         kwargs = nc.create_kwargs()
+         q = kwargs.pop('q')
+         batch_limit = kwargs.pop('options')["batch_limit"]
+
+         candidates, _ = optimize_acqf(
+             acq_function=acqf,
+             bounds=standard_bounds,
+             q=q,
+             num_restarts=10,
+             raw_samples=512,
+             options={"batch_limit": batch_limit, "maxiter": 200},
+             sequential=True,
+             **kwargs
+         )
+
+     else:
+         candidates, _ = optimize_acqf(
+             acq_function=acqf,
+             bounds=standard_bounds,
+             q=1,
+             num_restarts=10,
+             raw_samples=512,
+             options={"batch_limit": 8, "maxiter": 200},
+             sequential=True,
+         )
+
+     candidates = unnormalize(candidates.detach(), bounds=bounds)
+
+     return candidates
+
+
+ # noinspection PyIncorrectDocstring,SpellCheckingInspection
+ @experimental_func("4.0.0")
+ def qhvkg_candidates_func(
+     train_x: "torch.Tensor",
+     train_obj: "torch.Tensor",
+     train_con: "torch.Tensor" | None,
+     bounds: "torch.Tensor",
+     pending_x: "torch.Tensor" | None,
+     model_c: "SingleTaskGP",
+     _constraints,
+     _study,
+     _opt,
+     pof_config,
+ ) -> "torch.Tensor":
+     """Quasi MC-based batch Hypervolume Knowledge Gradient (qHVKG).
+
+     According to Botorch/Ax documentation,
+     this function may perform better than qEHVI (`qehvi_candidates_func`).
+     (cf. https://botorch.org/tutorials/decoupled_mobo )
+
+     .. seealso::
+         :func:`~optuna_integration.botorch.qei_candidates_func` for argument and return value
+         descriptions.
+     """
+
+     # We need botorch >=0.9.5 for qHypervolumeKnowledgeGradient.
+     if not _imports_qhvkg.is_successful():
+         raise ImportError(
+             "qhvkg_candidates_func requires botorch >=0.9.5. "
+             "Please upgrade botorch or use qehvi_candidates_func as candidates_func instead."
+         )
+
+     if train_con is not None:
+         train_y = torch.cat([train_obj, train_con], dim=-1)
+     else:
+         train_y = train_obj
+
+     train_x = normalize(train_x, bounds=bounds)
+     if pending_x is not None:
+         pending_x = normalize(pending_x, bounds=bounds)
+
+     models = [
+         SingleTaskGP(
+             train_x,
+             train_y[..., [i]],
+             train_Yvar=1e-4 * torch.ones_like(train_y[..., [i]]) if _get_use_fixed_noise() else None,
+             outcome_transform=Standardize(m=1)
+         )
+         for i in range(train_y.shape[-1])
+     ]
+     model = ModelListGP(*models)
+     mll = SumMarginalLogLikelihood(model.likelihood, model)
+     fit_gpytorch_mll(mll)
+
+     n_constraints = train_con.size(1) if train_con is not None else 0
+     objective = FeasibilityWeightedMCMultiOutputObjective(
+         model,
+         X_baseline=train_x,
+         constraint_idcs=[-n_constraints + i for i in range(n_constraints)],
+     )
+
+     ref_point = train_obj.min(dim=0).values - 1e-8
+
+     ACQF = acqf_patch_factory(qHypervolumeKnowledgeGradient, pof_config)
+     acqf = ACQF(
+         model=model,
+         ref_point=ref_point,
+         num_fantasies=16,
+         X_pending=pending_x,
+         objective=objective,
+         sampler=ListSampler(
+             *[
+                 SobolQMCNormalSampler(sample_shape=torch.Size([16]))
+                 for _ in range(model.num_outputs)
+             ]
+         ),
+         inner_sampler=SobolQMCNormalSampler(sample_shape=torch.Size([32])),
+     )
+     acqf.set_model_c(model_c)
+
+     standard_bounds = torch.zeros_like(bounds)
+     standard_bounds[1] = 1
+
+     # Add the parameter constraints to the optimize_acqf search.
+     if len(_constraints) > 0:
+         nc = NonlinearInequalityConstraints(_study, _constraints, _opt)
+
+         # 1, batch_limit, nonlinear_..., ic_generator
+         kwargs = nc.create_kwargs()
+         q = kwargs.pop('q')
+         batch_limit = kwargs.pop('options')["batch_limit"]
+
+         candidates, _ = optimize_acqf(
+             acq_function=acqf,
+             bounds=standard_bounds,
+             q=q,
+             num_restarts=1,
+             raw_samples=1024,
+             options={"batch_limit": batch_limit, "maxiter": 200, "nonnegative": True},
+             sequential=True,
+             **kwargs
+         )
+
+     else:
+         candidates, _ = optimize_acqf(
+             acq_function=acqf,
+             bounds=standard_bounds,
+             q=1,
+             num_restarts=1,
+             raw_samples=1024,
+             options={"batch_limit": 4, "maxiter": 200, "nonnegative": True},
+             sequential=False,
+         )
+
+     candidates = unnormalize(candidates.detach(), bounds=bounds)
+
+     return candidates
+
+
+ def _get_default_candidates_func(
+     n_objectives: int,
+     has_constraint: bool,
+     consider_running_trials: bool,
+ ) -> Callable[
+     [
+         "torch.Tensor",
+         "torch.Tensor",
+         "torch.Tensor" | None,
+         "torch.Tensor",
+         "torch.Tensor" | None,
+         "SingleTaskGP",
+         "list[Constraint]",
+         "Study",
+         "OptunaOptimizer",
+         "PoFConfig",
+     ],
+     "torch.Tensor",
+ ]:
+     if n_objectives > 3 and not has_constraint and not consider_running_trials:
+         return ehvi_candidates_func
+     elif n_objectives > 3:
+         return qparego_candidates_func
+     elif n_objectives > 1:
+         return qehvi_candidates_func
+     elif consider_running_trials:
+         return qei_candidates_func
+     else:
+         return logei_candidates_func
+
+
+ # ===== main re-implementation of BoTorchSampler =====
+ @dataclass
+ class PoFConfig:
+     """Configuration of PoFBoTorchSampler.
+
+     Args:
+         enable_pof (bool):
+             Whether to consider Probability of Feasibility.
+             Defaults to True.
+
+         gamma (float or torch.Tensor):
+             Exponent for Probability of Feasibility. A larger value places more emphasis on feasibility.
+             If 0, Probability of Feasibility is not considered.
+             Defaults to 1.
+
+         threshold (float or torch.Tensor):
+             Boundary value for calculating Probability of Feasibility with the CDF.
+             Generally between 0 and 1, with 0.5 being recommended. A larger value places more emphasis on feasibility.
+             Defaults to 0.5.
+
+         enable_log (bool):
+             Whether to apply symlog to the base acquisition function values.
+             Defaults to True.
+
+         enable_positive_only_pof (bool):
+             Whether to apply Probability of Feasibility only when the base acquisition function is positive.
+             Defaults to False.
+
+         enable_dynamic_pof (bool):
+             Whether to change gamma dynamically. When True, the ``gamma`` argument is ignored.
+             Defaults to False.
+
+         enable_dynamic_threshold (bool):
+             Whether to change threshold dynamically. When True, the ``threshold`` argument is ignored.
+             Defaults to False.
+
+         enable_repeat_penalty (bool):
+             Whether to apply a penalty coefficient on the base acquisition function values near sampled points.
+             Defaults to False.
+
+         enable_dynamic_repeat_penalty (bool):
+             Enhances the penalty coefficient if the same value is repeated. When True, it behaves as if enable_repeat_penalty is set to True.
+             Defaults to False.
+
+         repeat_watch_window (int):
+             Specifies how many recent proposal values are referenced when determining the magnitude of penalties when enable_dynamic_repeat_penalty is True.
+             Defaults to 3.
+
+         repeat_watch_norm_distance (float):
+             Defines how small the norm distance between proposed parameters needs to be in the normalized parameter space [0, 1]
+             for a stronger penalty effect. Extreme values may cause numerical instability.
+             Defaults to 0.1.
+
+         enable_no_noise (bool):
+             Whether to treat observation errors as non-existent
+             when training the regression model with the objective
+             function value. The default is True because there is
+             essentially no observational error in a FEM analysis.
+             This is different from the original BoTorchSampler
+             implementation.
+
+     """
+     enable_pof: bool = True  # Whether to consider PoF.
+     gamma: float | torch.Tensor = 1.0  # Exponent applied to PoF. Larger values emphasize feasibility more; 0 ignores PoF.
+     threshold: float | torch.Tensor = 0.5  # Boundary value when computing PoF via the CDF. Typically 0 to 1, with 0.5 recommended. Larger values emphasize feasibility more.
+
+     enable_log: bool = True  # Apply symlog to the base acquisition function value.
+     enable_positive_only_pof: bool = False  # Multiply by PoF only where the base acquisition function is positive.
+
+     enable_dynamic_pof: bool = False  # Change gamma dynamically. When True, gamma is ignored.
+     enable_dynamic_threshold: bool = False  # Change threshold dynamically. When True, threshold is ignored.
+
+     enable_repeat_penalty: bool = False  # Apply a penalty coefficient to the base acquisition function value near already-sampled points.
+     _repeat_penalty: float | torch.Tensor = 1.  # Internal variable used when enable_repeat_penalty is True.
+
+     enable_dynamic_repeat_penalty: bool = False  # Strengthen the penalty coefficient when the same values are proposed repeatedly. When True, enable_repeat_penalty behaves as if it were True.
+     repeat_watch_window: int = 3  # When enable_dynamic_repeat_penalty is True, how many recent proposals are consulted to decide the penalty magnitude.
+     repeat_watch_norm_distance: float = 0.1  # Norm distance between proposals in the [0, 1]-normalized parameter space below which the penalty is strengthened. Extreme values may cause numerical instability.
+     _repeat_penalty_gamma: float | torch.Tensor = 1.  # Exponent of _repeat_penalty; internal variable.
+
+     enable_no_noise: bool = True
+
+     def _disable_all_features(self):
+         # Disabling everything except the constraints makes this
+         # identical to the BoTorchSampler implementation.
+         self.enable_pof = False
+         self.enable_log = False
+         self.enable_positive_only_pof = False
+         self.enable_dynamic_pof = False
+         self.enable_dynamic_threshold = False
+         self.enable_repeat_penalty = False
+         self.enable_dynamic_repeat_penalty = False
+         self.enable_no_noise = False
+
+
1451
+@experimental_class("2.4.0")
+class PoFBoTorchSampler(BaseSampler):
+    """A sampler forked from BoTorchSampler.
+
+    This sampler extends the BoTorchSampler to account
+    for known/hidden constraints and for repeat penalties.
+
+    See Also:
+        https://optuna.readthedocs.io/en/v3.0.0-b1/reference/generated/optuna.integration.BoTorchSampler.html
+
+    Args:
+        candidates_func:
+            An optional function that suggests the next candidates. It must take the training
+            data, the objectives, the constraints, the search space bounds and return the next
+            candidates. The arguments are of type ``torch.Tensor``. The return value must be a
+            ``torch.Tensor``. However, if ``constraints_func`` is omitted, constraints will be
+            :obj:`None`. For any constraints that failed to compute, the tensor will contain
+            NaN. In this fork, the function additionally receives the feasibility model, the
+            PyFemtet constraints, the study, the optimizer and the ``PoFConfig`` (see the
+            signature of ``__init__``).
+
+            If omitted, it is determined automatically based on the number of objectives and
+            whether a constraint is specified. If the
+            number of objectives is one and no constraint is specified, log-Expected Improvement
+            is used. If constraints are specified, quasi MC-based batch Expected Improvement
+            (qEI) is used.
+            If the number of objectives is either two or three, Quasi MC-based
+            batch Expected Hypervolume Improvement (qEHVI) is used. Otherwise, for a larger number
+            of objectives, analytic Expected Hypervolume Improvement is used if no constraints
+            are specified, or the faster Quasi MC-based extended ParEGO (qParEGO) is used if
+            constraints are present.
+
+            The function should assume *maximization* of the objective.
+
+            .. seealso::
+                See :func:`optuna_integration.botorch.qei_candidates_func` for an example.
+        constraints_func:
+            An optional function that computes the objective constraints. It must take a
+            :class:`~optuna.trial.FrozenTrial` and return the constraints. The return value must
+            be a sequence of :obj:`float` s. A value strictly larger than 0 means that a
+            constraint is violated. A value equal to or smaller than 0 is considered feasible.
+
+            If omitted, no constraints will be passed to ``candidates_func`` nor taken into
+            account during suggestion.
+        n_startup_trials:
+            Number of initial trials, that is the number of trials to resort to independent
+            sampling.
+        consider_running_trials:
+            If True, the acquisition function takes into consideration the running parameters
+            whose evaluation has not completed. Enabling this option is considered to improve the
+            performance of parallel optimization.
+
+            .. note::
+                Added in v3.2.0 as an experimental argument.
+        independent_sampler:
+            An independent sampler to use for the initial trials and for parameters that are
+            conditional.
+        seed:
+            Seed for random number generator.
+        device:
+            A ``torch.device`` to store input and output data of BoTorch. Please set a CUDA
+            device if you want to speed up sampling.
+        pof_config (PoFConfig or None):
+            Settings for the PoF-related features. If :obj:`None`, a default ``PoFConfig``
+            is used.
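+
+    Example:
+        A minimal sketch (the argument values here are illustrative
+        assumptions, not defaults). In PyFemtet this sampler is normally
+        constructed by the surrounding optimizer, which also sets the
+        internal ``_pyfemtet_constraints`` and ``_pyfemtet_optimizer``
+        attributes that ``sample_relative`` expects::
+
+            pof_config = PoFConfig()
+            pof_config.gamma = 2.0  # assumed: emphasize feasibility
+            sampler = PoFBoTorchSampler(
+                n_startup_trials=5,
+                seed=42,
+                pof_config=pof_config,
+            )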
+ """
1514
+
1515
+ def __init__(
1516
+ self,
1517
+ *,
1518
+ candidates_func: (
1519
+ Callable[
1520
+ [
1521
+ "torch.Tensor",
1522
+ "torch.Tensor",
1523
+ "torch.Tensor" | None,
1524
+ "torch.Tensor",
1525
+ "torch.Tensor" | None,
1526
+ "SingleTaskGP",
1527
+ "list[Constraint]",
1528
+ "Study",
1529
+ "OptunaOptimizer",
1530
+ "PoFConfig",
1531
+ ],
1532
+ "torch.Tensor",
1533
+ ]
1534
+ | None
1535
+ ) = None,
1536
+ constraints_func: Callable[[FrozenTrial], Sequence[float]] | None = None,
1537
+ n_startup_trials: int = 10,
1538
+ consider_running_trials: bool = False,
1539
+ independent_sampler: BaseSampler | None = None,
1540
+ seed: int | None = None,
1541
+ device: "torch.device" | None = None,
1542
+ pof_config: PoFConfig or None = None,
1543
+ ):
1544
+        _imports.check()
+
+        self._candidates_func = candidates_func
+        self._constraints_func = constraints_func
+        self._consider_running_trials = consider_running_trials
+        self._independent_sampler = independent_sampler or RandomSampler(seed=seed)
+        self._n_startup_trials = n_startup_trials
+        self._seed = seed
+
+        self._study_id: int | None = None
+        self._search_space = IntersectionSearchSpace()
+        self._device = device or torch.device("cpu")
+
+        self.pof_config = pof_config or PoFConfig()
+        _set_use_fixed_noise(self.pof_config.enable_no_noise)
+
+    @property
+    def use_fixed_noise(self) -> bool:
+        # Exposes the module-level fixed-noise flag that is initialized
+        # from ``pof_config.enable_no_noise`` in ``__init__``.
+        return _get_use_fixed_noise()
+
+    @use_fixed_noise.setter
+    def use_fixed_noise(self, value: bool):
+        _set_use_fixed_noise(value)
+
+    def infer_relative_search_space(
+        self,
+        study: Study,
+        trial: FrozenTrial,
+    ) -> dict[str, BaseDistribution]:
+        if self._study_id is None:
+            self._study_id = study._study_id
+        if self._study_id != study._study_id:
+            # Note that the check below is meaningless when `InMemoryStorage` is used
+            # because `InMemoryStorage.create_new_study` always returns the same study ID.
+            raise RuntimeError("BoTorchSampler cannot handle multiple studies.")
+
+        search_space: dict[str, BaseDistribution] = {}
+        for name, distribution in self._search_space.calculate(study).items():
+            if distribution.single():
+                # Built-in `candidates_func` cannot handle distributions that contain just a
+                # single value, so we skip them. Note that the parameter values for such
+                # distributions are sampled in `Trial`.
+                continue
+            search_space[name] = distribution
+
+        return search_space
+
+    def sample_relative(
+        self,
+        study: Study,
+        trial: FrozenTrial,
+        search_space: dict[str, BaseDistribution],
+    ) -> dict[str, Any]:
+        assert isinstance(search_space, dict)
+
+        if len(search_space) == 0:
+            return {}
+
+        completed_trials = study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,))
+        running_trials = [
+            t for t in study.get_trials(deepcopy=False, states=(TrialState.RUNNING,)) if t != trial
+        ]
+        trials = completed_trials + running_trials
+
+        n_trials = len(trials)
+        n_completed_trials = len(completed_trials)
+        if n_trials < self._n_startup_trials:
+            return {}
+
+        trans = _SearchSpaceTransform(search_space)
+        n_objectives = len(study.directions)
+        values: numpy.ndarray | torch.Tensor = numpy.empty(
+            (n_trials, n_objectives), dtype=numpy.float64
+        )
+        params: numpy.ndarray | torch.Tensor
+        con: numpy.ndarray | torch.Tensor | None = None
+        bounds: numpy.ndarray | torch.Tensor = trans.bounds
+        params = numpy.empty((n_trials, trans.bounds.shape[0]), dtype=numpy.float64)
+        for trial_idx, trial in enumerate(trials):
+            if trial.state == TrialState.COMPLETE:
+                params[trial_idx] = trans.transform(trial.params)
+                assert len(study.directions) == len(trial.values)
+                for obj_idx, (direction, value) in enumerate(zip(study.directions, trial.values)):
+                    assert value is not None
+                    if (
+                        direction == StudyDirection.MINIMIZE
+                    ):  # BoTorch always assumes maximization.
+                        value *= -1
+                    values[trial_idx, obj_idx] = value
+                if self._constraints_func is not None:
+                    constraints = study._storage.get_trial_system_attrs(trial._trial_id).get(
+                        _CONSTRAINTS_KEY
+                    )
+                    if constraints is not None:
+                        n_constraints = len(constraints)
+
+                        if con is None:
+                            con = numpy.full(
+                                (n_completed_trials, n_constraints), numpy.nan, dtype=numpy.float64
+                            )
+                        elif n_constraints != con.shape[1]:
+                            raise RuntimeError(
+                                f"Expected {con.shape[1]} constraints "
+                                f"but received {n_constraints}."
+                            )
+                        con[trial_idx] = constraints
+            elif trial.state == TrialState.RUNNING:
+                if all(p in trial.params for p in search_space):
+                    params[trial_idx] = trans.transform(trial.params)
+                else:
+                    params[trial_idx] = numpy.nan
+            else:
+                assert False, "trial.state must be TrialState.COMPLETE or TrialState.RUNNING."
+
+        if self._constraints_func is not None:
+            if con is None:
+                warnings.warn(
+                    "`constraints_func` was given but no call to it correctly computed "
+                    "constraints. Constraints passed to `candidates_func` will be `None`."
+                )
+            elif numpy.isnan(con).any():
+                warnings.warn(
+                    "`constraints_func` was given but some calls to it did not correctly compute "
+                    "constraints. Constraints passed to `candidates_func` will contain NaN."
+                )
+
+        values = torch.from_numpy(values).to(self._device)
+        params = torch.from_numpy(params).to(self._device)
+        if con is not None:
+            con = torch.from_numpy(con).to(self._device)
+        bounds = torch.from_numpy(bounds).to(self._device)
+
+        if con is not None:
+            if con.dim() == 1:
+                con.unsqueeze_(-1)
+        bounds.transpose_(0, 1)
+
+        if self._candidates_func is None:
+            self._candidates_func = _get_default_candidates_func(
+                n_objectives=n_objectives,
+                has_constraint=con is not None,
+                consider_running_trials=self._consider_running_trials,
+            )
+
+        completed_values = values[:n_completed_trials]
+        completed_params = params[:n_completed_trials]
+        if self._consider_running_trials:
+            running_params = params[n_completed_trials:]
+            running_params = running_params[~torch.isnan(running_params).any(dim=1)]
+        else:
+            running_params = None
+
+        if self._seed is not None:
+            random.seed(self._seed)
+            numpy.random.seed(self._seed)
+            torch.manual_seed(self._seed)
+            torch.backends.cudnn.benchmark = False
+            torch.backends.cudnn.deterministic = True
+
+        with manual_seed(self._seed):
+
+            # ===== Build model_c (the feasibility model) =====
+            # ----- Prepare bounds, train_x, and train_y -----
+            # train_x and train_y must follow the original implementation;
+            # otherwise they become inconsistent with the argument of
+            # ACQF.forward(X).
+
+            # Trials pruned due to strict constraint violations or model failure.
+            pruned_trials = study.get_trials(deepcopy=False, states=(TrialState.PRUNED,))
+            # Unlike the original implementation, this model is not used to propose
+            # the next point itself, so running trials need not be considered.
+            trials = completed_trials + pruned_trials
+            n_trials = len(trials)
+
+            # ----- Build train_x and train_y (completed_params, completed_values) -----
+            # Collect x and y (= feasibility) from the trials.
+            trans = _SearchSpaceTransform(search_space)
+            bounds: numpy.ndarray | torch.Tensor = trans.bounds
+            params: numpy.ndarray | torch.Tensor = numpy.empty((n_trials, trans.bounds.shape[0]), dtype=numpy.float64)
+            values: numpy.ndarray | torch.Tensor = numpy.empty((n_trials, 1), dtype=numpy.float64)
+            for trial_idx, trial in enumerate(trials):
+                params[trial_idx] = trans.transform(trial.params)
+                if trial.state == TrialState.COMPLETE:
+                    values[trial_idx, 0] = 1.  # feasible
+                elif trial.state == TrialState.PRUNED:
+                    values[trial_idx, 0] = 0.  # infeasible
+                else:
+                    assert False, "trial.state must be TrialState.COMPLETE or TrialState.PRUNED."
+            bounds = torch.from_numpy(bounds).to(self._device)
+            params = torch.from_numpy(params).to(self._device)  # Unnormalized, n_points x n_parameters tensor.
+            values = torch.from_numpy(values).to(self._device)  # 0 or 1, n_points x 1 tensor.
+            bounds.transpose_(0, 1)  # Unnormalized, 2 x n_parameters tensor.
+
+            # ----- Fit model_c -----
+            # with manual_seed(self._seed):
+            train_x_c = normalize(params, bounds=bounds)
+            train_y_c = values
+            model_c = SingleTaskGP(
+                train_x_c,  # n_data x n_prm
+                train_y_c,  # n_data x n_obj
+                # train_Yvar=1e-4 + torch.zeros_like(train_y_c),
+                outcome_transform=Standardize(
+                    m=train_y_c.shape[-1],  # The output dimension.
+                )
+            )
+            mll_c = ExactMarginalLogLikelihood(
+                model_c.likelihood,
+                model_c
+            )
+            fit_gpytorch_mll(mll_c)
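+
+            # How model_c feeds the acquisition (a sketch inferred from the
+            # PoFConfig docstring; the actual acquisition wrapper is defined
+            # elsewhere in this module): the probability of feasibility at a
+            # candidate x is the posterior probability that the predicted
+            # feasibility label exceeds ``threshold``,
+            #     pof(x) = 1 - Normal(mu_c(x), sigma_c(x)).cdf(threshold)
+            # and the base acquisition value is multiplied by pof(x) ** gamma.
+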
+            # ===== Pass the objects required by the NonlinearConstraint implementation =====
+            # This implementation is unavoidable because PyFemtet-specific
+            # functions are assumed here.
+            # If optuna's handling of constraint functions changes in the future,
+            # switch to that implementation (convert the Constraint objects so
+            # that they can be used with optuna alone as well).
+            # These attributes are set inside the Optimizer.
+
+            # noinspection PyUnresolvedReferences
+            _constraints = self._pyfemtet_constraints
+            # noinspection PyUnresolvedReferences
+            _opt = self._pyfemtet_optimizer
+
+            # `manual_seed` makes the default candidates functions reproducible.
+            # `SobolQMCNormalSampler`'s constructor has a `seed` argument, but its behavior is
+            # deterministic when the BoTorch's seed is fixed.
+            candidates = self._candidates_func(
+                completed_params,
+                completed_values,
+                con,
+                bounds,
+                running_params,
+                model_c,
+                _constraints,
+                study,
+                _opt,
+                self.pof_config,
+            )
+            if self._seed is not None:
+                self._seed += 1
+
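+        # Compared with the upstream BoTorchSampler, the call above passes five
+        # extra arguments (model_c, _constraints, study, _opt, self.pof_config)
+        # so that the candidates function can account for the PoF and the
+        # parameter constraints while optimizing the acquisition function.
+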
+        if not isinstance(candidates, torch.Tensor):
+            raise TypeError("Candidates must be a torch.Tensor.")
+        if candidates.dim() == 2:
+            if candidates.size(0) != 1:
+                raise ValueError(
+                    "Candidates batch optimization is not supported and the first dimension must "
+                    "have size 1 if candidates is a two-dimensional tensor. Actual: "
+                    f"{candidates.size()}."
+                )
+            # Batch size is one. Get rid of the batch dimension.
+            candidates = candidates.squeeze(0)
+        if candidates.dim() != 1:
+            raise ValueError("Candidates must be one or two-dimensional.")
+        if candidates.size(0) != bounds.size(1):
+            raise ValueError(
+                "Candidates size must match the given bounds. Actual candidates: "
+                f"{candidates.size(0)}, bounds: {bounds.size(1)}."
+            )
+
+        # Map the candidate vector in the transformed space back to a
+        # parameter dictionary.
+        return trans.untransform(candidates.cpu().numpy())
+
+    def sample_independent(
+        self,
+        study: Study,
+        trial: FrozenTrial,
+        param_name: str,
+        param_distribution: BaseDistribution,
+    ) -> Any:
+        return self._independent_sampler.sample_independent(
+            study, trial, param_name, param_distribution
+        )
+
+    def reseed_rng(self) -> None:
+        self._independent_sampler.reseed_rng()
+        if self._seed is not None:
+            self._seed = numpy.random.RandomState().randint(numpy.iinfo(numpy.int32).max)
+
+    def before_trial(self, study: Study, trial: FrozenTrial) -> None:
+        self._independent_sampler.before_trial(study, trial)
+
+    def after_trial(
+        self,
+        study: Study,
+        trial: FrozenTrial,
+        state: TrialState,
+        values: Sequence[float] | None,
+    ) -> None:
+        if self._constraints_func is not None:
+            _process_constraints_after_trial(self._constraints_func, study, trial, state)
+        self._independent_sampler.after_trial(study, trial, state, values)