physbo-2.0.0-cp310-cp310-macosx_12_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. physbo/__init__.py +17 -0
  2. physbo/blm/__init__.py +17 -0
  3. physbo/blm/basis/__init__.py +8 -0
  4. physbo/blm/basis/fourier.py +148 -0
  5. physbo/blm/core/__init__.py +8 -0
  6. physbo/blm/core/model.py +257 -0
  7. physbo/blm/inf/__init__.py +8 -0
  8. physbo/blm/inf/exact.py +192 -0
  9. physbo/blm/lik/__init__.py +10 -0
  10. physbo/blm/lik/_src/__init__.py +8 -0
  11. physbo/blm/lik/_src/cov.py +113 -0
  12. physbo/blm/lik/gauss.py +136 -0
  13. physbo/blm/lik/linear.py +117 -0
  14. physbo/blm/predictor.py +238 -0
  15. physbo/blm/prior/__init__.py +8 -0
  16. physbo/blm/prior/gauss.py +215 -0
  17. physbo/gp/__init__.py +15 -0
  18. physbo/gp/core/__init__.py +11 -0
  19. physbo/gp/core/learning.py +364 -0
  20. physbo/gp/core/model.py +420 -0
  21. physbo/gp/core/prior.py +207 -0
  22. physbo/gp/cov/__init__.py +8 -0
  23. physbo/gp/cov/_src/__init__.py +1 -0
  24. physbo/gp/cov/_src/enhance_gauss.cpython-310-darwin.so +0 -0
  25. physbo/gp/cov/gauss.py +393 -0
  26. physbo/gp/inf/__init__.py +8 -0
  27. physbo/gp/inf/exact.py +231 -0
  28. physbo/gp/lik/__init__.py +8 -0
  29. physbo/gp/lik/gauss.py +179 -0
  30. physbo/gp/mean/__init__.py +9 -0
  31. physbo/gp/mean/const.py +150 -0
  32. physbo/gp/mean/zero.py +66 -0
  33. physbo/gp/predictor.py +170 -0
  34. physbo/misc/__init__.py +15 -0
  35. physbo/misc/_src/__init__.py +1 -0
  36. physbo/misc/_src/cholupdate.cpython-310-darwin.so +0 -0
  37. physbo/misc/_src/diagAB.cpython-310-darwin.so +0 -0
  38. physbo/misc/_src/logsumexp.cpython-310-darwin.so +0 -0
  39. physbo/misc/_src/traceAB.cpython-310-darwin.so +0 -0
  40. physbo/misc/centering.py +28 -0
  41. physbo/misc/gauss_elim.py +35 -0
  42. physbo/misc/set_config.py +299 -0
  43. physbo/opt/__init__.py +8 -0
  44. physbo/opt/adam.py +107 -0
  45. physbo/predictor.py +261 -0
  46. physbo/search/__init__.py +11 -0
  47. physbo/search/discrete/__init__.py +11 -0
  48. physbo/search/discrete/policy.py +804 -0
  49. physbo/search/discrete/results.py +192 -0
  50. physbo/search/discrete_multi/__init__.py +11 -0
  51. physbo/search/discrete_multi/policy.py +552 -0
  52. physbo/search/discrete_multi/results.py +128 -0
  53. physbo/search/pareto.py +206 -0
  54. physbo/search/score.py +155 -0
  55. physbo/search/score_multi.py +197 -0
  56. physbo/search/utility.py +101 -0
  57. physbo/variable.py +222 -0
  58. physbo-2.0.0.dist-info/METADATA +110 -0
  59. physbo-2.0.0.dist-info/RECORD +61 -0
  60. physbo-2.0.0.dist-info/WHEEL +5 -0
  61. physbo-2.0.0.dist-info/top_level.txt +1 -0
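The bulk of this release is the new physbo/search/discrete/policy.py (+804 lines), whose diff follows. For orientation, here is a minimal usage sketch based on the API shown in that file; the candidate grid and objective function are hypothetical, and it assumes the installed package exposes physbo.search.discrete.policy as in the PHYSBO documentation.

import numpy as np
import physbo

# hypothetical candidate set: 1000 one-dimensional points in [-2, 2]
test_X = np.linspace(-2.0, 2.0, 1000).reshape(-1, 1)

def simulator(actions):
    # hypothetical objective; PHYSBO searches for the maximum, so -x**2 peaks at x = 0
    return -test_X[actions, 0] ** 2

policy = physbo.search.discrete.policy(test_X=test_X)
policy.set_seed(12345)

# a few random probes to seed the training data, then Bayesian optimization
policy.random_search(max_num_probes=10, simulator=simulator)
res = policy.bayes_search(max_num_probes=30, simulator=simulator,
                          score="TS", interval=5)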
@@ -0,0 +1,804 @@
1
+ # SPDX-License-Identifier: MPL-2.0
2
+ # Copyright (C) 2020- The University of Tokyo
3
+ #
4
+ # This Source Code Form is subject to the terms of the Mozilla Public
5
+ # License, v. 2.0. If a copy of the MPL was not distributed with this
6
+ # file, You can obtain one at https://mozilla.org/MPL/2.0/.
7
+
8
+ import numpy as np
9
+ import copy
10
+ import pickle as pickle
11
+ import itertools
12
+ import time
13
+
14
+ from .results import history
15
+ from .. import utility
16
+ from .. import score as search_score
17
+ from ...gp import predictor as gp_predictor
18
+ from ...blm import predictor as blm_predictor
19
+ from ...misc import set_config
20
+
21
+ from physbo.variable import variable
22
+
23
+
24
+ class policy:
25
+ def __init__(self, test_X, config=None, initial_data=None, comm=None):
26
+ """
27
+
28
+ Parameters
29
+ ----------
30
+ test_X: numpy.ndarray or physbo.variable
31
+ The set of candidates. Each row vector represents the feature vector of each search candidate.
32
+ config: set_config object (physbo.misc.set_config)
33
+ initial_data: tuple[np.ndarray, np.ndarray]
34
+ The initial training datasets.
35
+ The first element is an array of actions and the second is an array of values of the objective function.
36
+ comm: MPI.Comm, optional
37
+ MPI Communicator
38
+ """
39
+ self.predictor = None
40
+ self.training = variable()
41
+ self.new_data = None
42
+ self.test = self._make_variable_X(test_X)
43
+ self.actions = np.arange(0, self.test.X.shape[0])
44
+ self.history = history()
45
+ if config is None:
46
+ self.config = set_config()
47
+ else:
48
+ self.config = config
49
+
50
+ if initial_data is not None:
51
+ if len(initial_data) != 2:
52
+ msg = "ERROR: initial_data should be 2-elements tuple or list (actions and objectives)"
53
+ raise RuntimeError(msg)
54
+ actions, fs = initial_data
55
+ if len(actions) != len(fs):
56
+ msg = "ERROR: len(initial_data[0]) != len(initial_data[1])"
57
+ raise RuntimeError(msg)
58
+ self.write(actions, fs)
59
+ self.actions = np.array(sorted(list(set(self.actions) - set(actions))))
60
+
61
+ if comm is None:
62
+ self.mpicomm = None
63
+ self.mpisize = 1
64
+ self.mpirank = 0
65
+ else:
66
+ self.mpicomm = comm
67
+ self.mpisize = comm.size
68
+ self.mpirank = comm.rank
69
+ self.actions = np.array_split(self.actions, self.mpisize)[self.mpirank]
70
+ self.config.learning.is_disp = (
71
+ self.config.learning.is_disp and self.mpirank == 0
72
+ )
73
+
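A sketch of passing pre-evaluated points through initial_data, reusing test_X and simulator from the sketch above the diff; the chosen indices are hypothetical.

# initial_data is (actions, objective values), where actions index rows of test_X
actions0 = np.array([0, 250, 500, 750, 999])
fs0 = simulator(actions0)
policy = physbo.search.discrete.policy(test_X=test_X, initial_data=(actions0, fs0))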
74
+ def set_seed(self, seed):
75
+ """
76
+ Setting a seed parameter for np.random.
77
+
78
+ Parameters
79
+ ----------
80
+ seed: int
81
+ seed number
82
83
+
84
+ """
85
+ self.seed = seed
86
+ np.random.seed(self.seed)
87
+
88
+ def write(
89
+ self,
90
+ action,
91
+ t,
92
+ X=None,
93
+ time_total=None,
94
+ time_update_predictor=None,
95
+ time_get_action=None,
96
+ time_run_simulator=None,
97
+ ):
98
+ """
99
+ Writing history (update history, not output to a file).
100
+
101
+ Parameters
102
+ ----------
103
+ action: numpy.ndarray
104
+ Indexes of actions.
105
+ t: numpy.ndarray
106
+ N-dimensional array. The negative energy of each search candidate (value of the objective function to be optimized).
107
+ X: numpy.ndarray
108
+ N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of each search candidate.
109
+ time_total: numpy.ndarray
110
+ N-dimensional array. The total elapsed time in each step.
111
+ If None (default), filled by 0.0.
112
+ time_update_predictor: numpy.ndarray
113
+ N-dimensional array. The elapsed time for updating the predictor (e.g., learning hyperparameters) in each step.
114
+ If None (default), filled by 0.0.
115
+ time_get_action: numpy.ndarray
116
+ N-dimensional array. The elapsed time for getting the next action in each step.
117
+ If None (default), filled by 0.0.
118
+ time_run_simulator: numpy.ndarray
119
+ N-dimensional array. The elapsed time for running the simulator in each step.
120
+ If None (default), filled by 0.0.
121
+
122
+ Returns
123
+ -------
124
+
125
+ """
126
+ if X is None:
127
+ X = self.test.X[action, :]
128
+ Z = self.test.Z[action, :] if self.test.Z is not None else None
129
+ else:
130
+ Z = self.predictor.get_basis(X) if self.predictor is not None else None
131
+
132
+ self.history.write(
133
+ t,
134
+ action,
135
+ time_total=time_total,
136
+ time_update_predictor=time_update_predictor,
137
+ time_get_action=time_get_action,
138
+ time_run_simulator=time_run_simulator,
139
+ )
140
+ self.training.add(X=X, t=t, Z=Z)
141
+ local_index = np.searchsorted(self.actions, action)
142
+ local_index = local_index[
143
+ np.take(self.actions, local_index, mode="clip") == action
144
+ ]
145
+ self.actions = self._delete_actions(local_index)
146
+ if self.new_data is None:
147
+ self.new_data = variable(X=X, t=t, Z=Z)
148
+ else:
149
+ self.new_data.add(X=X, t=t, Z=Z)
150
+
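When simulator is None, random_search (and bayes_search) return the selected actions instead of evaluating them, and write() registers results computed outside PHYSBO. A sketch, continuing the setup above:

# ask for 5 candidates without evaluating them
actions = policy.random_search(max_num_probes=1, num_search_each_probe=5,
                               simulator=None)
t = simulator(actions)     # evaluate externally
policy.write(actions, t)   # register the new observations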
151
+ def random_search(
152
+ self, max_num_probes, num_search_each_probe=1, simulator=None, is_disp=True
153
+ ):
154
+ """
155
+ Performing random search.
156
+
157
+ Parameters
158
+ ----------
159
+ max_num_probes: int
160
+ Maximum number of random search steps.
161
+ num_search_each_probe: int
162
+ Number of candidates searched at each random search step.
163
+ simulator: callable
164
+ Callable (function or object with ``__call__``) from action to t
165
+ Here, action is an integer which represents the index of the candidate.
166
+ is_disp: bool
167
+ If True, progress messages are displayed.
168
+ Returns
169
+ -------
170
+ history: history object (physbo.search.discrete.results.history)
171
+ """
172
+
173
+ if self.mpirank != 0:
174
+ is_disp = False
175
+
176
+ N = int(num_search_each_probe)
177
+
178
+ if is_disp:
179
+ utility.show_interactive_mode(simulator, self.history)
180
+
181
+ for n in range(0, max_num_probes):
182
+ time_total = time.time()
183
+ if is_disp and N > 1:
184
+ utility.show_start_message_multi_search(self.history.num_runs)
185
+
186
+ time_get_action = time.time()
187
+ action = self._get_random_action(N)
188
+ time_get_action = time.time() - time_get_action
189
+
190
+ N_indeed = len(action)
191
+ if N_indeed == 0:
192
+ if self.mpirank == 0:
193
+ print("WARNING: All actions have already searched.")
194
+ return copy.deepcopy(self.history)
195
+
196
+ if simulator is None:
197
+ return action
198
+
199
+ time_run_simulator = time.time()
200
+ t = _run_simulator(simulator, action, self.mpicomm)
201
+ time_run_simulator = time.time() - time_run_simulator
202
+
203
+ time_total = time.time() - time_total
204
+ self.write(
205
+ action,
206
+ t,
207
+ time_total=[time_total] * N_indeed,
208
+ time_update_predictor=np.zeros(N_indeed, dtype=float),
209
+ time_get_action=[time_get_action] * N_indeed,
210
+ time_run_simulator=[time_run_simulator] * N_indeed,
211
+ )
212
+
213
+ if is_disp:
214
+ utility.show_search_results(self.history, N_indeed)
215
+
216
+ return copy.deepcopy(self.history)
217
+
218
+ def bayes_search(
219
+ self,
220
+ training=None,
221
+ max_num_probes=None,
222
+ num_search_each_probe=1,
223
+ predictor=None,
224
+ is_disp=True,
225
+ simulator=None,
226
+ score="TS",
227
+ interval=0,
228
+ num_rand_basis=0,
229
+ ):
230
+ """
231
+ Performing Bayesian optimization.
232
+
233
+ Parameters
234
+ ----------
235
+ training: physbo.variable
236
+ Training dataset.
237
+ max_num_probes: int
238
+ Maximum number of search steps by Bayesian optimization.
239
+ num_search_each_probe: int
240
+ Number of candidates searched at each Bayesian optimization step.
241
+ predictor: predictor object
242
+ Base class is defined in physbo.predictor.
243
+ If None, a predictor is created automatically (blm_predictor when num_rand_basis > 0, otherwise gp_predictor).
244
+ is_disp: bool
245
+ If True, progress messages are displayed.
246
+ simulator: callable
247
+ Callable (function or object with ``__call__``) from action to t.
248
+ Here, action is an integer which represents the index of the candidate.
249
+ score: str
250
+ The type of acquisition function.
251
+ TS (Thompson Sampling), EI (Expected Improvement) and PI (Probability of Improvement) are available.
252
+ interval: int
253
+ The interval of hyperparameter learning.
254
+ If a negative value is given, hyperparameter learning is never performed.
255
+ If zero is given, hyperparameter learning is performed only at the first step.
256
+ num_rand_basis: int
257
+ The number of basis functions. If 0, an ordinary Gaussian process is used.
258
+
259
+ Returns
260
+ -------
261
+ history: history object (physbo.search.discrete.results.history)
262
+ """
263
+
264
+ if self.mpirank != 0:
265
+ is_disp = False
266
+
267
+ old_disp = self.config.learning.is_disp
268
+ self.config.learning.is_disp = is_disp
269
+
270
+ if max_num_probes is None:
271
+ max_num_probes = 1
272
+ simulator = None
273
+
274
+ is_rand_expans = num_rand_basis != 0
275
+
276
+ if training is not None:
277
+ self.training = training
278
+
279
+ if predictor is not None:
280
+ self.predictor = predictor
281
+ elif self.predictor is None:
282
+ self._init_predictor(is_rand_expans)
283
+
284
+ if max_num_probes == 0 and interval >= 0:
285
+ self._learn_hyperparameter(num_rand_basis)
286
+
287
+ N = int(num_search_each_probe)
288
+
289
+ for n in range(max_num_probes):
290
+ time_total = time.time()
291
+
292
+ time_update_predictor = time.time()
293
+ if utility.is_learning(n, interval):
294
+ self._learn_hyperparameter(num_rand_basis)
295
+ else:
296
+ self._update_predictor()
297
+ time_update_predictor = time.time() - time_update_predictor
298
+
299
+ if num_search_each_probe != 1:
300
+ utility.show_start_message_multi_search(self.history.num_runs, score)
301
+
302
+ time_get_action = time.time()
303
+ K = self.config.search.multi_probe_num_sampling
304
+ alpha = self.config.search.alpha
305
+ action = self._get_actions(score, N, K, alpha)
306
+ time_get_action = time.time() - time_get_action
307
+
308
+ N_indeed = len(action)
309
+ if N_indeed == 0:
310
+ if self.mpirank == 0:
311
+ print("WARNING: All actions have already searched.")
312
+ break
313
+
314
+ if simulator is None:
315
+ self.config.learning.is_disp = old_disp
316
+ return action
317
+
318
+ time_run_simulator = time.time()
319
+ t = _run_simulator(simulator, action, self.mpicomm)
320
+ time_run_simulator = time.time() - time_run_simulator
321
+
322
+ time_total = time.time() - time_total
323
+ self.write(
324
+ action,
325
+ t,
326
+ time_total=[time_total] * N_indeed,
327
+ time_update_predictor=[time_update_predictor] * N_indeed,
328
+ time_get_action=[time_get_action] * N_indeed,
329
+ time_run_simulator=[time_run_simulator] * N_indeed,
330
+ )
331
+
332
+ if is_disp:
333
+ utility.show_search_results(self.history, N_indeed)
334
+ self._update_predictor()
335
+ self.config.learning.is_disp = old_disp
336
+ return copy.deepcopy(self.history)
337
+
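A sketch of switching to the random-feature (BLM) predictor via num_rand_basis and reading the best result from the returned history, continuing the setup above; the attributes fx, chosen_actions, and total_num_search are the ones used by load() further down.

res = policy.bayes_search(max_num_probes=20, simulator=simulator,
                          score="EI", interval=5, num_rand_basis=500)
n = res.total_num_search
best = np.argmax(res.fx[:n])
print("best value", res.fx[:n][best], "at action", res.chosen_actions[:n][best])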
338
+ @staticmethod
339
+ def _warn_no_predictor(method_name):
340
+ print("Warning: Since policy.predictor is not yet set,")
341
+ print(" a GP predictor (num_rand_basis=0) is used for predicting")
342
+ print(" If you want to use a BLM predictor (num_rand_basis>0),")
343
+ print(" call bayes_search(max_num_probes=0, num_rand_basis=nrb)")
344
+ print(" before calling {}.".format(method_name))
345
+
346
+ def get_post_fmean(self, xs):
347
+ """Calculate mean value of predictor (post distribution)"""
348
+ X = self._make_variable_X(xs)
349
+ if self.predictor is None:
350
+ self._warn_no_predictor("get_post_fmean()")
351
+ predictor = gp_predictor(self.config)
352
+ predictor.fit(self.training, 0)
353
+ predictor.prepare(self.training)
354
+ return predictor.get_post_fmean(self.training, X)
355
+ else:
356
+ self._update_predictor()
357
+ return self.predictor.get_post_fmean(self.training, X)
358
+
359
+ def get_post_fcov(self, xs):
360
+ """Calculate covariance of predictor (post distribution)"""
361
+ X = self._make_variable_X(xs)
362
+ if self.predictor is None:
363
+ self._warn_no_predictor("get_post_fcov()")
364
+ predictor = gp_predictor(self.config)
365
+ predictor.fit(self.training, 0)
366
+ predictor.prepare(self.training)
367
+ return predictor.get_post_fcov(self.training, X)
368
+ else:
369
+ self._update_predictor()
370
+ return self.predictor.get_post_fcov(self.training, X)
371
+
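A sketch of querying the posterior over all candidates, continuing the setup above (xs may be an ndarray or a physbo.variable, per _make_variable_X below):

fmean = policy.get_post_fmean(test_X)  # posterior mean for every candidate
fcov = policy.get_post_fcov(test_X)    # posterior variance/covariance for every candidate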
372
+ def get_score(
373
+ self,
374
+ mode,
375
+ *,
376
+ actions=None,
377
+ xs=None,
378
+ predictor=None,
379
+ training=None,
380
+ parallel=True,
381
+ alpha=1
382
+ ):
383
+ """
384
+ Calculate score (acquisition function)
385
+
386
+ Parameters
387
+ ----------
388
+ mode: str
389
+ The type of acquisition function. TS, EI and PI are available.
390
+ These functions are defined in score.py.
391
+ actions: array of int
392
+ actions to calculate score
393
+ xs: physbo.variable or np.ndarray
394
+ input parameters to calculate score
395
+ predictor: predictor object
396
+ predictor used to calculate score.
397
+ If not given, self.predictor will be used.
398
+ training: physbo.variable
399
+ Training dataset.
400
+ If not given, self.training will be used.
401
+ parallel: bool
402
+ Calculate scores in parallel by MPI (default: True)
403
+ alpha: float
404
+ Tuning parameter which is used if mode = TS.
405
+ In TS, samples are drawn from np.random.multivariate_normal(mean, cov*alpha**2, size).
406
+
407
+ Returns
408
+ -------
409
+ f: float or list of float
410
+ Score defined in each mode.
411
+
412
+ Raises
413
+ ------
414
+ RuntimeError
415
+ If both *actions* and *xs* are given
416
+
417
+ Notes
418
+ -----
419
+ When neither *actions* nor *xs* are given, scores for actions not yet searched will be calculated.
420
+
421
+ When *parallel* is True, it is assumed that the function receives the same input (*actions* or *xs*) for all the ranks.
422
+ If you want to split the input array itself, set *parallel* to False and merge the results yourself.
423
+ """
424
+ if training is None:
425
+ training = self.training
426
+
427
+ if training.X is None or training.X.shape[0] == 0:
428
+ msg = "ERROR: No training data is registered."
429
+ raise RuntimeError(msg)
430
+
431
+ if predictor is None:
432
+ if self.predictor is None:
433
+ self._warn_no_predictor("get_score()")
434
+ predictor = gp_predictor(self.config)
435
+ predictor.fit(training, 0)
436
+ predictor.prepare(training)
437
+ else:
438
+ self._update_predictor()
439
+ predictor = self.predictor
440
+
441
+ if xs is not None:
442
+ if actions is not None:
443
+ raise RuntimeError("ERROR: both actions and xs are given")
444
+ test = self._make_variable_X(xs)
445
+ if parallel and self.mpisize > 1:
446
+ actions = np.array_split(np.arange(test.X.shape[0]), self.mpisize)
447
+ test = test.get_subset(actions[self.mpirank])
448
+ else:
449
+ if actions is None:
450
+ actions = self.actions
451
+ else:
452
+ if isinstance(actions, int):
453
+ actions = [actions]
454
+ if parallel and self.mpisize > 1:
455
+ actions = np.array_split(actions, self.mpisize)[self.mpirank]
456
+ test = self.test.get_subset(actions)
457
+
458
+ f = search_score.score(
459
+ mode, predictor=predictor, training=training, test=test, alpha=alpha
460
+ )
461
+ if parallel and self.mpisize > 1:
462
+ fs = self.mpicomm.allgather(f)
463
+ f = np.hstack(fs)
464
+ return f
465
+
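A sketch of evaluating acquisition scores directly with get_score, continuing the setup above:

scores_all = policy.get_score("EI", xs=test_X)          # scores for arbitrary inputs
scores_few = policy.get_score("PI", actions=[10, 20])   # scores for specific candidate indices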
466
+ def _get_marginal_score(self, mode, chosen_actions, K, alpha):
467
+ """
468
+ Getting marginal scores.
469
+
470
+ Parameters
471
+ ----------
472
+ mode: str
473
+ The type of acquisition function.
474
+ TS (Thompson Sampling), EI (Expected Improvement) and PI (Probability of Improvement) are available.
475
+ These functions are defined in score.py.
476
+ chosen_actions: numpy.ndarray
477
+ Array of selected actions.
478
+ K: int
479
+ The number of samples for evaluating score.
480
+ alpha: float
481
+ not used.
482
+
483
+ Returns
484
+ -------
485
+ f: numpy.ndarray
486
+ N-dimensional array of scores (as defined by each mode), averaged over the K samples.
487
+ """
488
+ f = np.zeros((K, len(self.actions)), dtype=float)
489
+
490
+ # draw K samples of the values of objective function of chosen actions
491
+ new_test_local = self.test.get_subset(chosen_actions)
492
+ virtual_t_local = self.predictor.get_predict_samples(
493
+ self.training, new_test_local, K
494
+ )
495
+ if self.mpisize == 1:
496
+ new_test = new_test_local
497
+ virtual_t = virtual_t_local
498
+ else:
499
+ new_test = variable()
500
+ for nt in self.mpicomm.allgather(new_test_local):
501
+ new_test.add(X=nt.X, t=nt.t, Z=nt.Z)
502
+ virtual_t = np.concatenate(self.mpicomm.allgather(virtual_t_local), axis=1)
503
+ # virtual_t = self.predictor.get_predict_samples(self.training, new_test, K)
504
+
505
+ for k in range(K):
506
+ predictor = copy.deepcopy(self.predictor)
507
+ train = copy.deepcopy(self.training)
508
+ virtual_train = new_test
509
+ virtual_train.t = virtual_t[k, :]
510
+
511
+ if virtual_train.Z is None:
512
+ train.add(virtual_train.X, virtual_train.t)
513
+ else:
514
+ train.add(virtual_train.X, virtual_train.t, virtual_train.Z)
515
+
516
+ predictor.update(train, virtual_train)
517
+
518
+ f[k, :] = self.get_score(
519
+ mode, predictor=predictor, training=train, parallel=False
520
+ )
521
+ return np.mean(f, axis=0)
522
+
523
+ def _get_actions(self, mode, N, K, alpha):
524
+ """
525
+ Getting next candidates
526
+
527
+ Parameters
528
+ ----------
529
+ mode: str
530
+ The type of acquisition function.
531
+ TS (Thompson Sampling), EI (Expected Improvement) and PI (Probability of Improvement) are available.
532
+ These functions are defined in score.py.
533
+ N: int
534
+ The total number of actions to return.
535
+ K: int
536
+ The number of samples used to evaluate the marginal score.
537
+ alpha: float
538
+ Tuning parameter which is used if mode = TS.
539
+ In TS, samples are drawn from np.random.multivariate_normal(mean, cov*alpha**2, size).
540
+
541
+ Returns
542
+ -------
543
+ chosen_actions: numpy.ndarray
544
+ An N-dimensional array of actions selected in each search process.
545
+ """
546
+ f = self.get_score(
547
+ mode,
548
+ predictor=self.predictor,
549
+ training=self.training,
550
+ alpha=alpha,
551
+ parallel=False,
552
+ )
553
+ champion, local_champion, local_index = self._find_champion(f)
554
+ if champion == -1:
555
+ return np.zeros(0, dtype=int)
556
+ if champion == local_champion:
557
+ self.actions = self._delete_actions(local_index)
558
+
559
+ chosen_actions = [champion]
560
+ for n in range(1, N):
561
+ f = self._get_marginal_score(mode, chosen_actions[0:n], K, alpha)
562
+ champion, local_champion, local_index = self._find_champion(f)
563
+ if champion == -1:
564
+ break
565
+ if champion == local_champion:
566
+ self.actions = self._delete_actions(local_index)
567
+ chosen_actions.append(champion)
568
+ return np.array(chosen_actions)
569
+
570
+ def _find_champion(self, f):
571
+ if len(f) == 0:
572
+ local_fmax = -float("inf")
573
+ local_index = -1
574
+ local_champion = -1
575
+ else:
576
+ local_fmax = np.max(f)
577
+ local_index = np.argmax(f)
578
+ local_champion = self.actions[local_index]
579
+ if self.mpisize == 1:
580
+ champion = local_champion
581
+ else:
582
+ local_champions = self.mpicomm.allgather(local_champion)
583
+ local_fs = self.mpicomm.allgather(local_fmax)
584
+ champion_rank = np.argmax(local_fs)
585
+ champion = local_champions[champion_rank]
586
+ return champion, local_champion, local_index
587
+
588
+ def _get_random_action(self, N):
589
+ """
590
+ Getting indexes of actions randomly.
591
+
592
+ Parameters
593
+ ----------
594
+ N: int
595
+ Number of actions to be selected randomly.
596
+ Returns
597
+ -------
598
+ action: numpy.ndarray
599
+ Indexes of actions selected randomly from search candidates.
600
+ """
601
+ if self.mpisize == 1:
602
+ n = len(self.actions)
603
+ if n <= N:
604
+ index = np.arange(0, n)
605
+ else:
606
+ index = np.random.choice(len(self.actions), N, replace=False)
607
+ action = self.actions[index]
608
+ self.actions = self._delete_actions(index)
609
+ else:
610
+ nactions = self.mpicomm.gather(len(self.actions), root=0)
611
+ local_indices = [[] for _ in range(self.mpisize)]
612
+ if self.mpirank == 0:
613
+ hi = np.add.accumulate(nactions)
614
+ lo = np.roll(hi, 1)
615
+ lo[0] = 0
616
+ if hi[-1] <= N:
617
+ index = np.arange(0, hi[-1])
618
+ else:
619
+ index = np.random.choice(hi[-1], N, replace=False)
620
+ ranks = np.searchsorted(hi, index, side="right")
621
+ for r, i in zip(ranks, index):
622
+ local_indices[r].append(i - lo[r])
623
+ local_indices = self.mpicomm.scatter(local_indices, root=0)
624
+ local_actions = self.actions[local_indices]
625
+ self.actions = self._delete_actions(local_indices)
626
+ action = self.mpicomm.allgather(local_actions)
627
+ action = itertools.chain.from_iterable(action)
628
+ action = np.array(list(action))
629
+ return action
630
+
631
+ def save(self, file_history, file_training=None, file_predictor=None):
632
+ """
633
+
634
+ Saving history, training and predictor into the corresponding files.
635
+
636
+ Parameters
637
+ ----------
638
+ file_history: str
639
+ The name of the file that stores the information of the history.
640
+ file_training: str
641
+ The name of the file that stores the training dataset.
642
+ file_predictor: str
643
+ The name of the file that stores the predictor.
644
+
645
+ Returns
646
+ -------
647
+
648
+ """
649
+ if self.mpirank == 0:
650
+ self.history.save(file_history)
651
+
652
+ if file_training is not None:
653
+ self.training.save(file_training)
654
+
655
+ if file_predictor is not None:
656
+ with open(file_predictor, "wb") as f:
657
+ pickle.dump(self.predictor, f)
658
+
659
+ def load(self, file_history, file_training=None, file_predictor=None):
660
+ """
661
+
662
+ Loading files about history, training and predictor.
663
+
664
+ Parameters
665
+ ----------
666
+ file_history: str
667
+ The name of the file that stores the information of the history.
668
+ file_training: str
669
+ The name of the file that stores the training dataset.
670
+ file_predictor: str
671
+ The name of the file that stores the predictor.
672
+
673
+ Returns
674
+ -------
675
+
676
+ """
677
+ self.history.load(file_history)
678
+
679
+ if file_training is None:
680
+ N = self.history.total_num_search
681
+ X = self.test.X[self.history.chosen_actions[0:N], :]
682
+ t = self.history.fx[0:N]
683
+ self.training = variable(X=X, t=t)
684
+ else:
685
+ self.training = variable()
686
+ self.training.load(file_training)
687
+
688
+ if file_predictor is not None:
689
+ with open(file_predictor, "rb") as f:
690
+ self.predictor = pickle.load(f)
691
+
692
+ N = self.history.total_num_search
693
+ self.actions = self._delete_actions(self.history.chosen_actions[:N])
694
+
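A sketch of persisting and resuming a search session with save() and load(), continuing the setup above; the file names are hypothetical.

policy.save("history.npz", file_training="training.npz",
            file_predictor="predictor.dump")

policy2 = physbo.search.discrete.policy(test_X=test_X)
policy2.load("history.npz", file_training="training.npz",
             file_predictor="predictor.dump")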
695
+ def export_predictor(self):
696
+ """
697
+ Returning the predictor.
698
+
699
+ Returns
700
+ -------
701
+
702
+ """
703
+ return self.predictor
704
+
705
+ def export_training(self):
706
+ """
707
+ Returning the training dataset
708
+
709
+ Returns
710
+ -------
711
+
712
+ """
713
+ return self.training
714
+
715
+ def export_history(self):
716
+ """
717
+ Returning the information of the history.
718
+
719
+ Returns
720
+ -------
721
+
722
+ """
723
+ return self.history
724
+
725
+ def _init_predictor(self, is_rand_expans):
726
+ """
727
+ Initialize predictor.
728
+
729
+ Parameters
730
+ ----------
731
+ is_rand_expans: bool
732
+ If true, physbo.blm.predictor is selected.
733
+ If false, physbo.gp.predictor is selected.
734
+ """
735
+ if is_rand_expans:
736
+ self.predictor = blm_predictor(self.config)
737
+ else:
738
+ self.predictor = gp_predictor(self.config)
739
+
740
+ def _learn_hyperparameter(self, num_rand_basis):
741
+ self.predictor.fit(self.training, num_rand_basis)
742
+ self.test.Z = self.predictor.get_basis(self.test.X)
743
+ self.training.Z = self.predictor.get_basis(self.training.X)
744
+ self.predictor.prepare(self.training)
745
+ self.new_data = None
746
+
747
+ def _update_predictor(self):
748
+ if self.new_data is not None:
749
+ self.predictor.update(self.training, self.new_data)
750
+ self.new_data = None
751
+
752
+ def _make_variable_X(self, test_X):
753
+ """
754
+ Make a new *variable* with X=test_X
755
+
756
+ Parameters
757
+ ----------
758
+ test_X: numpy.ndarray or physbo.variable
759
+ The set of candidates. Each row vector represents the feature vector of each search candidate.
760
+ Returns
761
+ -------
762
+ test_X: physbo.variable
763
+ A variable object whose X is the set of candidates (one row per search candidate).
764
+ """
765
+ if isinstance(test_X, np.ndarray):
766
+ test = variable(X=test_X)
767
+ elif isinstance(test_X, variable):
768
+ test = test_X
769
+ else:
770
+ raise TypeError("The type of test_X must be ndarray or physbo.variable")
771
+ return test
772
+
773
+ def _delete_actions(self, index, actions=None):
774
+ """
775
+ Returns remaining actions
776
+
777
+ Notes
778
+ -----
779
+ This method itself does not modify *self*
780
+
781
+ Parameters
782
+ ----------
783
+ index: int or array-like of int
784
+ Index (or indices) of actions to be deleted.
785
+ actions: numpy.ndarray
786
+ Array of actions.
787
+ Returns
788
+ -------
789
+ actions: numpy.ndarray
790
+ Array of actions excluding those specified by index.
791
+ """
792
+ if actions is None:
793
+ actions = self.actions
794
+ return np.delete(actions, index)
795
+
796
+
797
+ def _run_simulator(simulator, action, comm=None):
798
+ if comm is None:
799
+ return simulator(action)
800
+ if comm.rank == 0:
801
+ t = simulator(action)
802
+ else:
803
+ t = 0.0
804
+ return comm.bcast(t, root=0)
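The comm argument together with _run_simulator allows candidates to be scored in parallel over MPI ranks; a sketch assuming mpi4py is installed and reusing test_X and simulator from above.

from mpi4py import MPI

policy = physbo.search.discrete.policy(test_X=test_X, comm=MPI.COMM_WORLD)
policy.random_search(max_num_probes=10, simulator=simulator)
res = policy.bayes_search(max_num_probes=30, simulator=simulator,
                          score="TS", interval=5)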