physbo-2.0.0-cp310-cp310-macosx_12_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. physbo/__init__.py +17 -0
  2. physbo/blm/__init__.py +17 -0
  3. physbo/blm/basis/__init__.py +8 -0
  4. physbo/blm/basis/fourier.py +148 -0
  5. physbo/blm/core/__init__.py +8 -0
  6. physbo/blm/core/model.py +257 -0
  7. physbo/blm/inf/__init__.py +8 -0
  8. physbo/blm/inf/exact.py +192 -0
  9. physbo/blm/lik/__init__.py +10 -0
  10. physbo/blm/lik/_src/__init__.py +8 -0
  11. physbo/blm/lik/_src/cov.py +113 -0
  12. physbo/blm/lik/gauss.py +136 -0
  13. physbo/blm/lik/linear.py +117 -0
  14. physbo/blm/predictor.py +238 -0
  15. physbo/blm/prior/__init__.py +8 -0
  16. physbo/blm/prior/gauss.py +215 -0
  17. physbo/gp/__init__.py +15 -0
  18. physbo/gp/core/__init__.py +11 -0
  19. physbo/gp/core/learning.py +364 -0
  20. physbo/gp/core/model.py +420 -0
  21. physbo/gp/core/prior.py +207 -0
  22. physbo/gp/cov/__init__.py +8 -0
  23. physbo/gp/cov/_src/__init__.py +1 -0
  24. physbo/gp/cov/_src/enhance_gauss.cpython-310-darwin.so +0 -0
  25. physbo/gp/cov/gauss.py +393 -0
  26. physbo/gp/inf/__init__.py +8 -0
  27. physbo/gp/inf/exact.py +231 -0
  28. physbo/gp/lik/__init__.py +8 -0
  29. physbo/gp/lik/gauss.py +179 -0
  30. physbo/gp/mean/__init__.py +9 -0
  31. physbo/gp/mean/const.py +150 -0
  32. physbo/gp/mean/zero.py +66 -0
  33. physbo/gp/predictor.py +170 -0
  34. physbo/misc/__init__.py +15 -0
  35. physbo/misc/_src/__init__.py +1 -0
  36. physbo/misc/_src/cholupdate.cpython-310-darwin.so +0 -0
  37. physbo/misc/_src/diagAB.cpython-310-darwin.so +0 -0
  38. physbo/misc/_src/logsumexp.cpython-310-darwin.so +0 -0
  39. physbo/misc/_src/traceAB.cpython-310-darwin.so +0 -0
  40. physbo/misc/centering.py +28 -0
  41. physbo/misc/gauss_elim.py +35 -0
  42. physbo/misc/set_config.py +299 -0
  43. physbo/opt/__init__.py +8 -0
  44. physbo/opt/adam.py +107 -0
  45. physbo/predictor.py +261 -0
  46. physbo/search/__init__.py +11 -0
  47. physbo/search/discrete/__init__.py +11 -0
  48. physbo/search/discrete/policy.py +804 -0
  49. physbo/search/discrete/results.py +192 -0
  50. physbo/search/discrete_multi/__init__.py +11 -0
  51. physbo/search/discrete_multi/policy.py +552 -0
  52. physbo/search/discrete_multi/results.py +128 -0
  53. physbo/search/pareto.py +206 -0
  54. physbo/search/score.py +155 -0
  55. physbo/search/score_multi.py +197 -0
  56. physbo/search/utility.py +101 -0
  57. physbo/variable.py +222 -0
  58. physbo-2.0.0.dist-info/METADATA +110 -0
  59. physbo-2.0.0.dist-info/RECORD +61 -0
  60. physbo-2.0.0.dist-info/WHEEL +5 -0
  61. physbo-2.0.0.dist-info/top_level.txt +1 -0
physbo/gp/core/model.py
@@ -0,0 +1,420 @@
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+import numpy as np
+
+from physbo import blm
+from physbo.gp import inf
+from physbo.gp.core import learning
+from physbo.gp.core.prior import prior
+
+
+class model:
+    def __init__(self, lik, mean, cov, inf="exact"):
+        """
+
+        Parameters
+        ----------
+        lik
+        mean
+        cov
+        inf
+        """
+        self.lik = lik
+        self.prior = prior(mean=mean, cov=cov)
+        self.inf = inf
+        self.num_params = self.lik.num_params + self.prior.num_params
+        self.params = self.cat_params(self.lik.params, self.prior.params)
+        self.stats = ()
+
+    def cat_params(self, lik_params, prior_params):
+        """
+        Concatenate the likelihood and prior parameters
+
+        Parameters
+        ----------
+        lik_params: numpy.ndarray
+            Parameters for the likelihood
+        prior_params: numpy.ndarray
+            Parameters for the prior
+        Returns
+        -------
+        params: numpy.ndarray
+            Parameters of the likelihood and the prior
+        """
+        params = np.append(lik_params, prior_params)
+        return params
+
+    def decomp_params(self, params=None):
+        """
+        Decompose the parameters into those of the likelihood and the prior
+
+        Parameters
+        ----------
+        params: numpy.ndarray
+            parameters
+
+        Returns
+        -------
+        lik_params: numpy.ndarray
+        prior_params: numpy.ndarray
+        """
+        if params is None:
+            params = np.copy(self.params)
+
+        lik_params = params[0 : self.lik.num_params]
+        prior_params = params[self.lik.num_params :]
+        return lik_params, prior_params
+
+    def set_params(self, params):
+        """
+        Set parameters
+
+        Parameters
+        ----------
+        params: numpy.ndarray
+            Parameters.
+        """
+        self.params = params
+        lik_params, prior_params = self.decomp_params(params)
+        self.lik.set_params(lik_params)
+        self.prior.set_params(prior_params)
+
+    def sub_sampling(self, X, t, N):
+        """
+        Make a subset of the data for sampling
+
+        Parameters
+        ----------
+        X: numpy.ndarray
+            Each row of X denotes the d-dimensional feature vector of a search candidate.
+        t: numpy.ndarray
+            The negative energy of each search candidate (value of the objective function to be optimized).
+        N: int
+            Total number of data in the subset
+        Returns
+        -------
+        subX: numpy.ndarray
+        subt: numpy.ndarray
+        """
+        num_data = X.shape[0]
+
+        if N is not None and N < num_data:
+            index = np.random.permutation(num_data)
+            subX = X[index[0:N], :]
+            subt = t[index[0:N]]
+        else:
+            subX = X
+            subt = t
+        return subX, subt
+
+    def export_blm(self, num_basis):
+        """
+        Export the BLM (Bayesian linear model) predictor
+
+        Parameters
+        ----------
+        num_basis: int
+            Total number of basis functions
+        Returns
+        -------
+        physbo.blm.core.model
+        """
+        if not hasattr(self.prior.cov, "rand_expans"):
+            raise ValueError("The kernel must support random feature expansion (rand_expans).")
+
+        basis_params = self.prior.cov.rand_expans(num_basis)
+        basis = blm.basis.fourier(basis_params)
+        prior = blm.prior.gauss(num_basis)
+        lik = blm.lik.gauss(
+            blm.lik.linear(basis, bias=self.prior.get_mean(1)),
+            blm.lik.cov(self.lik.params),
+        )
+        blr = blm.model(lik, prior)
+
+        return blr
+
+    def eval_marlik(self, params, X, t, N=None):
+        """
+        Evaluate the marginal likelihood.
+
+        Parameters
+        ----------
+        params: numpy.ndarray
+            Parameters.
+        X: numpy.ndarray
+            N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of a search candidate.
+        t: numpy.ndarray
+            N dimensional array.
+            The negative energy of each search candidate (value of the objective function to be optimized).
+        N: int
+            Total number of subset data (if not specified, the whole dataset is used)
+        Returns
+        -------
+        marlik: float
+            Marginal likelihood.
+        """
+        subX, subt = self.sub_sampling(X, t, N)
+        if self.inf == "exact":
+            marlik = inf.exact.eval_marlik(self, subX, subt, params=params)
+        else:
+            pass
+
+        return marlik
+
+    def get_grad_marlik(self, params, X, t, N=None):
+        """
+        Evaluate the gradient of the marginal likelihood.
+
+        Parameters
+        ----------
+        params: numpy.ndarray
+            Parameters.
+        X: numpy.ndarray
+            N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of a search candidate.
+        t: numpy.ndarray
+            N dimensional array.
+            The negative energy of each search candidate (value of the objective function to be optimized).
+        N: int
+            Total number of subset data (if not specified, the whole dataset is used)
+
+        Returns
+        -------
+        grad_marlik: numpy.ndarray
+            Gradient of the marginal likelihood.
+        """
+        subX, subt = self.sub_sampling(X, t, N)
+        if self.inf == "exact":
+            grad_marlik = inf.exact.get_grad_marlik(self, subX, subt, params=params)
+        return grad_marlik
+
+    def get_params_bound(self):
+        """
+        Get the bounds of the parameters.
+
+        Returns
+        -------
+        bound: list
+            A list of (min_params, max_params) tuples.
+        """
+        if self.lik.num_params != 0:
+            bound = self.lik.get_params_bound()
+
+        if self.prior.mean.num_params != 0:
+            bound.extend(self.prior.mean.get_params_bound())
+
+        if self.prior.cov.num_params != 0:
+            bound.extend(self.prior.cov.get_params_bound())
+        return bound
+
+    def prepare(self, X, t, params=None):
+        """
+
+        Parameters
+        ----------
+        X: numpy.ndarray
+            N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of a search candidate.
+
+        t: numpy.ndarray
+            N dimensional array.
+            The negative energy of each search candidate (value of the objective function to be optimized).
+        params: numpy.ndarray
+            Parameters.
+        """
+        if params is None:
+            params = np.copy(self.params)
+        if self.inf == "exact":
+            self.stats = inf.exact.prepare(self, X, t, params)
+        else:
+            pass
+
+    def get_post_fmean(self, X, Z, params=None):
+        """
+        Calculate the posterior mean of the model (function)
+
+        Parameters
+        ==========
+        X: numpy.ndarray
+            inputs
+        Z: numpy.ndarray
+            feature maps
+        params: numpy.ndarray
+            Parameters
+        See also
+        ========
+        physbo.gp.inf.exact.get_post_fmean
+        """
+        if params is None:
+            params = np.copy(self.params)
+
+        if self.inf == "exact":
+            post_fmu = inf.exact.get_post_fmean(self, X, Z, params)
+
+        return post_fmu
+
+    def get_post_fcov(self, X, Z, params=None, diag=True):
+        """
+        Calculate the posterior covariance matrix of the model (function)
+
+        Parameters
+        ----------
+        X: numpy.ndarray
+            inputs
+        Z: numpy.ndarray
+            feature maps
+        params: numpy.ndarray
+            Parameters
+        diag: bool
+            If True, only the diagonal elements of the posterior covariance are returned.
+
+        Returns
+        -------
+        physbo.gp.inf.exact.get_post_fcov
+
+        """
+        if params is None:
+            params = np.copy(self.params)
+
+        if self.inf == "exact":
+            post_fcov = inf.exact.get_post_fcov(self, X, Z, params, diag)
+
+        return post_fcov
+
+    def post_sampling(self, X, Z, params=None, N=1, alpha=1):
+        """
+        Draw samples of the mean value of the model
+
+        Parameters
+        ==========
+        X: numpy.ndarray
+            inputs
+        Z: numpy.ndarray
+            feature maps
+        N: int
+            number of samples
+            (default: 1)
+        alpha: float
+            noise for the sampling source
+        Returns
+        =======
+        numpy.ndarray
+        """
+        if params is None:
+            params = np.copy(self.params)
+
+        fmean = self.get_post_fmean(X, Z, params=params)
+        fcov = self.get_post_fcov(X, Z, params=params, diag=False)
+        return np.random.multivariate_normal(fmean, fcov * alpha**2, N)
+
+    def predict_sampling(self, X, Z, params=None, N=1):
+        """
+
+        Parameters
+        ----------
+        X: numpy.ndarray
+            training datasets
+        Z: numpy.ndarray
+            inputs for sampling objective values
+        params: numpy.ndarray
+            Parameters
+        N: int
+            number of samples
+            (default: 1)
+
+        Returns
+        -------
+        numpy.ndarray
+
+        """
+        if params is None:
+            params = np.copy(self.params)
+
+        ndata = Z.shape[0]
+        if ndata == 0:
+            return np.zeros((N, 0))
+        fmean = self.get_post_fmean(X, Z, params=params)
+        fcov = self.get_post_fcov(X, Z, params=params, diag=False) + self.lik.get_cov(
+            ndata
+        )
+
+        return np.random.multivariate_normal(fmean, fcov, N)
+
+    def print_params(self):
+        """
+        Print parameters
+        """
+        print("\n")
+        if self.lik.num_params != 0:
+            print("likelihood parameter = ", self.lik.params)
+
+        if self.prior.mean.num_params != 0:
+            print("mean parameter in GP prior: ", self.prior.mean.params)
+
+        print("covariance parameter in GP prior: ", self.prior.cov.params)
+        print("\n")
+
+    def get_cand_params(self, X, t):
+        """
+        Get candidate parameters
+
+        Parameters
+        ----------
+        X: numpy.ndarray
+            N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of a search candidate.
+
+        t: numpy.ndarray
+            N dimensional array.
+            The negative energy of each search candidate (value of the objective function to be optimized).
+        Returns
+        -------
+        params: numpy.ndarray
+            Parameters
+        """
+        params = np.zeros(self.num_params)
+        if self.lik.num_params != 0:
+            params[0 : self.lik.num_params] = self.lik.get_cand_params(t)
+
+        temp = self.lik.num_params
+
+        if self.prior.mean.num_params != 0:
+            params[
+                temp : temp + self.prior.mean.num_params
+            ] = self.prior.mean.get_cand_params(t)
+
+        temp += self.prior.mean.num_params
+
+        if self.prior.cov.num_params != 0:
+            params[temp:] = self.prior.cov.get_cand_params(X, t)
+
+        return params
+
+    def fit(self, X, t, config):
+        """
+        Fit the model (update its parameters)
+
+        Parameters
+        ----------
+        X: numpy.ndarray
+            N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of a search candidate.
+
+        t: numpy.ndarray
+            N dimensional array.
+            The negative energy of each search candidate (value of the objective function to be optimized).
+        config: physbo.misc.set_config object
+
+        """
+        method = config.learning.method
+
+        if method == "adam":
+            adam = learning.adam(self, config)
+            params = adam.run(X, t)
+
+        if method in ("bfgs", "batch"):
+            bfgs = learning.batch(self, config)
+            params = bfgs.run(X, t)
+
+        self.set_params(params)
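
The class above is typically driven through fit, prepare, and the get_post_* methods. The following minimal sketch (not part of the wheel contents) illustrates that flow; the constructor arguments of physbo.gp.cov.gauss, physbo.gp.mean.const, physbo.gp.lik.gauss, and physbo.misc.set_config are assumptions based on the module layout listed above, not verified signatures.

import numpy as np
import physbo

X = np.random.rand(50, 2)                    # 50 search candidates, d = 2
t = -np.sum((X - 0.5) ** 2, axis=1)          # "negative energy" objective values

cov = physbo.gp.cov.gauss(X.shape[1], ard=False)   # assumed constructor signature
mean = physbo.gp.mean.const()                      # assumed constructor signature
lik = physbo.gp.lik.gauss()                        # assumed constructor signature
gp = physbo.gp.core.model(lik=lik, mean=mean, cov=cov)

config = physbo.misc.set_config()            # assumed default learning settings
gp.fit(X, t, config)                         # hyperparameter learning (adam or batch/bfgs)
gp.prepare(X, t)                             # cache posterior statistics

Xtest = np.random.rand(5, 2)
fmean = gp.get_post_fmean(X, Xtest)          # posterior mean at the test points
fvar = gp.get_post_fcov(X, Xtest, diag=True) # posterior variance (diagonal only)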
physbo/gp/core/prior.py
@@ -0,0 +1,207 @@
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+import numpy as np
+import scipy.linalg
+
+
+class prior:
+    """Prior of a Gaussian process"""
+
+    def __init__(self, mean, cov):
+        """
+
+        Parameters
+        ----------
+        mean: numpy.ndarray
+            mean of the prior
+        cov: numpy.ndarray
+            covariance of the prior
+        """
+        self.mean = mean
+        self.cov = cov
+        self.num_params = self.cov.num_params + self.mean.num_params
+        self.params = self.cat_params(self.mean.params, self.cov.params)
+
+    def cat_params(self, mean_params, cov_params):
+        """
+
+        Parameters
+        ----------
+        mean_params: numpy.ndarray
+            Parameters of the prior mean
+        cov_params: numpy.ndarray
+            Parameters of the prior covariance
+        Returns
+        -------
+        numpy.ndarray
+        """
+        return np.append(mean_params, cov_params)
+
+    def decomp_params(self, params):
+        """
+        Decompose the parameters into those of the mean and the covariance of the prior
+
+        Parameters
+        ----------
+        params: numpy.ndarray
+            parameters
+
+        Returns
+        -------
+        mean_params: numpy.ndarray
+        cov_params: numpy.ndarray
+        """
+        if params is None:
+            params = np.copy(self.params)
+
+        mean_params = params[0 : self.mean.num_params]
+        cov_params = params[self.mean.num_params :]
+        return mean_params, cov_params
+
+    def get_mean(self, num_data, params=None):
+        """
+        Calculate the mean value of the prior
+
+        Parameters
+        ----------
+        num_data: int
+            Total number of data
+        params: numpy.ndarray
+            Parameters
+        Returns
+        -------
+        float
+        """
+        if params is None:
+            params = np.copy(self.params)
+        return self.mean.get_mean(num_data, params[0 : self.mean.num_params])
+
+    def get_cov(self, X, Z=None, params=None, diag=False):
+        """
+        Calculate the variance-covariance matrix of the prior
+
+        Parameters
+        ----------
+        X: numpy.ndarray
+            N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of a search candidate.
+        Z: numpy.ndarray
+            N x d dimensional matrix. Each row of Z denotes the d-dimensional feature vector of a test point.
+        params: numpy.ndarray
+            Parameters.
+        diag: bool
+            If True, only the diagonal elements are returned.
+        Returns
+        -------
+        numpy.ndarray
+        """
+        if params is None:
+            params = np.copy(self.params)
+
+        return self.cov.get_cov(X, Z, params=params[self.mean.num_params :], diag=diag)
+
+    def get_grad_mean(self, num_data, params=None):
+        """
+        Calculate the gradient of the mean values of the prior
+
+        Parameters
+        ----------
+        num_data: int
+            Total number of data
+        params: numpy.ndarray
+            Parameters
+
+        Returns
+        -------
+        numpy.ndarray
+
+        """
+        if params is None:
+            params = np.copy(self.params)
+
+        mean_params, cov_params = self.decomp_params(params)
+        return self.mean.get_grad(num_data, params=mean_params)
+
+    def get_grad_cov(self, X, params=None):
+        """
+        Calculate the gradient of the covariance matrix of the prior
+
+        Parameters
+        ----------
+        X: numpy.ndarray
+            N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of a search candidate.
+        params: numpy.ndarray
+            Parameters.
+
+        Returns
+        -------
+        numpy.ndarray
+
+        """
+        if params is None:
+            params = np.copy(self.params)
+        mean_params, cov_params = self.decomp_params(params)
+        return self.cov.get_grad(X, params=cov_params)
+
+    def set_params(self, params):
+        """
+        Set parameters
+
+        Parameters
+        ----------
+        params: numpy.ndarray
+            Parameters.
+        """
+        mean_params, cov_params = self.decomp_params(params)
+        self.set_mean_params(mean_params)
+        self.set_cov_params(cov_params)
+
+    def set_mean_params(self, params):
+        """
+        Set parameters of the prior mean
+
+        Parameters
+        ----------
+        params: numpy.ndarray
+            Parameters
+        """
+        if self.mean.num_params != 0:
+            self.params[0 : self.mean.num_params] = params
+            self.mean.set_params(params)
+
+    def set_cov_params(self, params):
+        """
+        Set parameters of the prior covariance
+
+        Parameters
+        ----------
+        params: numpy.ndarray
+            Parameters
+        """
+        self.params[self.mean.num_params :] = params
+        self.cov.set_params(params)
+
+    def sampling(self, X, N=1):
+        """
+        Sampling from the GP prior
+
+        Parameters
+        ----------
+        X: numpy.ndarray
+            N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of a search candidate.
+        N: int
+
+        Returns
+        -------
+        numpy.ndarray
+
+        """
+        num_data = X.shape[0]
+        G = self.get_cov(X) + 1e-8 * np.identity(num_data)
+        U = scipy.linalg.cholesky(G, check_finite=False)
+        Z = np.random.randn(N, num_data)
+        return np.dot(Z, U) + self.get_mean(num_data)
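
The sampling method above draws N correlated prior samples by Cholesky-factorising the jittered covariance G and multiplying standard-normal rows by the upper-triangular factor. The following self-contained sketch reproduces that construction; the squared-exponential kernel and its length scale are illustrative stand-ins, not physbo's gauss covariance.

import numpy as np
import scipy.linalg

X = np.random.rand(30, 2)                                    # 30 candidate points, d = 2
d2 = np.sum((X[:, None, :] - X[None, :, :]) ** 2, axis=-1)   # pairwise squared distances
G = np.exp(-0.5 * d2 / 0.1**2) + 1e-8 * np.identity(len(X))  # illustrative kernel + jitter

U = scipy.linalg.cholesky(G, check_finite=False)  # upper triangular, G = U.T @ U
Z = np.random.randn(5, len(X))                    # 5 rows of independent standard normals
samples = Z @ U                                   # each row has covariance U.T @ U = G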
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: MPL-2.0
+# Copyright (C) 2020- The University of Tokyo
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+from .gauss import gauss
@@ -0,0 +1 @@
+