physbo-2.0.0-cp310-cp310-macosx_12_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. physbo/__init__.py +17 -0
  2. physbo/blm/__init__.py +17 -0
  3. physbo/blm/basis/__init__.py +8 -0
  4. physbo/blm/basis/fourier.py +148 -0
  5. physbo/blm/core/__init__.py +8 -0
  6. physbo/blm/core/model.py +257 -0
  7. physbo/blm/inf/__init__.py +8 -0
  8. physbo/blm/inf/exact.py +192 -0
  9. physbo/blm/lik/__init__.py +10 -0
  10. physbo/blm/lik/_src/__init__.py +8 -0
  11. physbo/blm/lik/_src/cov.py +113 -0
  12. physbo/blm/lik/gauss.py +136 -0
  13. physbo/blm/lik/linear.py +117 -0
  14. physbo/blm/predictor.py +238 -0
  15. physbo/blm/prior/__init__.py +8 -0
  16. physbo/blm/prior/gauss.py +215 -0
  17. physbo/gp/__init__.py +15 -0
  18. physbo/gp/core/__init__.py +11 -0
  19. physbo/gp/core/learning.py +364 -0
  20. physbo/gp/core/model.py +420 -0
  21. physbo/gp/core/prior.py +207 -0
  22. physbo/gp/cov/__init__.py +8 -0
  23. physbo/gp/cov/_src/__init__.py +1 -0
  24. physbo/gp/cov/_src/enhance_gauss.cpython-310-darwin.so +0 -0
  25. physbo/gp/cov/gauss.py +393 -0
  26. physbo/gp/inf/__init__.py +8 -0
  27. physbo/gp/inf/exact.py +231 -0
  28. physbo/gp/lik/__init__.py +8 -0
  29. physbo/gp/lik/gauss.py +179 -0
  30. physbo/gp/mean/__init__.py +9 -0
  31. physbo/gp/mean/const.py +150 -0
  32. physbo/gp/mean/zero.py +66 -0
  33. physbo/gp/predictor.py +170 -0
  34. physbo/misc/__init__.py +15 -0
  35. physbo/misc/_src/__init__.py +1 -0
  36. physbo/misc/_src/cholupdate.cpython-310-darwin.so +0 -0
  37. physbo/misc/_src/diagAB.cpython-310-darwin.so +0 -0
  38. physbo/misc/_src/logsumexp.cpython-310-darwin.so +0 -0
  39. physbo/misc/_src/traceAB.cpython-310-darwin.so +0 -0
  40. physbo/misc/centering.py +28 -0
  41. physbo/misc/gauss_elim.py +35 -0
  42. physbo/misc/set_config.py +299 -0
  43. physbo/opt/__init__.py +8 -0
  44. physbo/opt/adam.py +107 -0
  45. physbo/predictor.py +261 -0
  46. physbo/search/__init__.py +11 -0
  47. physbo/search/discrete/__init__.py +11 -0
  48. physbo/search/discrete/policy.py +804 -0
  49. physbo/search/discrete/results.py +192 -0
  50. physbo/search/discrete_multi/__init__.py +11 -0
  51. physbo/search/discrete_multi/policy.py +552 -0
  52. physbo/search/discrete_multi/results.py +128 -0
  53. physbo/search/pareto.py +206 -0
  54. physbo/search/score.py +155 -0
  55. physbo/search/score_multi.py +197 -0
  56. physbo/search/utility.py +101 -0
  57. physbo/variable.py +222 -0
  58. physbo-2.0.0.dist-info/METADATA +110 -0
  59. physbo-2.0.0.dist-info/RECORD +61 -0
  60. physbo-2.0.0.dist-info/WHEEL +5 -0
  61. physbo-2.0.0.dist-info/top_level.txt +1 -0
physbo/gp/cov/gauss.py ADDED
@@ -0,0 +1,393 @@
+ # SPDX-License-Identifier: MPL-2.0
+ # Copyright (C) 2020- The University of Tokyo
+ #
+ # This Source Code Form is subject to the terms of the Mozilla Public
+ # License, v. 2.0. If a copy of the MPL was not distributed with this
+ # file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+ # -*- coding:utf-8 -*-
+ import numpy as np
+ from scipy import spatial
+ from ._src.enhance_gauss import grad_width64
+
+
+ class gauss:
+     """Gaussian kernel"""
+
+     def __init__(
+         self,
+         num_dim,
+         width=3,
+         scale=1,
+         ard=False,
+         max_width=1e6,
+         min_width=1e-6,
+         max_scale=1e6,
+         min_scale=1e-6,
+     ):
+         """
+
+         Parameters
+         ----------
+         num_dim: int
+         width: float
+         scale: float
+         ard: bool
+             Flag to use Automatic Relevance Determination (ARD).
+         max_width: float
+             Maximum value of width
+         min_width: float
+             Minimum value of width
+         max_scale: float
+             Maximum value of scale
+         min_scale: float
+             Minimum value of scale
+         """
+         self.ard = ard
+         self.num_dim = num_dim
+         self.scale = scale
+         self.max_ln_width = np.log(max_width)
+         self.min_ln_width = np.log(min_width)
+         self.max_ln_scale = np.log(max_scale)
+         self.min_ln_scale = np.log(min_scale)
+
+         if self.ard:
+             # with ARD
+             self.num_params = num_dim + 1
+             if isinstance(width, np.ndarray) and len(width) == self.num_dim:
+                 self.width = width
+             else:
+                 self.width = width * np.ones(self.num_dim)
+         else:
+             # without ARD
+             self.width = width
+             self.num_params = 2
+
+         params = self.cat_params(self.width, self.scale)
+         self.set_params(params)
+
+     def print_params(self):
+         """
+         Show the current kernel parameters.
+
+         """
+
+         print(" Parameters of Gaussian kernel \n ")
+         print(" width = ", +self.width)
+         print(" scale = ", +self.scale)
+         print(" scale2 = ", +self.scale**2)
+         print(" \n")
+
+     def prepare(self, params=None):
+         """
+         Prepare the kernel parameters (the stored values are used if params is None).
+
+         Parameters
+         ----------
+         params: numpy.ndarray
+             parameters
+
+         Returns
+         -------
+         params: numpy.ndarray
+         width: float or numpy.ndarray
+         scale: float
+
+         """
+         if params is None:
+             params = self.params
+             width = self.width
+             scale = self.scale
+         else:
+             params = self.supp_params(params)
+             width, scale = self.decomp_params(params)
+
+         return params, width, scale
+
+     def get_grad(self, X, params=None):
+         """
+         Compute the gradient of the covariance matrix with respect to the (log-scaled) kernel parameters.
+
+         Parameters
+         ----------
+         X: numpy.ndarray
+             N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of each search candidate.
+
+         params: numpy.ndarray
+
+         Returns
+         -------
+         grad: numpy.ndarray
+
+         """
+         num_data = X.shape[0]
+         params, width, scale = self.prepare(params)
+         G = self.get_cov(X, params=params)
+
+         grad = np.zeros((self.num_params, num_data, num_data))
+         if self.ard:
+             grad[0 : self.num_params - 1, :, :] = grad_width64(X, width, G)
+         else:
+             pairwise_dists = spatial.distance.pdist(X / width, "euclidean")
+             grad[0, :, :] = G * spatial.distance.squareform(pairwise_dists**2)
+
+         grad[-1, :, :] = 2 * G
+         return grad
+
+     def get_cov(self, X, Z=None, params=None, diag=False):
+         """
+         Compute the covariance matrix.
+
+         Parameters
+         ----------
+         X: numpy.ndarray
+             N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of each search candidate.
+
+         Z: numpy.ndarray
+             N x d dimensional matrix. Each row of Z denotes the d-dimensional feature vector of each search candidate.
+
+         params: numpy.ndarray
+             Parameters
+
+         diag: bool
+             If True, only the diagonal elements of the covariance matrix are computed.
+
+         Returns
+         -------
+         G: numpy.ndarray
+             covariance matrix
+         """
+         params, width, scale = self.prepare(params)
+         scale2 = scale**2
+
+         if Z is None:
+             if diag:
+                 G = scale2 * np.ones(X.shape[0])
+             else:
+                 pairwise_dists = spatial.distance.squareform(
+                     spatial.distance.pdist(X / width, "euclidean") ** 2
+                 )
+                 G = np.exp(-0.5 * pairwise_dists) * scale2
+         else:
+             pairwise_dists = (
+                 spatial.distance.cdist(X / width, Z / width, "euclidean") ** 2
+             )
+             G = np.exp(-0.5 * pairwise_dists) * scale2
+
+         return G
+
+     def set_params(self, params):
+         """
+         Set the kernel parameters.
+
+         Parameters
+         ----------
+         params: numpy.ndarray
+             Parameters for optimization.
+
+         """
+         params = self.supp_params(params)
+         self.params = params
+         self.width, self.scale = self.decomp_params(params)
+
+     def supp_params(self, params):
+         """
+         Clip the parameters to their maximum (minimum) allowed values when they are greater (less) than those bounds.
+
+         Parameters
+         ----------
+         params: numpy.ndarray
+             Parameters for optimization.
+             Array of real elements of size (n,), where 'n' is the number of independent variables.
+
+         Returns
+         -------
+         params: numpy.ndarray
+
+         """
+         index = np.where(params[0:-1] > self.max_ln_width)
+         params[index[0]] = self.max_ln_width
+
+         index = np.where(params[0:-1] < self.min_ln_width)
+         params[index[0]] = self.min_ln_width
+
+         if params[-1] > self.max_ln_scale:
+             params[-1] = self.max_ln_scale
+
+         if params[-1] < self.min_ln_scale:
+             params[-1] = self.min_ln_scale
+
+         return params
+
+     def decomp_params(self, params):
+         """
+         Decompose the log-scale parameter vector into width and scale parameters.
+
+         Parameters
+         ----------
+         params: numpy.ndarray
+             parameters
+
+         Returns
+         -------
+         width: float
+         scale: float
+         """
+
+         width = np.exp(params[0:-1])
+         scale = np.exp(params[-1])
+         return width, scale
+
+     def save(self, file_name):
+         """
+         Save the Gaussian kernel to a file.
+
+         Parameters
+         ----------
+         file_name: str
+             file name to save the information of the kernel
+
+         """
+         kwarg = {
+             "name": "gauss",
+             "params": self.params,
+             "ard": self.ard,
+             "num_dim": self.num_dim,
+             "max_ln_scale": self.max_ln_scale,
+             "min_ln_scale": self.min_ln_scale,
+             "max_ln_width": self.max_ln_width,
+             "min_ln_width": self.min_ln_width,
+             "num_params": self.num_params,
+         }
+         with open(file_name, "wb") as f:
+             np.savez(f, **kwarg)
+
+     def load(self, file_name):
+         """
+         Recover the Gaussian kernel from a file.
+
+         Parameters
+         ----------
+         file_name: str
+             file name to load the information of the kernel
+
+         """
+         temp = np.load(file_name)
+
+         self.num_dim = temp["num_dim"]
+         self.ard = temp["ard"]
+         self.max_ln_scale = temp["max_ln_scale"]
+         self.min_ln_scale = temp["min_ln_scale"]
+         self.max_ln_width = temp["max_ln_width"]
+         self.min_ln_width = temp["min_ln_width"]
+         params = temp["params"]
+         self.set_params(params)
+
+     def get_params_bound(self):
+         """
+         Get the boundary list for the parameters.
+
+         Returns
+         -------
+         bound: list
+             A list of num_params tuples (min_param, max_param).
+
+         """
+
+         if self.ard:
+             bound = [
+                 (self.min_ln_width, self.max_ln_width) for i in range(0, self.num_dim)
+             ]
+         else:
+             bound = [(self.min_ln_width, self.max_ln_width)]
+
+         bound.append((self.min_ln_scale, self.max_ln_scale))
+         return bound
+
+     def cat_params(self, width, scale):
+         """
+         Take the logarithm of the width and scale parameters
+         and concatenate them into one ndarray.
+
+         Parameters
+         ----------
+         width: float or numpy.ndarray
+         scale: float
+
+         Returns
+         -------
+         params: numpy.ndarray
+             Parameters
+         """
+         params = np.zeros(self.num_params)
+         params[0:-1] = np.log(width)
+         params[-1] = np.log(scale)
+         return params
+
+     def rand_expans(self, num_basis, params=None):
+         """
+         Random-feature expansion of the kernel.
+
+         Parameters
+         ----------
+         num_basis: int
+             total number of basis functions
+         params: numpy.ndarray
+             Parameters
+
+         Returns
+         -------
+         tuple (W, b, amp)
+         """
+         params, width, scale = self.prepare(params)
+         scale2 = scale**2
+         amp = np.sqrt((2 * scale2) / num_basis)
+         W = np.random.randn(num_basis, self.num_dim) / width
+         b = np.random.rand(num_basis) * 2 * np.pi
+         return (W, b, amp)
+
+     def get_cand_params(self, X, t):
+         """
+         Generate candidate kernel parameters from the data.
+
+         Parameters
+         ----------
+         X: numpy.ndarray
+             N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of each search candidate.
+
+         t: numpy.ndarray
+             N dimensional array.
+             The negative energy of each search candidate (value of the objective function to be optimized).
+
+         Returns
+         -------
+         params: numpy.ndarray
+
+         """
+         if self.ard:
+             # with ARD
+             width = np.zeros(self.num_dim)
+             scale = np.std(t)
+             u = np.random.uniform(0.4, 0.8)
+             width = u * (np.max(X, 0) - np.min(X, 0)) * np.sqrt(self.num_dim)
+
+             index = np.where(np.abs(width) < 1e-6)
+             width[index[0]] = 1e-6
+             params = np.append(np.log(width), np.log(scale))
+         else:
+             # without ARD
+             num_data = X.shape[0]
+             M = max(2000, int(np.floor(num_data / 5)))
+
+             dist = np.zeros(M)
+
+             for m in range(M):
+                 a = np.random.randint(0, X.shape[0], 2)
+                 dist[m] = np.linalg.norm(X[a[0], :] - X[a[1], :])
+
+             dist = np.sort(dist)
+             tmp = int(np.floor(M / 10))
+             n = np.random.randint(0, 5)
+             width = dist[(2 * n + 1) * tmp]
+             scale = np.std(t)
+             params = np.append(np.log(width + 1e-8), np.log(scale))
+         return params
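The class above is the Gaussian (RBF) covariance used by the GP model: get_cov builds the kernel matrix from scaled pairwise distances, get_grad differentiates it with respect to the log width and log scale, and rand_expans draws a random-feature approximation of the kernel. A minimal usage sketch follows (illustrative only; the data are made up, and it assumes the wheel is installed so that the compiled _src.enhance_gauss extension resolves):

import numpy as np
from physbo.gp.cov.gauss import gauss

X = np.random.rand(50, 3)                     # 50 hypothetical candidates, 3 features

kern = gauss(num_dim=3, width=3, scale=1, ard=False)
K = kern.get_cov(X)                           # 50 x 50 covariance matrix
dK = kern.get_grad(X)                         # shape (2, 50, 50): d/d(ln width), d/d(ln scale)
W, b, amp = kern.rand_expans(num_basis=1000)  # random-feature expansion of the kernel

print(K.shape, dK.shape, W.shape)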
physbo/gp/inf/__init__.py ADDED
@@ -0,0 +1,8 @@
+ # SPDX-License-Identifier: MPL-2.0
+ # Copyright (C) 2020- The University of Tokyo
+ #
+ # This Source Code Form is subject to the terms of the Mozilla Public
+ # License, v. 2.0. If a copy of the MPL was not distributed with this
+ # file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+ from . import exact
physbo/gp/inf/exact.py ADDED
@@ -0,0 +1,231 @@
+ # SPDX-License-Identifier: MPL-2.0
+ # Copyright (C) 2020- The University of Tokyo
+ #
+ # This Source Code Form is subject to the terms of the Mozilla Public
+ # License, v. 2.0. If a copy of the MPL was not distributed with this
+ # file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+ import numpy as np
+ import scipy
+ from ... import misc
+ from copy import deepcopy
+
+
+ def eval_marlik(gp, X, t, params=None):
+     """
+     Evaluate the negative log marginal likelihood.
+
+     Parameters
+     ----------
+     gp: physbo.gp.core.model
+     X: numpy.ndarray
+         N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of each search candidate.
+
+     t: numpy.ndarray
+         N dimensional array.
+         The negative energy of each search candidate (value of the objective function to be optimized).
+     params: numpy.ndarray
+         Parameters.
+
+     Returns
+     -------
+     marlik: float
+         Negative log marginal likelihood.
+     """
+     ndata, ndims = X.shape
+     lik_params, prior_params = gp.decomp_params(params)
+
+     fmu = gp.prior.get_mean(ndata, params=prior_params)
+     G = gp.prior.get_cov(X, params=prior_params)
+     B = gp.lik.get_cov(ndata, params=lik_params)
+
+     A = G + B + 1e-8 * np.identity(ndata)
+     res = t - fmu
+     U = scipy.linalg.cholesky(A, check_finite=False)
+     alpha = scipy.linalg.solve_triangular(
+         U.transpose(), res, lower=True, overwrite_b=False, check_finite=False
+     )
+     marlik = (
+         0.5 * ndata * np.log(2 * np.pi)
+         + np.sum(np.log(np.diag(U)))
+         + 0.5 * np.inner(alpha, alpha)
+     )
+     return marlik
+
+
+ def get_grad_marlik(gp, X, t, params=None):
+     """
+     Evaluate the gradient of the negative log marginal likelihood.
+
+     Parameters
+     ----------
+     gp: physbo.gp.core.model
+     X: numpy.ndarray
+         N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of each search candidate.
+
+     t: numpy.ndarray
+         N dimensional array.
+         The negative energy of each search candidate (value of the objective function to be optimized).
+     params: numpy.ndarray
+         Parameters.
+
+     Returns
+     -------
+     grad_marlik: numpy.ndarray
+         Gradient of the negative log marginal likelihood.
+     """
+     ndata, ndims = X.shape
+     lik_params, prior_params = gp.decomp_params(params)
+
+     fmu = gp.prior.get_mean(ndata, prior_params)
+     G = gp.prior.get_cov(X, params=prior_params)
+     B = gp.lik.get_cov(ndata, lik_params)
+
+     A = G + B + 1e-8 * np.identity(ndata)
+     U = scipy.linalg.cholesky(A, check_finite=False)
+     res = t - fmu
+     alpha = misc.gauss_elim(U, res)
+     invA = scipy.linalg.inv(A, check_finite=False)
+
+     grad_marlik = np.zeros(gp.num_params)
+
+     """ lik """
+     if gp.lik.num_params != 0:
+         lik_grad = gp.lik.get_grad(ndata, lik_params)
+         temp = lik_grad.dot(alpha)
+         grad_marlik[0 : gp.lik.num_params] = -0.5 * temp.dot(
+             alpha
+         ) + 0.5 * misc.traceAB2(invA, lik_grad)
+
+     ntemp = gp.lik.num_params
+     """ prior """
+     if gp.prior.mean.num_params != 0:
+         mean_grad = gp.prior.get_grad_mean(ndata, prior_params)
+         grad_marlik[ntemp : ntemp + gp.prior.mean.num_params] = -np.inner(
+             alpha, mean_grad
+         )
+
+     ntemp += gp.prior.mean.num_params
+
+     if gp.prior.cov.num_params != 0:
+         cov_grad = gp.prior.get_grad_cov(X, prior_params)
+         temp = cov_grad.dot(alpha)
+         grad_marlik[ntemp:] = -0.5 * temp.dot(alpha) + 0.5 * misc.traceAB3(
+             invA, cov_grad
+         )
+
+     return grad_marlik
+
+
+ def prepare(gp, X, t, params=None):
+     """
+     Compute the Cholesky factor and weight vector used for posterior evaluation.
+
+     Parameters
+     ----------
+     gp: physbo.gp.core.model
+     X: numpy.ndarray
+         N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of each search candidate.
+
+     t: numpy.ndarray
+         N dimensional array.
+         The negative energy of each search candidate (value of the objective function to be optimized).
+     params: numpy.ndarray
+         Parameters.
+
+     Returns
+     -------
+     stats: tuple
+     """
+     ndata = X.shape[0]
+     ndims = X.shape[1]
+
+     if params is None:
+         params = np.copy(gp.params)
+
+     lik_params, prior_params = gp.decomp_params(params)
+
+     G = gp.prior.get_cov(X, params=prior_params)
+     fmu = gp.prior.get_mean(ndata, params=prior_params)
+     B = gp.lik.get_cov(ndata, params=lik_params)
+     A = G + B + 1e-8 * np.identity(ndata)
+     U = scipy.linalg.cholesky(A, check_finite=False)
+     residual = t - fmu
+     alpha = misc.gauss_elim(U, residual)
+     stats = (U, alpha)
+
+     return stats
+
+
+ def get_post_fmean(gp, X, Z, params=None):
+     """
+     Calculate the posterior mean.
+
+     Parameters
+     ----------
+     gp: physbo.gp.core.model
+     X: numpy.ndarray
+         N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of each search candidate.
+     Z: numpy.ndarray
+         N x d dimensional matrix. Each row of Z denotes the d-dimensional feature vector of each test point.
+     params: numpy.ndarray
+         Parameters.
+
+     Returns
+     -------
+     numpy.ndarray
+     """
+
+     ndata = X.shape[0]
+     ndims = X.shape[1]
+     ntest = Z.shape[0]
+
+     lik_params, prior_params = gp.decomp_params(params)
+
+     alpha = gp.stats[1]
+
+     fmu = gp.prior.get_mean(ntest)
+     G = gp.prior.get_cov(X=Z, Z=X, params=prior_params)
+
+     return G.dot(alpha) + fmu
+
+
+ def get_post_fcov(gp, X, Z, params=None, diag=True):
+     """
+     Calculate the posterior covariance.
+
+     Parameters
+     ----------
+     gp: physbo.gp.core.model
+     X: numpy.ndarray
+         N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of each search candidate.
+     Z: numpy.ndarray
+         N x d dimensional matrix. Each row of Z denotes the d-dimensional feature vector of each test point.
+     params: numpy.ndarray
+         Parameters.
+     diag: bool
+         If True, only the diagonal elements of the posterior covariance are returned.
+
+     Returns
+     -------
+     numpy.ndarray
+     """
+
+     lik_params, prior_params = gp.decomp_params(params)
+
+     U = gp.stats[0]
+     alpha = gp.stats[1]
+
+     G = gp.prior.get_cov(X=X, Z=Z, params=prior_params)
+
+     invUG = scipy.linalg.solve_triangular(
+         U.transpose(), G, lower=True, overwrite_b=False, check_finite=False
+     )
+
+     if diag:
+         diagK = gp.prior.get_cov(X=Z, params=prior_params, diag=True)
+         diag_invUG2 = misc.diagAB(invUG.transpose(), invUG)
+         post_cov = diagK - diag_invUG2
+     else:
+         K = gp.prior.get_cov(X=Z, params=prior_params)
+         post_cov = K - np.dot(invUG.transpose(), invUG)
+
+     return post_cov
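For reference, eval_marlik above computes the negative log marginal likelihood of the GP through a Cholesky factorization of A = G + B + 1e-8·I, and get_post_fmean / get_post_fcov reuse the cached factor to evaluate the posterior. A self-contained sketch of the marginal-likelihood algebra on toy data (independent of physbo; the unit-width kernel and the 1e-2 noise level are arbitrary choices for illustration):

import numpy as np
import scipy.linalg

rng = np.random.default_rng(0)
X = rng.random((30, 2))                        # toy candidates
t = rng.random(30)                             # toy objective values

sqdist = ((X[:, None, :] - X[None, :, :]) ** 2).sum(axis=-1)
G = np.exp(-0.5 * sqdist)                      # prior covariance (width = scale = 1)
B = 1e-2 * np.identity(30)                     # Gaussian noise covariance
A = G + B + 1e-8 * np.identity(30)

U = scipy.linalg.cholesky(A)                   # upper triangular, A = U.T @ U
alpha = scipy.linalg.solve_triangular(U.T, t, lower=True)

# negative log marginal likelihood, matching eval_marlik with a zero prior mean
nll = 0.5 * 30 * np.log(2 * np.pi) + np.log(np.diag(U)).sum() + 0.5 * alpha @ alpha
print(nll)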
physbo/gp/lik/__init__.py ADDED
@@ -0,0 +1,8 @@
+ # SPDX-License-Identifier: MPL-2.0
+ # Copyright (C) 2020- The University of Tokyo
+ #
+ # This Source Code Form is subject to the terms of the Mozilla Public
+ # License, v. 2.0. If a copy of the MPL was not distributed with this
+ # file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+ from .gauss import gauss