CUQIpy 1.4.0.post0.dev13__py3-none-any.whl → 1.4.0.post0.dev41__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of CUQIpy might be problematic.
- cuqi/__init__.py +1 -0
- cuqi/_version.py +3 -3
- cuqi/experimental/__init__.py +1 -2
- cuqi/experimental/_recommender.py +4 -4
- cuqi/legacy/__init__.py +2 -0
- cuqi/legacy/sampler/__init__.py +11 -0
- cuqi/legacy/sampler/_conjugate.py +55 -0
- cuqi/legacy/sampler/_conjugate_approx.py +52 -0
- cuqi/legacy/sampler/_cwmh.py +196 -0
- cuqi/legacy/sampler/_gibbs.py +231 -0
- cuqi/legacy/sampler/_hmc.py +335 -0
- cuqi/legacy/sampler/_langevin_algorithm.py +198 -0
- cuqi/legacy/sampler/_laplace_approximation.py +184 -0
- cuqi/legacy/sampler/_mh.py +190 -0
- cuqi/legacy/sampler/_pcn.py +244 -0
- cuqi/legacy/sampler/_rto.py +284 -0
- cuqi/legacy/sampler/_sampler.py +182 -0
- cuqi/problem/_problem.py +87 -80
- cuqi/sampler/__init__.py +120 -8
- cuqi/sampler/_conjugate.py +376 -35
- cuqi/sampler/_conjugate_approx.py +40 -16
- cuqi/sampler/_cwmh.py +132 -138
- cuqi/{experimental/mcmc → sampler}/_direct.py +1 -1
- cuqi/sampler/_gibbs.py +269 -130
- cuqi/sampler/_hmc.py +328 -201
- cuqi/sampler/_langevin_algorithm.py +282 -98
- cuqi/sampler/_laplace_approximation.py +87 -117
- cuqi/sampler/_mh.py +47 -157
- cuqi/sampler/_pcn.py +56 -211
- cuqi/sampler/_rto.py +206 -140
- cuqi/sampler/_sampler.py +540 -135
- {cuqipy-1.4.0.post0.dev13.dist-info → cuqipy-1.4.0.post0.dev41.dist-info}/METADATA +1 -1
- {cuqipy-1.4.0.post0.dev13.dist-info → cuqipy-1.4.0.post0.dev41.dist-info}/RECORD +36 -35
- cuqi/experimental/mcmc/__init__.py +0 -122
- cuqi/experimental/mcmc/_conjugate.py +0 -396
- cuqi/experimental/mcmc/_conjugate_approx.py +0 -76
- cuqi/experimental/mcmc/_cwmh.py +0 -190
- cuqi/experimental/mcmc/_gibbs.py +0 -366
- cuqi/experimental/mcmc/_hmc.py +0 -462
- cuqi/experimental/mcmc/_langevin_algorithm.py +0 -382
- cuqi/experimental/mcmc/_laplace_approximation.py +0 -154
- cuqi/experimental/mcmc/_mh.py +0 -80
- cuqi/experimental/mcmc/_pcn.py +0 -89
- cuqi/experimental/mcmc/_rto.py +0 -350
- cuqi/experimental/mcmc/_sampler.py +0 -582
- {cuqipy-1.4.0.post0.dev13.dist-info → cuqipy-1.4.0.post0.dev41.dist-info}/WHEEL +0 -0
- {cuqipy-1.4.0.post0.dev13.dist-info → cuqipy-1.4.0.post0.dev41.dist-info}/licenses/LICENSE +0 -0
- {cuqipy-1.4.0.post0.dev13.dist-info → cuqipy-1.4.0.post0.dev41.dist-info}/top_level.txt +0 -0
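The file list above shows one reorganization done in three moves: the new-style samplers graduate from `cuqi.experimental.mcmc` into `cuqi.sampler`, the previous `cuqi.sampler` implementations are preserved under a new `cuqi.legacy.sampler` package, and `cuqi.experimental.mcmc` is removed. A minimal import sketch of what this implies for user code; the re-export of `LinearRTO` from both modules is inferred from the file list and the docstring cross-references in the hunks below, not verified against the wheel.

# Before this release the new-style sampler lived in the experimental namespace:
# from cuqi.experimental.mcmc import LinearRTO

# After this release it is promoted to the main sampler module...
from cuqi.sampler import LinearRTO

# ...while the previous implementation stays importable under cuqi.legacy.sampler
# (it now emits a deprecation warning, see the legacy Sampler base class below).
from cuqi.legacy.sampler import LinearRTO as LegacyLinearRTO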
cuqi/legacy/sampler/_rto.py
@@ -0,0 +1,284 @@
+import scipy as sp
+from scipy.linalg.interpolative import estimate_spectral_norm
+from scipy.sparse.linalg import LinearOperator as scipyLinearOperator
+import numpy as np
+import cuqi
+from cuqi.solver import CGLS, FISTA
+from cuqi.legacy.sampler import Sampler
+
+
+class LinearRTO(Sampler):
+    """
+    Linear RTO (Randomize-Then-Optimize) sampler.
+
+    Samples posterior related to the inverse problem with Gaussian likelihood and prior, and where the forward model is linear or more generally affine.
+
+    Parameters
+    ------------
+    target : `cuqi.distribution.Posterior`, `cuqi.distribution.MultipleLikelihoodPosterior` or 5-dimensional tuple.
+        If target is of type cuqi.distribution.Posterior or cuqi.distribution.MultipleLikelihoodPosterior, it represents the posterior distribution.
+        If target is a 5-dimensional tuple, it assumes the following structure:
+        (data, model, L_sqrtprec, P_mean, P_sqrtrec)
+
+        Here:
+        data: is a m-dimensional numpy array containing the measured data.
+        model: is a m by n dimensional matrix, AffineModel or LinearModel representing the forward model.
+        L_sqrtprec: is the squareroot of the precision matrix of the Gaussian likelihood.
+        P_mean: is the prior mean.
+        P_sqrtprec: is the squareroot of the precision matrix of the Gaussian mean.
+
+    x0 : `np.ndarray`
+        Initial point for the sampler. *Optional*.
+
+    maxit : int
+        Maximum number of iterations of the inner CGLS solver. *Optional*.
+
+    tol : float
+        Tolerance of the inner CGLS solver. *Optional*.
+
+    callback : callable, *Optional*
+        If set this function will be called after every sample.
+        The signature of the callback function is `callback(sample, sample_index)`,
+        where `sample` is the current sample and `sample_index` is the index of the sample.
+        An example is shown in demos/demo31_callback.py.
+
+    """
+    def __init__(self, target, x0=None, maxit=10, tol=1e-6, shift=0, **kwargs):
+
+        # Accept tuple of inputs and construct posterior
+        if isinstance(target, tuple) and len(target) == 5:
+            # Structure (data, model, L_sqrtprec, P_mean, P_sqrtprec)
+            data = target[0]
+            model = target[1]
+            L_sqrtprec = target[2]
+            P_mean = target[3]
+            P_sqrtprec = target[4]
+
+            # If numpy matrix convert to CUQI model
+            if isinstance(model, np.ndarray) and len(model.shape) == 2:
+                model = cuqi.model.LinearModel(model)
+
+            # Check model input
+            if not isinstance(model, cuqi.model.AffineModel):
+                raise TypeError("Model needs to be cuqi.model.AffineModel or matrix")
+
+            # Likelihood
+            L = cuqi.distribution.Gaussian(model, sqrtprec=L_sqrtprec).to_likelihood(data)
+
+            # Prior TODO: allow multiple priors stacked
+            #if isinstance(P_mean, list) and isinstance(P_sqrtprec, list):
+            #    P = cuqi.distribution.JointGaussianSqrtPrec(P_mean, P_sqrtprec)
+            #else:
+            P = cuqi.distribution.Gaussian(P_mean, sqrtprec=P_sqrtprec)
+
+            # Construct posterior
+            target = cuqi.distribution.Posterior(L, P)
+
+        super().__init__(target, x0=x0, **kwargs)
+
+        self._check_posterior()
+
+        # Modify initial guess
+        if x0 is not None:
+            self.x0 = x0
+        else:
+            self.x0 = np.zeros(self.prior.dim)
+
+        # Other parameters
+        self.maxit = maxit
+        self.tol = tol
+        self.shift = 0
+
+        L1 = [likelihood.distribution.sqrtprec for likelihood in self.likelihoods]
+        L2 = self.prior.sqrtprec
+        L2mu = self.prior.sqrtprecTimesMean
+
+        # pre-computations
+        self.n = len(self.x0)
+        self.b_tild = np.hstack([L@(likelihood.data - model._shift) for (L, likelihood, model) in zip(L1, self.likelihoods, self.models)]+ [L2mu])
+
+        callability = [callable(likelihood.model) for likelihood in self.likelihoods]
+        notcallability = [not c for c in callability]
+        if all(notcallability):
+            self.M = sp.sparse.vstack([L@likelihood.model for (L, likelihood) in zip(L1, self.likelihoods)] + [L2])
+        elif all(callability):
+            # in this case, model is a function doing forward and backward operations
+            def M(x, flag):
+                if flag == 1:
+                    out1 = [L @ likelihood.model._forward_func_no_shift(x) for (L, likelihood) in zip(L1, self.likelihoods)] # Use forward function which excludes shift
+                    out2 = L2 @ x
+                    out = np.hstack(out1 + [out2])
+                elif flag == 2:
+                    idx_start = 0
+                    idx_end = 0
+                    out1 = np.zeros(self.n)
+                    for likelihood in self.likelihoods:
+                        idx_end += len(likelihood.data)
+                        out1 += likelihood.model._adjoint_func_no_shift(likelihood.distribution.sqrtprec.T@x[idx_start:idx_end]) # Use adjoint function which excludes shift
+                        idx_start = idx_end
+                    out2 = L2.T @ x[idx_end:]
+                    out = out1 + out2
+                return out
+            self.M = M
+        else:
+            raise TypeError("All likelihoods need to be callable or none need to be callable.")
+
+    @property
+    def prior(self):
+        return self.target.prior
+
+    @property
+    def likelihood(self):
+        return self.target.likelihood
+
+    @property
+    def likelihoods(self):
+        if isinstance(self.target, cuqi.distribution.Posterior):
+            return [self.target.likelihood]
+        elif isinstance(self.target, cuqi.distribution.MultipleLikelihoodPosterior):
+            return self.target.likelihoods
+
+    @property
+    def model(self):
+        return self.target.model
+
+    @property
+    def models(self):
+        if isinstance(self.target, cuqi.distribution.Posterior):
+            return [self.target.model]
+        elif isinstance(self.target, cuqi.distribution.MultipleLikelihoodPosterior):
+            return self.target.models
+
+    def _sample(self, N, Nb):
+        Ns = N+Nb   # number of simulations
+        samples = np.empty((self.n, Ns))
+
+        # initial state
+        samples[:, 0] = self.x0
+        for s in range(Ns-1):
+            y = self.b_tild + np.random.randn(len(self.b_tild))
+            sim = CGLS(self.M, y, samples[:, s], self.maxit, self.tol, self.shift)
+            samples[:, s+1], _ = sim.solve()
+
+            self._print_progress(s+2,Ns) #s+2 is the sample number, s+1 is index assuming x0 is the first sample
+            self._call_callback(samples[:, s+1], s+1)
+
+        # remove burn-in
+        samples = samples[:, Nb:]
+
+        return samples, None, None
+
+    def _sample_adapt(self, N, Nb):
+        return self._sample(N,Nb)
+
+    def _check_posterior(self):
+        # Check target type
+        if not isinstance(self.target, (cuqi.distribution.Posterior, cuqi.distribution.MultipleLikelihoodPosterior)):
+            raise ValueError(f"To initialize an object of type {self.__class__}, 'target' need to be of type 'cuqi.distribution.Posterior' or 'cuqi.distribution.MultipleLikelihoodPosterior'.")
+
+        # Check Linear model and Gaussian likelihood(s)
+        if isinstance(self.target, cuqi.distribution.Posterior):
+            if not isinstance(self.model, cuqi.model.AffineModel):
+                raise TypeError("Model needs to be linear or affine")
+
+            if not hasattr(self.likelihood.distribution, "sqrtprec"):
+                raise TypeError("Distribution in Likelihood must contain a sqrtprec attribute")
+
+        elif isinstance(self.target, cuqi.distribution.MultipleLikelihoodPosterior): # Elif used for further alternatives, e.g., stacked posterior
+            for likelihood in self.likelihoods:
+                if not isinstance(likelihood.model, cuqi.model.LinearModel):
+                    raise TypeError("Model needs to be linear")
+
+                if not hasattr(likelihood.distribution, "sqrtprec"):
+                    raise TypeError("Distribution in Likelihood must contain a sqrtprec attribute")
+
+        # Check Gaussian prior
+        if not hasattr(self.prior, "sqrtprec"):
+            raise TypeError("prior must contain a sqrtprec attribute")
+
+        if not hasattr(self.prior, "sqrtprecTimesMean"):
+            raise TypeError("Prior must contain a sqrtprecTimesMean attribute")
+
+
+class RegularizedLinearRTO(LinearRTO):
+    """
+    Regularized Linear RTO (Randomize-Then-Optimize) sampler.
+
+    Samples posterior related to the inverse problem with Gaussian likelihood and implicit Gaussian prior, and where the forward model is Linear.
+
+    Parameters
+    ------------
+    target : `cuqi.distribution.Posterior`
+        See `cuqi.legacy.sampler.LinearRTO`
+
+    x0 : `np.ndarray`
+        Initial point for the sampler. *Optional*.
+
+    maxit : int
+        Maximum number of iterations of the inner FISTA solver. *Optional*.
+
+    stepsize : string or float
+        If stepsize is a string and equals either "automatic", then the stepsize is automatically estimated based on the spectral norm.
+        If stepsize is a float, then this stepsize is used.
+
+    abstol : float
+        Absolute tolerance of the inner FISTA solver. *Optional*.
+
+    callback : callable, *Optional*
+        If set this function will be called after every sample.
+        The signature of the callback function is `callback(sample, sample_index)`,
+        where `sample` is the current sample and `sample_index` is the index of the sample.
+        An example is shown in demos/demo31_callback.py.
+
+    """
+    def __init__(self, target, x0=None, maxit=100, stepsize = "automatic", abstol=1e-10, adaptive = True, **kwargs):
+
+        if not callable(target.prior.proximal):
+            raise TypeError("Projector needs to be callable")
+
+        super().__init__(target, x0=x0, maxit=100, **kwargs)
+
+        # Other parameters
+        self.stepsize = stepsize
+        self.abstol = abstol
+        self.adaptive = adaptive
+        self.proximal = target.prior.proximal
+
+    @property
+    def prior(self):
+        return self.target.prior.gaussian
+
+    def _sample(self, N, Nb):
+        Ns = N+Nb   # number of simulations
+        samples = np.empty((self.n, Ns))
+
+        if isinstance(self.stepsize, str):
+            if self.stepsize in ["automatic"]:
+                if not callable(self.M):
+                    M_op = scipyLinearOperator(self.M.shape, matvec = lambda v: self.M@v, rmatvec = lambda w: self.M.T@w)
+                else:
+                    M_op = scipyLinearOperator((len(self.b_tild), self.n), matvec = lambda v: self.M(v,1), rmatvec = lambda w: self.M(w,2))
+
+                _stepsize = 0.99/(estimate_spectral_norm(M_op)**2)
+                # print(f"Estimated stepsize for regularized Linear RTO: {_stepsize}")
+            else:
+                raise ValueError("Stepsize choice not supported")
+        else:
+            _stepsize = self.stepsize
+
+        # initial state
+        samples[:, 0] = self.x0
+        for s in range(Ns-1):
+            y = self.b_tild + np.random.randn(len(self.b_tild))
+            sim = FISTA(self.M, y, self.proximal,
+                        samples[:, s], maxit = self.maxit, stepsize = _stepsize, abstol = self.abstol, adaptive = self.adaptive)
+            samples[:, s+1], _ = sim.solve()
+
+            self._print_progress(s+2,Ns) #s+2 is the sample number, s+1 is index assuming x0 is the first sample
+            self._call_callback(samples[:, s+1], s+1)
+        # remove burn-in
+        samples = samples[:, Nb:]
+
+        return samples, None, None
+
+
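The `LinearRTO` docstring in the hunk above documents a 5-tuple form of `target`, (data, model, L_sqrtprec, P_mean, P_sqrtprec), plus an optional per-sample callback. A minimal usage sketch of the legacy class under those conventions; the toy forward matrix, noise level, and dimensions below are made up for illustration, and the sketch has not been run against this wheel.

import numpy as np
from cuqi.legacy.sampler import LinearRTO

# Hypothetical toy problem: 10 noisy observations of a 5-dimensional unknown.
rng = np.random.default_rng(0)
A = rng.standard_normal((10, 5))
x_true = rng.standard_normal(5)
y_obs = A @ x_true + 0.05 * rng.standard_normal(10)

# 5-tuple target: (data, model, L_sqrtprec, P_mean, P_sqrtprec), as documented above.
target = (y_obs, A, (1 / 0.05) * np.eye(10), np.zeros(5), np.eye(5))

sampler = LinearRTO(target, maxit=50, tol=1e-8)
samples = sampler.sample(N=200, Nb=20)   # 200 retained samples after 20 burn-in draws
print(samples.samples.shape)             # expected (5, 200)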
cuqi/legacy/sampler/_sampler.py
@@ -0,0 +1,182 @@
+from abc import ABC, abstractmethod
+import sys
+import numpy as np
+import cuqi
+from cuqi.samples import Samples
+import warnings
+
+class Sampler(ABC):
+
+    def __init__(self, target, x0=None, dim=None, callback=None):
+
+        warnings.warn(f"\nYou are using the legacy sampler '{self.__class__.__name__}'.\n"
+                      f"This will be removed in a future release of CUQIpy.\n"
+                      f"Please consider using the new samplers in the 'cuqi.sampler' module.\n", UserWarning, stacklevel=2)
+
+        self._dim = dim
+        if hasattr(target,'dim'):
+            if self._dim is None:
+                self._dim = target.dim
+            elif self._dim != target.dim:
+                raise ValueError("'dim' need to be None or equal to 'target.dim'")
+        elif x0 is not None:
+            self._dim = len(x0)
+
+        self.target = target
+
+        if x0 is None:
+            x0 = np.ones(self.dim)
+        self.x0 = x0
+
+        self.callback = callback
+
+    def step(self, x):
+        """
+        Perform a single MCMC step
+        """
+        # Currently a hack to get step method for any sampler
+        self.x0 = x
+        return self.sample(2).samples[:,-1]
+
+    def step_tune(self, x, *args, **kwargs):
+        """
+        Perform a single MCMC step and tune the sampler. This is used during burn-in.
+        """
+        # Currently a hack to get step method for any sampler
+        out = self.step(x)
+        self.tune(*args, *kwargs)
+        return out
+
+    def tune(self):
+        """
+        Tune the sampler parameters.
+        """
+        pass
+
+
+    @property
+    def geometry(self):
+        if hasattr(self, 'target') and hasattr(self.target, 'geometry'):
+            geom = self.target.geometry
+        else:
+            geom = cuqi.geometry._DefaultGeometry1D(self.dim)
+        return geom
+
+    @property
+    def target(self):
+        return self._target
+
+    @target.setter
+    def target(self, value):
+        if not isinstance(value, cuqi.distribution.Distribution) and callable(value):
+            # obtain self.dim
+            if self.dim is not None:
+                dim = self.dim
+            else:
+                raise ValueError(f"If 'target' is a lambda function, the parameter 'dim' need to be specified when initializing {self.__class__}.")
+
+            # set target
+            self._target = cuqi.distribution.UserDefinedDistribution(logpdf_func=value, dim = dim)
+
+        elif isinstance(value, cuqi.distribution.Distribution):
+            self._target = value
+        else:
+            raise ValueError("'target' need to be either a lambda function or of type 'cuqi.distribution.Distribution'")
+
+
+    @property
+    def dim(self):
+        if hasattr(self,'target') and hasattr(self.target,'dim'):
+            self._dim = self.target.dim
+        return self._dim
+
+
+    def sample(self,N,Nb=0):
+        # Get samples from the samplers sample method
+        result = self._sample(N,Nb)
+        return self._create_Sample_object(result,N+Nb)
+
+    def sample_adapt(self,N,Nb=0):
+        # Get samples from the samplers sample method
+        result = self._sample_adapt(N,Nb)
+        return self._create_Sample_object(result,N+Nb)
+
+    def _create_Sample_object(self,result,N):
+        loglike_eval = None
+        acc_rate = None
+        if isinstance(result,tuple):
+            #Unpack samples+loglike+acc_rate
+            s = result[0]
+            if len(result)>1: loglike_eval = result[1]
+            if len(result)>2: acc_rate = result[2]
+            if len(result)>3: raise TypeError("Expected tuple of at most 3 elements from sampling method.")
+        else:
+            s = result
+
+        #Store samples in cuqi samples object if more than 1 sample
+        if N==1:
+            if len(s) == 1 and isinstance(s,np.ndarray): #Extract single value from numpy array
+                s = s.ravel()[0]
+            else:
+                s = s.flatten()
+        else:
+            s = Samples(s, self.geometry)#, geometry = self.geometry)
+            s.loglike_eval = loglike_eval
+            s.acc_rate = acc_rate
+        return s
+
+    @abstractmethod
+    def _sample(self,N,Nb):
+        pass
+
+    @abstractmethod
+    def _sample_adapt(self,N,Nb):
+        pass
+
+    def _print_progress(self,s,Ns):
+        """Prints sampling progress"""
+        if Ns > 2:
+            if (s % (max(Ns//100,1))) == 0:
+                msg = f'Sample {s} / {Ns}'
+                sys.stdout.write('\r'+msg)
+            if s==Ns:
+                msg = f'Sample {s} / {Ns}'
+                sys.stdout.write('\r'+msg+'\n')
+
+    def _call_callback(self, sample, sample_index):
+        """ Calls the callback function. Assumes input is sample and sample index"""
+        if self.callback is not None:
+            self.callback(sample, sample_index)
+
+class ProposalBasedSampler(Sampler,ABC):
+    def __init__(self, target, proposal=None, scale=1, x0=None, dim=None, **kwargs):
+        #TODO: after fixing None dim
+        #if dim is None and hasattr(proposal,'dim'):
+        #    dim = proposal.dim
+        super().__init__(target, x0=x0, dim=dim, **kwargs)
+
+        self.proposal = proposal
+        self.scale = scale
+
+
+    @property
+    def proposal(self):
+        return self._proposal
+
+    @proposal.setter
+    def proposal(self, value):
+        self._proposal = value
+
+    @property
+    def geometry(self):
+        geom1, geom2 = None, None
+        if hasattr(self, 'proposal') and hasattr(self.proposal, 'geometry') and self.proposal.geometry.par_dim is not None:
+            geom1 = self.proposal.geometry
+        if hasattr(self, 'target') and hasattr(self.target, 'geometry') and self.target.geometry.par_dim is not None:
+            geom2 = self.target.geometry
+        if not isinstance(geom1,cuqi.geometry._DefaultGeometry) and geom1 is not None:
+            return geom1
+        elif not isinstance(geom2,cuqi.geometry._DefaultGeometry) and geom2 is not None:
+            return geom2
+        else:
+            return cuqi.geometry._DefaultGeometry1D(self.dim)
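The abstract base class above defines the legacy contract: subclasses implement `_sample(N, Nb)` and `_sample_adapt(N, Nb)` and return either raw samples or a tuple of samples, log-likelihood evaluations, and acceptance rate. A minimal sketch of a custom sampler against this interface; the class name and the i.i.d. Gaussian "sampling" are invented purely to illustrate the hooks.

import numpy as np
import cuqi
from cuqi.legacy.sampler import Sampler

class IIDGaussianSampler(Sampler):
    """Toy sampler that draws i.i.d. standard-normal vectors (illustration only)."""

    def _sample(self, N, Nb):
        Ns = N + Nb
        samples = np.random.standard_normal((self.dim, Ns))
        # Mirror the built-in samplers: drop burn-in, return (samples, loglike_eval, acc_rate).
        return samples[:, Nb:], None, None

    def _sample_adapt(self, N, Nb):
        return self._sample(N, Nb)

target = cuqi.distribution.Gaussian(np.zeros(3), 1)
sampler = IIDGaussianSampler(target)     # triggers the legacy-sampler UserWarning shown above
result = sampler.sample(100, Nb=10)      # cuqi.samples.Samples with samples of shape (3, 100)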