CUQIpy 1.3.0.post0.dev401__py3-none-any.whl → 1.4.0.post0.dev41__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (50)
  1. cuqi/__init__.py +1 -0
  2. cuqi/_version.py +3 -3
  3. cuqi/density/_density.py +9 -1
  4. cuqi/distribution/_joint_distribution.py +96 -11
  5. cuqi/experimental/__init__.py +1 -2
  6. cuqi/experimental/_recommender.py +4 -4
  7. cuqi/legacy/__init__.py +2 -0
  8. cuqi/legacy/sampler/__init__.py +11 -0
  9. cuqi/legacy/sampler/_conjugate.py +55 -0
  10. cuqi/legacy/sampler/_conjugate_approx.py +52 -0
  11. cuqi/legacy/sampler/_cwmh.py +196 -0
  12. cuqi/legacy/sampler/_gibbs.py +231 -0
  13. cuqi/legacy/sampler/_hmc.py +335 -0
  14. cuqi/legacy/sampler/_langevin_algorithm.py +198 -0
  15. cuqi/legacy/sampler/_laplace_approximation.py +184 -0
  16. cuqi/legacy/sampler/_mh.py +190 -0
  17. cuqi/legacy/sampler/_pcn.py +244 -0
  18. cuqi/legacy/sampler/_rto.py +284 -0
  19. cuqi/legacy/sampler/_sampler.py +182 -0
  20. cuqi/problem/_problem.py +87 -80
  21. cuqi/sampler/__init__.py +120 -8
  22. cuqi/sampler/_conjugate.py +376 -35
  23. cuqi/sampler/_conjugate_approx.py +40 -16
  24. cuqi/sampler/_cwmh.py +132 -138
  25. cuqi/{experimental/mcmc → sampler}/_direct.py +1 -1
  26. cuqi/sampler/_gibbs.py +269 -130
  27. cuqi/sampler/_hmc.py +328 -201
  28. cuqi/sampler/_langevin_algorithm.py +282 -98
  29. cuqi/sampler/_laplace_approximation.py +87 -117
  30. cuqi/sampler/_mh.py +47 -157
  31. cuqi/sampler/_pcn.py +56 -211
  32. cuqi/sampler/_rto.py +206 -140
  33. cuqi/sampler/_sampler.py +540 -135
  34. {cuqipy-1.3.0.post0.dev401.dist-info → cuqipy-1.4.0.post0.dev41.dist-info}/METADATA +1 -1
  35. {cuqipy-1.3.0.post0.dev401.dist-info → cuqipy-1.4.0.post0.dev41.dist-info}/RECORD +38 -37
  36. cuqi/experimental/mcmc/__init__.py +0 -122
  37. cuqi/experimental/mcmc/_conjugate.py +0 -396
  38. cuqi/experimental/mcmc/_conjugate_approx.py +0 -76
  39. cuqi/experimental/mcmc/_cwmh.py +0 -190
  40. cuqi/experimental/mcmc/_gibbs.py +0 -366
  41. cuqi/experimental/mcmc/_hmc.py +0 -462
  42. cuqi/experimental/mcmc/_langevin_algorithm.py +0 -382
  43. cuqi/experimental/mcmc/_laplace_approximation.py +0 -154
  44. cuqi/experimental/mcmc/_mh.py +0 -80
  45. cuqi/experimental/mcmc/_pcn.py +0 -89
  46. cuqi/experimental/mcmc/_rto.py +0 -350
  47. cuqi/experimental/mcmc/_sampler.py +0 -582
  48. {cuqipy-1.3.0.post0.dev401.dist-info → cuqipy-1.4.0.post0.dev41.dist-info}/WHEEL +0 -0
  49. {cuqipy-1.3.0.post0.dev401.dist-info → cuqipy-1.4.0.post0.dev41.dist-info}/licenses/LICENSE +0 -0
  50. {cuqipy-1.3.0.post0.dev401.dist-info → cuqipy-1.4.0.post0.dev41.dist-info}/top_level.txt +0 -0
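Judging from the file list above, the sampler implementations previously under cuqi.experimental.mcmc now live directly in cuqi.sampler, while the pre-existing sampler classes are preserved under the new cuqi.legacy.sampler package. A minimal import sketch of what that move would imply (class names are illustrative only; the exact exports are defined by each subpackage's own __init__.py):

# CUQIpy 1.3.x layout: new-style samplers lived in the experimental namespace
# from cuqi.experimental.mcmc import MH

# CUQIpy 1.4.x layout: the same modules now sit under cuqi.sampler
# from cuqi.sampler import MH

# CUQIpy 1.4.x: the previous sampler implementations remain importable as legacy code,
# e.g. the pCN and LinearRTO classes added in the two new files shown below.
from cuqi.legacy.sampler import pCN, LinearRTO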
cuqi/legacy/sampler/_pcn.py
@@ -0,0 +1,244 @@
+import numpy as np
+import cuqi
+from cuqi.legacy.sampler import Sampler
+
+class pCN(Sampler):
+    # Samples target*proposal
+    # TODO. Check proposal, needs to be Gaussian and zero mean.
+    """Preconditioned Crank-Nicolson sampler
+
+    Parameters
+    ----------
+    target : `cuqi.distribution.Posterior` or tuple of likelihood and prior objects
+        If target is of type cuqi.distribution.Posterior, it represents the posterior distribution.
+        If target is a tuple of (cuqi.likelihood.Likelihood, cuqi.distribution.Distribution) objects,
+        the first element is considered the likelihood and the second is considered the prior.
+
+    scale : float
+        Scale parameter of the pCN proposal, in (0, 1]. If not set, `sample_adapt` starts from 0.1 and adapts it towards a target acceptance rate of 0.44.
+    x0 : `np.ndarray`
+        Initial point for the sampler.
+
+    callback : callable, *Optional*
+        If set, this function will be called after every sample.
+        The signature of the callback function is `callback(sample, sample_index)`,
+        where `sample` is the current sample and `sample_index` is the index of the sample.
+        An example is shown in demos/demo31_callback.py.
+
+    Example
+    -------
+
+    This uses a custom logpdf and sample function.
+
+    .. code-block:: python
+
+        # Parameters
+        dim = 5 # Dimension of distribution
+        mu = np.arange(dim) # Mean of Gaussian
+        std = 1 # standard deviation of Gaussian
+
+        # Logpdf function of likelihood
+        logpdf_func = lambda x: -1/(std**2)*np.sum((x-mu)**2)
+
+        # sample function of prior N(0,I)
+        sample_func = lambda : 0 + 1*np.random.randn(dim,1)
+
+        # Define as UserDefinedDistributions
+        likelihood = cuqi.likelihood.UserDefinedLikelihood(dim=dim, logpdf_func=logpdf_func)
+        prior = cuqi.distribution.UserDefinedDistribution(dim=dim, sample_func=sample_func)
+
+        # Set up sampler
+        sampler = cuqi.legacy.sampler.pCN((likelihood, prior), scale = 0.1)
+
+        # Sample
+        samples = sampler.sample(5000)
+
+    Example
+    -------
+
+    This uses CUQIpy distributions.
+
+    .. code-block:: python
+
+        # Parameters
+        dim = 5 # Dimension of distribution
+        mu = np.arange(dim) # Mean of Gaussian
+        std = 1 # standard deviation of Gaussian
+
+        # Define model, likelihood and prior using CUQIpy objects
+        model = cuqi.model.Model(lambda x: x, range_geometry=dim, domain_geometry=dim)
+        likelihood = cuqi.distribution.Gaussian(mean=model, cov=np.ones(dim)).to_likelihood(mu)
+        prior = cuqi.distribution.Gaussian(mean=np.zeros(dim), cov=1)
+
+        target = cuqi.distribution.Posterior(likelihood, prior)
+
+        # Set up sampler
+        sampler = cuqi.legacy.sampler.pCN(target, scale = 0.1)
+
+        # Sample
+        samples = sampler.sample(5000)
+
+    """
+    def __init__(self, target, scale=None, x0=None, **kwargs):
+        super().__init__(target, x0=x0, dim=None, **kwargs)
+        self.scale = scale
+
+    @property
+    def prior(self):
+        if isinstance(self.target, cuqi.distribution.Posterior):
+            return self.target.prior
+        elif isinstance(self.target, tuple) and len(self.target) == 2:
+            return self.target[1]
+
+    @property
+    def likelihood(self):
+        if isinstance(self.target, cuqi.distribution.Posterior):
+            return self.target.likelihood
+        elif isinstance(self.target, tuple) and len(self.target) == 2:
+            return self.target[0]
+
+
+    @Sampler.target.setter
+    def target(self, value):
+        if isinstance(value, cuqi.distribution.Posterior):
+            self._target = value
+            self._loglikelihood = lambda x: self.likelihood.logd(x)
+        elif isinstance(value, tuple) and len(value) == 2 and \
+             (isinstance(value[0], cuqi.likelihood.Likelihood) or isinstance(value[0], cuqi.likelihood.UserDefinedLikelihood)) and \
+             isinstance(value[1], cuqi.distribution.Distribution):
+            self._target = value
+            self._loglikelihood = lambda x: self.likelihood.logd(x)
+        else:
+            raise ValueError(f"To initialize an object of type {self.__class__}, 'target' needs to be of type 'cuqi.distribution.Posterior' or a (likelihood, prior) tuple.")
+
+        # TODO:
+        # if not isinstance(self.prior, (cuqi.distribution.Gaussian, cuqi.distribution.Normal)):
+        #     raise ValueError("The prior distribution of the target needs to be Gaussian")
+
+    @property
+    def dim(self):
+        if hasattr(self, 'target') and hasattr(self.target, 'dim'):
+            self._dim = self.target.dim
+        elif hasattr(self, 'target') and isinstance(self.target, tuple) and len(self.target) == 2:
+            self._dim = self.target[0].dim
+        return self._dim
+
+    def _sample(self, N, Nb):
+        if self.scale is None:
+            raise ValueError("Scale must be set to sample without adaptation. Consider using sample_adapt instead.")
+
+        Ns = N+Nb   # number of simulations
+
+        # allocation
+        samples = np.empty((self.dim, Ns))
+        loglike_eval = np.empty(Ns)
+        acc = np.zeros(Ns, dtype=int)
+
+        # initial state
+        samples[:, 0] = self.x0
+        loglike_eval[0] = self._loglikelihood(self.x0)
+        acc[0] = 1
+
+        # run MCMC
+        for s in range(Ns-1):
+            # run component by component
+            samples[:, s+1], loglike_eval[s+1], acc[s+1] = self.single_update(samples[:, s], loglike_eval[s])
+
+            self._print_progress(s+2, Ns)   # s+2 is the sample number, s+1 is index assuming x0 is the first sample
+            self._call_callback(samples[:, s+1], s+1)
+
+        # remove burn-in
+        samples = samples[:, Nb:]
+        loglike_eval = loglike_eval[Nb:]
+        accave = acc[Nb:].mean()
+        print('\nAverage acceptance rate:', accave, '\n')
+
+        return samples, loglike_eval, accave
+
+    def _sample_adapt(self, N, Nb):
+        # Set initial scale if not set
+        if self.scale is None:
+            self.scale = 0.1
+
+        Ns = N+Nb   # number of simulations
+
+        # allocation
+        samples = np.empty((self.dim, Ns))
+        loglike_eval = np.empty(Ns)
+        acc = np.zeros(Ns)
+
+        # initial state
+        samples[:, 0] = self.x0
+        loglike_eval[0] = self._loglikelihood(self.x0)
+        acc[0] = 1
+
+        # initial adaptation params
+        Na = int(0.1*N)                              # iterations to adapt
+        hat_acc = np.empty(int(np.floor(Ns/Na)))     # average acceptance rate of the chains
+        lambd = self.scale
+        star_acc = 0.44                              # target acceptance rate RW
+        i, idx = 0, 0
+
+        # run MCMC
+        for s in range(Ns-1):
+            # run component by component
+            samples[:, s+1], loglike_eval[s+1], acc[s+1] = self.single_update(samples[:, s], loglike_eval[s])
+
+            # adapt prop spread using acc of past samples
+            if ((s+1) % Na == 0):
+                # evaluate average acceptance rate
+                hat_acc[i] = np.mean(acc[idx:idx+Na])
+
+                # d. compute new scaling parameter
+                zeta = 1/np.sqrt(i+1)   # ensures that the variation of lambda(i) vanishes
+                lambd = np.exp(np.log(lambd) + zeta*(hat_acc[i]-star_acc))
+
+                # update parameters
+                self.scale = min(lambd, 1)
+
+                # update counters
+                i += 1
+                idx += Na
+
+            # display iterations
+            if ((s+1) % (max(Ns//100, 1))) == 0 or (s+1) == Ns-1:
+                print("\r", 'Sample', s+1, '/', Ns, end="")
+
+            self._call_callback(samples[:, s+1], s+1)
+
+        print("\r", 'Sample', s+2, '/', Ns)
+
+        # remove burn-in
+        samples = samples[:, Nb:]
+        loglike_eval = loglike_eval[Nb:]
+        accave = acc[Nb:].mean()
+        print('\nAverage acceptance rate:', accave, 'MCMC scale:', self.scale, '\n')
+
+        return samples, loglike_eval, accave
+
+    def single_update(self, x_t, loglike_eval_t):
+        # propose state
+        xi = self.prior.sample(1).flatten()                       # sample from the prior
+        x_star = np.sqrt(1-self.scale**2)*x_t + self.scale*xi     # pCN proposal
+
+        # evaluate target
+        loglike_eval_star = self._loglikelihood(x_star)
+
+        # ratio and acceptance probability
+        ratio = loglike_eval_star - loglike_eval_t   # proposal is symmetric
+        alpha = min(0, ratio)
+
+        # accept/reject
+        u_theta = np.log(np.random.rand())
+        if (u_theta <= alpha):
+            x_next = x_star
+            loglike_eval_next = loglike_eval_star
+            acc = 1
+        else:
+            x_next = x_t
+            loglike_eval_next = loglike_eval_t
+            acc = 0
+
+        return x_next, loglike_eval_next, acc
+
+
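The key step in single_update above is the proposal x_star = sqrt(1 - scale^2)*x_t + scale*xi, with xi drawn from the prior, which leaves a zero-mean Gaussian prior invariant; that is why the acceptance ratio only involves the log-likelihood. A plain-NumPy check of that property, independent of CUQIpy (all names below are illustrative):

# Check that the pCN proposal preserves a zero-mean Gaussian prior N(0, sigma^2 I).
import numpy as np

rng = np.random.default_rng(0)
scale = 0.3
sigma = 2.0                                    # prior standard deviation

x_t = sigma * rng.standard_normal(100_000)     # current states, drawn from the prior
xi = sigma * rng.standard_normal(100_000)      # proposal noise, also drawn from the prior
x_star = np.sqrt(1 - scale**2) * x_t + scale * xi   # pCN proposal, as in single_update

print(np.std(x_t), np.std(x_star))             # both close to sigma: the prior is preserved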
cuqi/legacy/sampler/_rto.py
@@ -0,0 +1,284 @@
+import scipy as sp
+from scipy.linalg.interpolative import estimate_spectral_norm
+from scipy.sparse.linalg import LinearOperator as scipyLinearOperator
+import numpy as np
+import cuqi
+from cuqi.solver import CGLS, FISTA
+from cuqi.legacy.sampler import Sampler
+
+
+class LinearRTO(Sampler):
+    """
+    Linear RTO (Randomize-Then-Optimize) sampler.
+
+    Samples the posterior of an inverse problem with Gaussian likelihood and prior, where the forward model is linear or, more generally, affine.
+
+    Parameters
+    ----------
+    target : `cuqi.distribution.Posterior`, `cuqi.distribution.MultipleLikelihoodPosterior` or 5-element tuple.
+        If target is of type cuqi.distribution.Posterior or cuqi.distribution.MultipleLikelihoodPosterior, it represents the posterior distribution.
+        If target is a 5-element tuple, it assumes the following structure:
+        (data, model, L_sqrtprec, P_mean, P_sqrtprec)
+
+        Here:
+        data: an m-dimensional numpy array containing the measured data.
+        model: an m by n matrix, AffineModel or LinearModel representing the forward model.
+        L_sqrtprec: the square root of the precision matrix of the Gaussian likelihood.
+        P_mean: the prior mean.
+        P_sqrtprec: the square root of the precision matrix of the Gaussian prior.
+
+    x0 : `np.ndarray`
+        Initial point for the sampler. *Optional*.
+
+    maxit : int
+        Maximum number of iterations of the inner CGLS solver. *Optional*.
+
+    tol : float
+        Tolerance of the inner CGLS solver. *Optional*.
+
+    callback : callable, *Optional*
+        If set, this function will be called after every sample.
+        The signature of the callback function is `callback(sample, sample_index)`,
+        where `sample` is the current sample and `sample_index` is the index of the sample.
+        An example is shown in demos/demo31_callback.py.
+
+    """
+    def __init__(self, target, x0=None, maxit=10, tol=1e-6, shift=0, **kwargs):
+
+        # Accept tuple of inputs and construct posterior
+        if isinstance(target, tuple) and len(target) == 5:
+            # Structure (data, model, L_sqrtprec, P_mean, P_sqrtprec)
+            data = target[0]
+            model = target[1]
+            L_sqrtprec = target[2]
+            P_mean = target[3]
+            P_sqrtprec = target[4]
+
+            # If numpy matrix convert to CUQI model
+            if isinstance(model, np.ndarray) and len(model.shape) == 2:
+                model = cuqi.model.LinearModel(model)
+
+            # Check model input
+            if not isinstance(model, cuqi.model.AffineModel):
+                raise TypeError("Model needs to be cuqi.model.AffineModel or matrix")
+
+            # Likelihood
+            L = cuqi.distribution.Gaussian(model, sqrtprec=L_sqrtprec).to_likelihood(data)
+
+            # Prior TODO: allow multiple priors stacked
+            # if isinstance(P_mean, list) and isinstance(P_sqrtprec, list):
+            #     P = cuqi.distribution.JointGaussianSqrtPrec(P_mean, P_sqrtprec)
+            # else:
+            P = cuqi.distribution.Gaussian(P_mean, sqrtprec=P_sqrtprec)
+
+            # Construct posterior
+            target = cuqi.distribution.Posterior(L, P)
+
+        super().__init__(target, x0=x0, **kwargs)
+
+        self._check_posterior()
+
+        # Modify initial guess
+        if x0 is not None:
+            self.x0 = x0
+        else:
+            self.x0 = np.zeros(self.prior.dim)
+
+        # Other parameters
+        self.maxit = maxit
+        self.tol = tol
+        self.shift = 0
+
+        L1 = [likelihood.distribution.sqrtprec for likelihood in self.likelihoods]
+        L2 = self.prior.sqrtprec
+        L2mu = self.prior.sqrtprecTimesMean
+
+        # pre-computations
+        self.n = len(self.x0)
+        self.b_tild = np.hstack([L@(likelihood.data - model._shift) for (L, likelihood, model) in zip(L1, self.likelihoods, self.models)] + [L2mu])
+
+        callability = [callable(likelihood.model) for likelihood in self.likelihoods]
+        notcallability = [not c for c in callability]
+        if all(notcallability):
+            self.M = sp.sparse.vstack([L@likelihood.model for (L, likelihood) in zip(L1, self.likelihoods)] + [L2])
+        elif all(callability):
+            # in this case, model is a function doing forward and backward operations
+            def M(x, flag):
+                if flag == 1:
+                    out1 = [L @ likelihood.model._forward_func_no_shift(x) for (L, likelihood) in zip(L1, self.likelihoods)]   # Use forward function which excludes shift
+                    out2 = L2 @ x
+                    out = np.hstack(out1 + [out2])
+                elif flag == 2:
+                    idx_start = 0
+                    idx_end = 0
+                    out1 = np.zeros(self.n)
+                    for likelihood in self.likelihoods:
+                        idx_end += len(likelihood.data)
+                        out1 += likelihood.model._adjoint_func_no_shift(likelihood.distribution.sqrtprec.T@x[idx_start:idx_end])   # Use adjoint function which excludes shift
+                        idx_start = idx_end
+                    out2 = L2.T @ x[idx_end:]
+                    out = out1 + out2
+                return out
+            self.M = M
+        else:
+            raise TypeError("All likelihoods need to be callable or none need to be callable.")
+
+    @property
+    def prior(self):
+        return self.target.prior
+
+    @property
+    def likelihood(self):
+        return self.target.likelihood
+
+    @property
+    def likelihoods(self):
+        if isinstance(self.target, cuqi.distribution.Posterior):
+            return [self.target.likelihood]
+        elif isinstance(self.target, cuqi.distribution.MultipleLikelihoodPosterior):
+            return self.target.likelihoods
+
+    @property
+    def model(self):
+        return self.target.model
+
+    @property
+    def models(self):
+        if isinstance(self.target, cuqi.distribution.Posterior):
+            return [self.target.model]
+        elif isinstance(self.target, cuqi.distribution.MultipleLikelihoodPosterior):
+            return self.target.models
+
+    def _sample(self, N, Nb):
+        Ns = N+Nb   # number of simulations
+        samples = np.empty((self.n, Ns))
+
+        # initial state
+        samples[:, 0] = self.x0
+        for s in range(Ns-1):
+            y = self.b_tild + np.random.randn(len(self.b_tild))
+            sim = CGLS(self.M, y, samples[:, s], self.maxit, self.tol, self.shift)
+            samples[:, s+1], _ = sim.solve()
+
+            self._print_progress(s+2, Ns)   # s+2 is the sample number, s+1 is index assuming x0 is the first sample
+            self._call_callback(samples[:, s+1], s+1)
+
+        # remove burn-in
+        samples = samples[:, Nb:]
+
+        return samples, None, None
+
+    def _sample_adapt(self, N, Nb):
+        return self._sample(N, Nb)
+
+    def _check_posterior(self):
+        # Check target type
+        if not isinstance(self.target, (cuqi.distribution.Posterior, cuqi.distribution.MultipleLikelihoodPosterior)):
+            raise ValueError(f"To initialize an object of type {self.__class__}, 'target' needs to be of type 'cuqi.distribution.Posterior' or 'cuqi.distribution.MultipleLikelihoodPosterior'.")
+
+        # Check linear model and Gaussian likelihood(s)
+        if isinstance(self.target, cuqi.distribution.Posterior):
+            if not isinstance(self.model, cuqi.model.AffineModel):
+                raise TypeError("Model needs to be linear or affine")
+
+            if not hasattr(self.likelihood.distribution, "sqrtprec"):
+                raise TypeError("Distribution in Likelihood must contain a sqrtprec attribute")
+
+        elif isinstance(self.target, cuqi.distribution.MultipleLikelihoodPosterior):   # elif used for further alternatives, e.g., stacked posterior
+            for likelihood in self.likelihoods:
+                if not isinstance(likelihood.model, cuqi.model.LinearModel):
+                    raise TypeError("Model needs to be linear")
+
+                if not hasattr(likelihood.distribution, "sqrtprec"):
+                    raise TypeError("Distribution in Likelihood must contain a sqrtprec attribute")
+
+        # Check Gaussian prior
+        if not hasattr(self.prior, "sqrtprec"):
+            raise TypeError("Prior must contain a sqrtprec attribute")
+
+        if not hasattr(self.prior, "sqrtprecTimesMean"):
+            raise TypeError("Prior must contain a sqrtprecTimesMean attribute")
+
+
+class RegularizedLinearRTO(LinearRTO):
+    """
+    Regularized Linear RTO (Randomize-Then-Optimize) sampler.
+
+    Samples the posterior of an inverse problem with Gaussian likelihood and implicit Gaussian prior, where the forward model is linear.
+
+    Parameters
+    ----------
+    target : `cuqi.distribution.Posterior`
+        See `cuqi.legacy.sampler.LinearRTO`.
+
+    x0 : `np.ndarray`
+        Initial point for the sampler. *Optional*.
+
+    maxit : int
+        Maximum number of iterations of the inner FISTA solver. *Optional*.
+
+    stepsize : string or float
+        If stepsize is the string "automatic", the stepsize is automatically estimated based on the spectral norm.
+        If stepsize is a float, that stepsize is used.
+
+    abstol : float
+        Absolute tolerance of the inner FISTA solver. *Optional*.
+
+    callback : callable, *Optional*
+        If set, this function will be called after every sample.
+        The signature of the callback function is `callback(sample, sample_index)`,
+        where `sample` is the current sample and `sample_index` is the index of the sample.
+        An example is shown in demos/demo31_callback.py.
+
+    """
+    def __init__(self, target, x0=None, maxit=100, stepsize="automatic", abstol=1e-10, adaptive=True, **kwargs):
+
+        if not callable(target.prior.proximal):
+            raise TypeError("Projector needs to be callable")
+
+        super().__init__(target, x0=x0, maxit=maxit, **kwargs)
+
+        # Other parameters
+        self.stepsize = stepsize
+        self.abstol = abstol
+        self.adaptive = adaptive
+        self.proximal = target.prior.proximal
+
+    @property
+    def prior(self):
+        return self.target.prior.gaussian
+
+    def _sample(self, N, Nb):
+        Ns = N+Nb   # number of simulations
+        samples = np.empty((self.n, Ns))
+
+        if isinstance(self.stepsize, str):
+            if self.stepsize in ["automatic"]:
+                if not callable(self.M):
+                    M_op = scipyLinearOperator(self.M.shape, matvec=lambda v: self.M@v, rmatvec=lambda w: self.M.T@w)
+                else:
+                    M_op = scipyLinearOperator((len(self.b_tild), self.n), matvec=lambda v: self.M(v, 1), rmatvec=lambda w: self.M(w, 2))
+
+                _stepsize = 0.99/(estimate_spectral_norm(M_op)**2)
+                # print(f"Estimated stepsize for regularized Linear RTO: {_stepsize}")
+            else:
+                raise ValueError("Stepsize choice not supported")
+        else:
+            _stepsize = self.stepsize
+
+        # initial state
+        samples[:, 0] = self.x0
+        for s in range(Ns-1):
+            y = self.b_tild + np.random.randn(len(self.b_tild))
+            sim = FISTA(self.M, y, self.proximal,
+                        samples[:, s], maxit=self.maxit, stepsize=_stepsize, abstol=self.abstol, adaptive=self.adaptive)
+            samples[:, s+1], _ = sim.solve()
+
+            self._print_progress(s+2, Ns)   # s+2 is the sample number, s+1 is index assuming x0 is the first sample
+            self._call_callback(samples[:, s+1], s+1)
+        # remove burn-in
+        samples = samples[:, Nb:]
+
+        return samples, None, None
+
+
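Both _sample methods above implement the same randomize-then-optimize step: perturb b_tild with standard normal noise and re-solve the stacked least-squares problem (via CGLS, or FISTA with a proximal operator in the regularized case). A plain-NumPy sketch of that step, independent of CUQIpy and with illustrative names (A, L1, L2), showing that for a linear model with Gaussian likelihood and prior it reproduces the analytic posterior covariance:

# Randomize-then-optimize on a small linear Gaussian problem.
import numpy as np

rng = np.random.default_rng(1)
A = rng.standard_normal((30, 5))                      # forward model
x_true = rng.standard_normal(5)
data = A @ x_true + 0.1 * rng.standard_normal(30)     # noisy data, noise std 0.1

L1 = np.eye(30) / 0.1                                 # sqrt precision of the likelihood
L2 = np.eye(5)                                        # sqrt precision of the prior (zero mean)
M = np.vstack([L1 @ A, L2])                           # stacked system, analogous to self.M
b_tild = np.concatenate([L1 @ data, np.zeros(5)])     # analogous to self.b_tild

samples = np.empty((5, 2000))
for s in range(2000):
    y = b_tild + rng.standard_normal(len(b_tild))     # randomize the right-hand side
    samples[:, s], *_ = np.linalg.lstsq(M, y, rcond=None)   # then optimize (least squares)

# Sample covariance approaches the analytic posterior covariance (M^T M)^{-1}
print(np.allclose(np.cov(samples), np.linalg.inv(M.T @ M), atol=0.05))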