CUQIpy 1.4.0.post0.dev13__py3-none-any.whl → 1.4.0.post0.dev45__py3-none-any.whl
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as published in their respective public registries.
Potentially problematic release.
This version of CUQIpy might be problematic.
- cuqi/__init__.py +1 -0
- cuqi/_version.py +3 -3
- cuqi/experimental/__init__.py +1 -2
- cuqi/experimental/_recommender.py +4 -4
- cuqi/legacy/__init__.py +2 -0
- cuqi/legacy/sampler/__init__.py +11 -0
- cuqi/legacy/sampler/_conjugate.py +55 -0
- cuqi/legacy/sampler/_conjugate_approx.py +52 -0
- cuqi/legacy/sampler/_cwmh.py +196 -0
- cuqi/legacy/sampler/_gibbs.py +231 -0
- cuqi/legacy/sampler/_hmc.py +335 -0
- cuqi/legacy/sampler/_langevin_algorithm.py +198 -0
- cuqi/legacy/sampler/_laplace_approximation.py +184 -0
- cuqi/legacy/sampler/_mh.py +190 -0
- cuqi/legacy/sampler/_pcn.py +244 -0
- cuqi/legacy/sampler/_rto.py +284 -0
- cuqi/legacy/sampler/_sampler.py +182 -0
- cuqi/problem/_problem.py +87 -80
- cuqi/sampler/__init__.py +120 -8
- cuqi/sampler/_conjugate.py +376 -35
- cuqi/sampler/_conjugate_approx.py +40 -16
- cuqi/sampler/_cwmh.py +132 -138
- cuqi/{experimental/mcmc → sampler}/_direct.py +1 -1
- cuqi/sampler/_gibbs.py +269 -130
- cuqi/sampler/_hmc.py +328 -201
- cuqi/sampler/_langevin_algorithm.py +282 -98
- cuqi/sampler/_laplace_approximation.py +87 -117
- cuqi/sampler/_mh.py +47 -157
- cuqi/sampler/_pcn.py +56 -211
- cuqi/sampler/_rto.py +206 -140
- cuqi/sampler/_sampler.py +540 -135
- {cuqipy-1.4.0.post0.dev13.dist-info → cuqipy-1.4.0.post0.dev45.dist-info}/METADATA +1 -1
- {cuqipy-1.4.0.post0.dev13.dist-info → cuqipy-1.4.0.post0.dev45.dist-info}/RECORD +36 -35
- cuqi/experimental/mcmc/__init__.py +0 -122
- cuqi/experimental/mcmc/_conjugate.py +0 -396
- cuqi/experimental/mcmc/_conjugate_approx.py +0 -76
- cuqi/experimental/mcmc/_cwmh.py +0 -190
- cuqi/experimental/mcmc/_gibbs.py +0 -366
- cuqi/experimental/mcmc/_hmc.py +0 -462
- cuqi/experimental/mcmc/_langevin_algorithm.py +0 -382
- cuqi/experimental/mcmc/_laplace_approximation.py +0 -154
- cuqi/experimental/mcmc/_mh.py +0 -80
- cuqi/experimental/mcmc/_pcn.py +0 -89
- cuqi/experimental/mcmc/_rto.py +0 -350
- cuqi/experimental/mcmc/_sampler.py +0 -582
- {cuqipy-1.4.0.post0.dev13.dist-info → cuqipy-1.4.0.post0.dev45.dist-info}/WHEEL +0 -0
- {cuqipy-1.4.0.post0.dev13.dist-info → cuqipy-1.4.0.post0.dev45.dist-info}/licenses/LICENSE +0 -0
- {cuqipy-1.4.0.post0.dev13.dist-info → cuqipy-1.4.0.post0.dev45.dist-info}/top_level.txt +0 -0
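The file list shows the main theme of this release: the new-style samplers move out of cuqi.experimental.mcmc and into cuqi.sampler, while the previous cuqi.sampler implementations are kept under cuqi.legacy.sampler. A minimal sketch of how a downstream import would change, assuming LinearRTO (defined in cuqi/sampler/_rto.py, diffed below) is re-exported from the package __init__ just as it was from the experimental namespace:

```python
# Hypothetical downstream import update matching the file moves listed above.

# 1.4.0.post0.dev13 and earlier: the new-style sampler lived in the experimental namespace
# from cuqi.experimental.mcmc import LinearRTO

# 1.4.0.post0.dev45: the same sampler is exposed from cuqi.sampler, and the old
# implementation remains importable from cuqi.legacy.sampler
from cuqi.sampler import LinearRTO
```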
cuqi/sampler/_rto.py
CHANGED
@@ -3,7 +3,7 @@ from scipy.linalg.interpolative import estimate_spectral_norm
 from scipy.sparse.linalg import LinearOperator as scipyLinearOperator
 import numpy as np
 import cuqi
-from cuqi.solver import CGLS, FISTA
+from cuqi.solver import CGLS, FISTA, ADMM, ScipyLinearLSQ, ScipyMinimizer
 from cuqi.sampler import Sampler


@@ -27,7 +27,7 @@ class LinearRTO(Sampler):
         P_mean: is the prior mean.
         P_sqrtprec: is the squareroot of the precision matrix of the Gaussian mean.

-
+    initial_point : `np.ndarray`
         Initial point for the sampler. *Optional*.

     maxit : int
@@ -36,67 +36,87 @@ class LinearRTO(Sampler):
     tol : float
         Tolerance of the inner CGLS solver. *Optional*.

-
-
-        The signature of the callback function is `callback(sample, sample_index)`,
-        where `sample` is the current sample and `sample_index` is the index of the sample.
-        An example is shown in demos/demo31_callback.py.
+    inner_initial_point : string or np.ndarray or cuqi.array.CUQIArray
+        Initial point for the inner optimization problem. Can be "previous_sample" (default), "MAP", or a specific numpy or cuqi array. *Optional*.

-
-
+    callback : callable, optional
+        A function that will be called after each sampling step. It can be useful for monitoring the sampler during sampling.
+        The function should take three arguments: the sampler object, the index of the current sampling step, the total number of requested samples. The last two arguments are integers. An example of the callback function signature is: `callback(sampler, sample_index, num_of_samples)`.

-
-
-
-
-        model = target[1]
-        L_sqrtprec = target[2]
-        P_mean = target[3]
-        P_sqrtprec = target[4]
-
-        # If numpy matrix convert to CUQI model
-        if isinstance(model, np.ndarray) and len(model.shape) == 2:
-            model = cuqi.model.LinearModel(model)
-
-        # Check model input
-        if not isinstance(model, cuqi.model.AffineModel):
-            raise TypeError("Model needs to be cuqi.model.AffineModel or matrix")
-
-        # Likelihood
-        L = cuqi.distribution.Gaussian(model, sqrtprec=L_sqrtprec).to_likelihood(data)
-
-        # Prior TODO: allow multiple priors stacked
-        #if isinstance(P_mean, list) and isinstance(P_sqrtprec, list):
-        #    P = cuqi.distribution.JointGaussianSqrtPrec(P_mean, P_sqrtprec)
-        #else:
-        P = cuqi.distribution.Gaussian(P_mean, sqrtprec=P_sqrtprec)
-
-        # Construct posterior
-        target = cuqi.distribution.Posterior(L, P)
-
-        super().__init__(target, x0=x0, **kwargs)
-
-        self._check_posterior()
-
-        # Modify initial guess
-        if x0 is not None:
-            self.x0 = x0
-        else:
-            self.x0 = np.zeros(self.prior.dim)
+    """
+    def __init__(self, target=None, initial_point=None, maxit=10, tol=1e-6, inner_initial_point="previous_sample", **kwargs):
+
+        super().__init__(target=target, initial_point=initial_point, **kwargs)

         # Other parameters
         self.maxit = maxit
-        self.tol = tol
-        self.
-
+        self.tol = tol
+        self.inner_initial_point = inner_initial_point
+
+    def _initialize(self):
+        self._precompute()
+        self._compute_map()
+
+    @property
+    def inner_initial_point(self):
+        if isinstance(self._inner_initial_point, str):
+            if self._inner_initial_point == "previous_sample":
+                return self.current_point
+            elif self._inner_initial_point == "map":
+                return self._map
+        else:
+            return self._inner_initial_point
+
+    @inner_initial_point.setter
+    def inner_initial_point(self, value):
+        is_correct_string = (isinstance(value, str) and
+                             (value.lower() == "previous_sample" or
+                              value.lower() == "map"))
+        if is_correct_string:
+            self._inner_initial_point = value.lower()
+        elif isinstance(value, (np.ndarray, cuqi.array.CUQIarray)):
+            self._inner_initial_point = value
+        else:
+            raise ValueError("Invalid value for inner_initial_point. Choose either 'previous_sample', 'MAP', or provide a numpy array/cuqi array.")
+
+    @property
+    def prior(self):
+        return self.target.prior
+
+    @property
+    def likelihood(self):
+        return self.target.likelihood
+
+    @property
+    def likelihoods(self):
+        if isinstance(self.target, cuqi.distribution.Posterior):
+            return [self.target.likelihood]
+        elif isinstance(self.target, cuqi.distribution.MultipleLikelihoodPosterior):
+            return self.target.likelihoods
+
+    @property
+    def model(self):
+        return self.target.model
+
+    @property
+    def models(self):
+        if isinstance(self.target, cuqi.distribution.Posterior):
+            return [self.target.model]
+        elif isinstance(self.target, cuqi.distribution.MultipleLikelihoodPosterior):
+            return self.target.models
+
+    def _compute_map(self):
+        sim = CGLS(self.M, self.b_tild, self.current_point, self.maxit, self.tol)
+        self._map, _ = sim.solve()
+
+    def _precompute(self):
         L1 = [likelihood.distribution.sqrtprec for likelihood in self.likelihoods]
         L2 = self.prior.sqrtprec
         L2mu = self.prior.sqrtprecTimesMean

         # pre-computations
-        self.n =
-        self.b_tild = np.hstack([L@(likelihood.data - model._shift) for (L, likelihood, model) in zip(L1, self.likelihoods, self.models)]+ [L2mu])
-
+        self.n = self.prior.dim
+        self.b_tild = np.hstack([L@(likelihood.data - model._shift) for (L, likelihood, model) in zip(L1, self.likelihoods, self.models)]+ [L2mu]) # With shift from AffineModel
         callability = [callable(likelihood.model) for likelihood in self.likelihoods]
         notcallability = [not c for c in callability]
         if all(notcallability):
@@ -114,64 +134,26 @@ class LinearRTO(Sampler):
                     out1 = np.zeros(self.n)
                     for likelihood in self.likelihoods:
                         idx_end += len(likelihood.data)
-                        out1 += likelihood.model._adjoint_func_no_shift(likelihood.distribution.sqrtprec.T@x[idx_start:idx_end])
+                        out1 += likelihood.model._adjoint_func_no_shift(likelihood.distribution.sqrtprec.T@x[idx_start:idx_end])
                         idx_start = idx_end
                     out2 = L2.T @ x[idx_end:]
                     out = out1 + out2
                 return out
             self.M = M
         else:
-            raise TypeError("All likelihoods need to be callable or none need to be callable.")
-
-    @property
-    def prior(self):
-        return self.target.prior
-
-    @property
-    def likelihood(self):
-        return self.target.likelihood
-
-    @property
-    def likelihoods(self):
-        if isinstance(self.target, cuqi.distribution.Posterior):
-            return [self.target.likelihood]
-        elif isinstance(self.target, cuqi.distribution.MultipleLikelihoodPosterior):
-            return self.target.likelihoods
+            raise TypeError("All likelihoods need to be callable or none need to be callable.")

-
-
-
-
-
-
-        if isinstance(self.target, cuqi.distribution.Posterior):
-            return [self.target.model]
-        elif isinstance(self.target, cuqi.distribution.MultipleLikelihoodPosterior):
-            return self.target.models
-
-    def _sample(self, N, Nb):
-        Ns = N+Nb # number of simulations
-        samples = np.empty((self.n, Ns))
-
-        # initial state
-        samples[:, 0] = self.x0
-        for s in range(Ns-1):
-            y = self.b_tild + np.random.randn(len(self.b_tild))
-            sim = CGLS(self.M, y, samples[:, s], self.maxit, self.tol, self.shift)
-            samples[:, s+1], _ = sim.solve()
-
-            self._print_progress(s+2,Ns) #s+2 is the sample number, s+1 is index assuming x0 is the first sample
-            self._call_callback(samples[:, s+1], s+1)
-
-        # remove burn-in
-        samples = samples[:, Nb:]
-
-        return samples, None, None
+    def step(self):
+        y = self.b_tild + np.random.randn(len(self.b_tild))
+        sim = CGLS(self.M, y, self.inner_initial_point, self.maxit, self.tol)
+        self.current_point, _ = sim.solve()
+        acc = 1
+        return acc

-    def
-
+    def tune(self, skip_len, update_count):
+        pass

-    def
+    def validate_target(self):
         # Check target type
         if not isinstance(self.target, (cuqi.distribution.Posterior, cuqi.distribution.MultipleLikelihoodPosterior)):
             raise ValueError(f"To initialize an object of type {self.__class__}, 'target' need to be of type 'cuqi.distribution.Posterior' or 'cuqi.distribution.MultipleLikelihoodPosterior'.")
@@ -179,15 +161,15 @@ class LinearRTO(Sampler):
         # Check Linear model and Gaussian likelihood(s)
         if isinstance(self.target, cuqi.distribution.Posterior):
             if not isinstance(self.model, cuqi.model.AffineModel):
-                raise TypeError("Model needs to be linear or affine")
+                raise TypeError("Model needs to be linear or more generally affine")

             if not hasattr(self.likelihood.distribution, "sqrtprec"):
                 raise TypeError("Distribution in Likelihood must contain a sqrtprec attribute")

         elif isinstance(self.target, cuqi.distribution.MultipleLikelihoodPosterior): # Elif used for further alternatives, e.g., stacked posterior
             for likelihood in self.likelihoods:
-                if not isinstance(likelihood.model, cuqi.model.
-                    raise TypeError("Model needs to be linear")
+                if not isinstance(likelihood.model, cuqi.model.AffineModel):
+                    raise TypeError("Model needs to be linear or more generally affine")

                 if not hasattr(likelihood.distribution, "sqrtprec"):
                     raise TypeError("Distribution in Likelihood must contain a sqrtprec attribute")
@@ -198,60 +180,115 @@ class LinearRTO(Sampler):

         if not hasattr(self.prior, "sqrtprecTimesMean"):
             raise TypeError("Prior must contain a sqrtprecTimesMean attribute")
-
+
+    def _get_default_initial_point(self, dim):
+        """ Get the default initial point for the sampler. Defaults to an array of zeros. """
+        return np.zeros(dim)

 class RegularizedLinearRTO(LinearRTO):
     """
     Regularized Linear RTO (Randomize-Then-Optimize) sampler.

     Samples posterior related to the inverse problem with Gaussian likelihood and implicit Gaussian prior, and where the forward model is Linear.
+    The sampler works by repeatedly solving regularized linear least squares problems for perturbed data.
+    The solver for these optimization problems is chosen based on how the regularized is provided in the implicit Gaussian prior.
+    Currently we use the following solvers:
+    FISTA: [1] Beck, Amir, and Marc Teboulle. "A fast iterative shrinkage-thresholding algorithm for linear inverse problems." SIAM journal on imaging sciences 2.1 (2009): 183-202.
+        Used when prior.proximal is callable.
+    ADMM: [2] Boyd et al. "Distributed optimization and statistical learning via the alternating direction method of multipliers."Foundations and Trends® in Machine learning, 2011.
+        Used when prior.proximal is a list of penalty terms.
+    ScipyLinearLSQ: Wrapper for Scipy's lsq_linear for the Trust Region Reflective algorithm. Optionally used when the constraint is either "nonnegativity" or "box".
+    ScipyMinimizer: Wrapper for Scipy's minimize. Optionally used when the constraint is either "nonnegativity" or "box".

     Parameters
     ------------
     target : `cuqi.distribution.Posterior`
         See `cuqi.sampler.LinearRTO`

-
+    initial_point : `np.ndarray`
         Initial point for the sampler. *Optional*.

     maxit : int
-        Maximum number of iterations of the
+        Maximum number of iterations of the FISTA/ADMM/ScipyLinearLSQ/ScipyMinimizer solver. *Optional*.
+
+    inner_max_it : int
+        Maximum number of iterations of the CGLS solver used within the ADMM solver. *Optional*.

     stepsize : string or float
         If stepsize is a string and equals either "automatic", then the stepsize is automatically estimated based on the spectral norm.
         If stepsize is a float, then this stepsize is used.

+    penalty_parameter : int
+        Penalty parameter of the ADMM solver. *Optional*.
+        See [2] or `cuqi.solver.ADMM`
+
     abstol : float
-        Absolute tolerance of the
+        Absolute tolerance of the FISTA/ScipyLinearLSQ/ScipyMinimizer solver. *Optional*.
+
+    inner_abstol : float
+        Tolerance parameter for ScipyLinearLSQ's inner solve of the unbounded least-squares problem. *Optional*.
+
+    adaptive : bool
+        If True, FISTA is used as solver, otherwise ISTA is used. *Optional*.
+
+    solver : string
+        Options are "FISTA" (default for a single constraint or regularization), "ADMM" (default and the only option for multiple constraints or regularizations), "ScipyLinearLSQ" and "ScipyMinimizer". Note "ScipyLinearLSQ" and "ScipyMinimizer" can only be used with `RegularizedGaussian` of a single `box` or `nonnegativity` constraint. *Optional*.
+
+    inner_initial_point : string or np.ndarray or cuqi.array.CUQIArray
+        Initial point for the inner optimization problem. Can be "previous_sample" (default), "MAP", or a specific numpy or cuqi array. *Optional*.

-    callback : callable,
-
-        The
-        where `sample` is the current sample and `sample_index` is the index of the sample.
-        An example is shown in demos/demo31_callback.py.
+    callback : callable, optional
+        A function that will be called after each sampling step. It can be useful for monitoring the sampler during sampling.
+        The function should take three arguments: the sampler object, the index of the current sampling step, the total number of requested samples. The last two arguments are integers. An example of the callback function signature is: `callback(sampler, sample_index, num_of_samples)`.

     """
-    def __init__(self, target,
-
-        if not callable(target.prior.proximal):
-            raise TypeError("Projector needs to be callable")
+    def __init__(self, target=None, initial_point=None, maxit=100, inner_max_it=10, stepsize="automatic", penalty_parameter=10, abstol=1e-10, adaptive=True, solver=None, inner_abstol=None, inner_initial_point="previous_sample", **kwargs):

-        super().__init__(target
+        super().__init__(target=target, initial_point=initial_point, **kwargs)

         # Other parameters
         self.stepsize = stepsize
-        self.abstol = abstol
+        self.abstol = abstol
+        self.inner_abstol = inner_abstol
         self.adaptive = adaptive
-        self.
+        self.maxit = maxit
+        self.inner_max_it = inner_max_it
+        self.penalty_parameter = penalty_parameter
+        self.solver = solver
+        self.inner_initial_point = inner_initial_point
+
+    def _initialize(self):
+        super()._initialize()
+        if self.solver is None:
+            self.solver = "FISTA" if callable(self.proximal) else "ADMM"
+        if self.solver == "FISTA":
+            self._stepsize = self._choose_stepsize()
+        self._compute_map_regularized()

     @property
-    def
-        return self.
+    def solver(self):
+        return self._solver
+
+    @solver.setter
+    def solver(self, value):
+        if value == "ScipyLinearLSQ" or value == "ScipyMinimizer":
+            if (self.target.prior.preset["constraint"] == "nonnegativity" or self.target.prior.preset["constraint"] == "box"):
+                self._solver = value
+            else:
+                raise ValueError("ScipyLinearLSQ and ScipyMinimizer only support RegularizedGaussian with box or nonnegativity constraint.")
+        else:
+            self._solver = value
+
+    @property
+    def proximal(self):
+        return self.target.prior.proximal
+
+    def validate_target(self):
+        super().validate_target()
+        if not isinstance(self.target.prior, (cuqi.implicitprior.RegularizedGaussian, cuqi.implicitprior.RegularizedGMRF)):
+            raise TypeError("Prior needs to be RegularizedGaussian or RegularizedGMRF")

-    def
-        Ns = N+Nb # number of simulations
-        samples = np.empty((self.n, Ns))
-
+    def _choose_stepsize(self):
         if isinstance(self.stepsize, str):
             if self.stepsize in ["automatic"]:
                 if not callable(self.M):
@@ -265,20 +302,49 @@ class RegularizedLinearRTO(LinearRTO):
                 raise ValueError("Stepsize choice not supported")
         else:
             _stepsize = self.stepsize
-
-
-
-
-
+        return _stepsize
+
+    @property
+    def prior(self):
+        return self.target.prior.gaussian
+
+    def _compute_map_regularized(self):
+        self._map = self._customized_step(self.b_tild, self.initial_point)
+
+    def _customized_step(self, y, x0):
+        if self.solver == "FISTA":
             sim = FISTA(self.M, y, self.proximal,
-
-
-
-
-
-
-
+                        x0, maxit = self.maxit, stepsize = self._stepsize, abstol = self.abstol, adaptive = self.adaptive)
+        elif self.solver == "ADMM":
+            sim = ADMM(self.M, y, self.proximal,
+                       x0, self.penalty_parameter, maxit = self.maxit, inner_max_it = self.inner_max_it, adaptive = self.adaptive)
+        elif self.solver == "ScipyLinearLSQ":
+            A_op = sp.sparse.linalg.LinearOperator((sum([llh.distribution.dim for llh in self.likelihoods])+self.target.prior.dim, self.target.prior.dim),
+                                                   matvec=lambda x: self.M(x, 1),
+                                                   rmatvec=lambda x: self.M(x, 2)
+                                                   )
+            sim = ScipyLinearLSQ(A_op, y, self.target.prior._box_bounds,
+                                 max_iter = self.maxit,
+                                 lsmr_maxiter = self.inner_max_it,
+                                 tol = self.abstol,
+                                 lsmr_tol = self.inner_abstol)
+        elif self.solver == "ScipyMinimizer":
+            # Adapt bounds format, as scipy.minimize requires a bounds format
+            # different than that in scipy.lsq_linear.
+            bounds = [(self.target.prior._box_bounds[0][i], self.target.prior._box_bounds[1][i]) for i in range(self.target.prior.dim)]
+            # Note that the objective function is defined as 0.5*||Mx-y||^2,
+            # and the corresponding gradient (gradfunc) is given by M^T(Mx-y).
+            sim = ScipyMinimizer(lambda x: 0.5*np.sum((self.M(x, 1)-y)**2), x0, gradfunc=lambda x: self.M(self.M(x, 1) - y, 2), bounds=bounds, tol=self.abstol, options={"maxiter": self.maxit})
+        else:
+            raise ValueError("Choice of solver not supported.")

-
+        sol, _ = sim.solve()
+        return sol
+
+    def step(self):
+        y = self.b_tild + np.random.randn(len(self.b_tild))

+        self.current_point = self._customized_step(y, self.inner_initial_point)

+        acc = 1
+        return acc