CUQIpy 1.1.1.post0.dev36__py3-none-any.whl → 1.4.1.post0.dev124__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of CUQIpy might be problematic.
- cuqi/__init__.py +2 -0
- cuqi/_version.py +3 -3
- cuqi/algebra/__init__.py +2 -0
- cuqi/algebra/_abstract_syntax_tree.py +358 -0
- cuqi/algebra/_ordered_set.py +82 -0
- cuqi/algebra/_random_variable.py +457 -0
- cuqi/array/_array.py +4 -13
- cuqi/config.py +7 -0
- cuqi/density/_density.py +9 -1
- cuqi/distribution/__init__.py +3 -2
- cuqi/distribution/_beta.py +7 -11
- cuqi/distribution/_cauchy.py +2 -2
- cuqi/distribution/_custom.py +0 -6
- cuqi/distribution/_distribution.py +31 -45
- cuqi/distribution/_gamma.py +7 -3
- cuqi/distribution/_gaussian.py +2 -12
- cuqi/distribution/_inverse_gamma.py +4 -10
- cuqi/distribution/_joint_distribution.py +112 -15
- cuqi/distribution/_lognormal.py +0 -7
- cuqi/distribution/{_modifiedhalfnormal.py → _modified_half_normal.py} +23 -23
- cuqi/distribution/_normal.py +34 -7
- cuqi/distribution/_posterior.py +9 -0
- cuqi/distribution/_truncated_normal.py +129 -0
- cuqi/distribution/_uniform.py +47 -1
- cuqi/experimental/__init__.py +2 -2
- cuqi/experimental/_recommender.py +216 -0
- cuqi/geometry/__init__.py +2 -0
- cuqi/geometry/_geometry.py +15 -1
- cuqi/geometry/_product_geometry.py +181 -0
- cuqi/implicitprior/__init__.py +5 -3
- cuqi/implicitprior/_regularized_gaussian.py +483 -0
- cuqi/implicitprior/{_regularizedGMRF.py → _regularized_gmrf.py} +4 -2
- cuqi/implicitprior/{_regularizedUnboundedUniform.py → _regularized_unbounded_uniform.py} +3 -2
- cuqi/implicitprior/_restorator.py +269 -0
- cuqi/legacy/__init__.py +2 -0
- cuqi/{experimental/mcmc → legacy/sampler}/__init__.py +7 -11
- cuqi/legacy/sampler/_conjugate.py +55 -0
- cuqi/legacy/sampler/_conjugate_approx.py +52 -0
- cuqi/legacy/sampler/_cwmh.py +196 -0
- cuqi/legacy/sampler/_gibbs.py +231 -0
- cuqi/legacy/sampler/_hmc.py +335 -0
- cuqi/{experimental/mcmc → legacy/sampler}/_langevin_algorithm.py +82 -111
- cuqi/legacy/sampler/_laplace_approximation.py +184 -0
- cuqi/legacy/sampler/_mh.py +190 -0
- cuqi/legacy/sampler/_pcn.py +244 -0
- cuqi/{experimental/mcmc → legacy/sampler}/_rto.py +132 -90
- cuqi/legacy/sampler/_sampler.py +182 -0
- cuqi/likelihood/_likelihood.py +9 -1
- cuqi/model/__init__.py +1 -1
- cuqi/model/_model.py +1361 -359
- cuqi/pde/__init__.py +4 -0
- cuqi/pde/_observation_map.py +36 -0
- cuqi/pde/_pde.py +134 -33
- cuqi/problem/_problem.py +93 -87
- cuqi/sampler/__init__.py +120 -8
- cuqi/sampler/_conjugate.py +376 -35
- cuqi/sampler/_conjugate_approx.py +40 -16
- cuqi/sampler/_cwmh.py +132 -138
- cuqi/{experimental/mcmc → sampler}/_direct.py +1 -1
- cuqi/sampler/_gibbs.py +288 -130
- cuqi/sampler/_hmc.py +328 -201
- cuqi/sampler/_langevin_algorithm.py +284 -100
- cuqi/sampler/_laplace_approximation.py +87 -117
- cuqi/sampler/_mh.py +47 -157
- cuqi/sampler/_pcn.py +65 -213
- cuqi/sampler/_rto.py +211 -142
- cuqi/sampler/_sampler.py +553 -136
- cuqi/samples/__init__.py +1 -1
- cuqi/samples/_samples.py +24 -18
- cuqi/solver/__init__.py +6 -4
- cuqi/solver/_solver.py +230 -26
- cuqi/testproblem/_testproblem.py +2 -3
- cuqi/utilities/__init__.py +6 -1
- cuqi/utilities/_get_python_variable_name.py +2 -2
- cuqi/utilities/_utilities.py +182 -2
- {CUQIpy-1.1.1.post0.dev36.dist-info → cuqipy-1.4.1.post0.dev124.dist-info}/METADATA +10 -6
- cuqipy-1.4.1.post0.dev124.dist-info/RECORD +101 -0
- {CUQIpy-1.1.1.post0.dev36.dist-info → cuqipy-1.4.1.post0.dev124.dist-info}/WHEEL +1 -1
- CUQIpy-1.1.1.post0.dev36.dist-info/RECORD +0 -92
- cuqi/experimental/mcmc/_conjugate.py +0 -197
- cuqi/experimental/mcmc/_conjugate_approx.py +0 -81
- cuqi/experimental/mcmc/_cwmh.py +0 -191
- cuqi/experimental/mcmc/_gibbs.py +0 -268
- cuqi/experimental/mcmc/_hmc.py +0 -470
- cuqi/experimental/mcmc/_laplace_approximation.py +0 -156
- cuqi/experimental/mcmc/_mh.py +0 -78
- cuqi/experimental/mcmc/_pcn.py +0 -89
- cuqi/experimental/mcmc/_sampler.py +0 -561
- cuqi/experimental/mcmc/_utilities.py +0 -17
- cuqi/implicitprior/_regularizedGaussian.py +0 -323
- {CUQIpy-1.1.1.post0.dev36.dist-info → cuqipy-1.4.1.post0.dev124.dist-info/licenses}/LICENSE +0 -0
- {CUQIpy-1.1.1.post0.dev36.dist-info → cuqipy-1.4.1.post0.dev124.dist-info}/top_level.txt +0 -0
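The moves in this listing promote the experimental MCMC samplers to the main cuqi.sampler package and keep the previous sampler implementations under cuqi.legacy.sampler. A minimal import sketch of what this reorganization would presumably mean for downstream code follows; the diff only records file locations, not the package __init__ exports, so the paths below are assumptions.

# Hypothetical import sketch based only on the file moves above.
# CUQIpy 1.1.1.post0.dev36: new-style samplers lived in the experimental namespace,
#     from cuqi.experimental.mcmc import LinearRTO
# CUQIpy 1.4.1.post0.dev124: the same modules now back the default sampler package,
# while the previous implementations are retained for backwards compatibility.
from cuqi.sampler import LinearRTO, RegularizedLinearRTO   # new default location
from cuqi.legacy import sampler as legacy_sampler          # old-style samplers, if still needed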
cuqi/sampler/_rto.py
CHANGED
@@ -3,7 +3,7 @@ from scipy.linalg.interpolative import estimate_spectral_norm
 from scipy.sparse.linalg import LinearOperator as scipyLinearOperator
 import numpy as np
 import cuqi
-from cuqi.solver import CGLS, FISTA
+from cuqi.solver import CGLS, FISTA, ADMM, ScipyLinearLSQ, ScipyMinimizer
 from cuqi.sampler import Sampler
 
 
@@ -11,7 +11,7 @@ class LinearRTO(Sampler):
     """
     Linear RTO (Randomize-Then-Optimize) sampler.
 
-    Samples posterior related to the inverse problem with Gaussian likelihood and prior, and where the forward model is
+    Samples posterior related to the inverse problem with Gaussian likelihood and prior, and where the forward model is linear or more generally affine.
 
     Parameters
     ------------
@@ -22,12 +22,12 @@ class LinearRTO(Sampler):
 
     Here:
     data: is a m-dimensional numpy array containing the measured data.
-    model: is a m by n dimensional matrix or LinearModel representing the forward model.
+    model: is a m by n dimensional matrix, AffineModel or LinearModel representing the forward model.
     L_sqrtprec: is the squareroot of the precision matrix of the Gaussian likelihood.
     P_mean: is the prior mean.
     P_sqrtprec: is the squareroot of the precision matrix of the Gaussian mean.
 
-
+    initial_point : `np.ndarray`
         Initial point for the sampler. *Optional*.
 
     maxit : int
@@ -36,67 +36,87 @@ class LinearRTO(Sampler):
     tol : float
         Tolerance of the inner CGLS solver. *Optional*.
 
-
-
-        The signature of the callback function is `callback(sample, sample_index)`,
-        where `sample` is the current sample and `sample_index` is the index of the sample.
-        An example is shown in demos/demo31_callback.py.
+    inner_initial_point : string or np.ndarray or cuqi.array.CUQIArray
+        Initial point for the inner optimization problem. Can be "previous_sample" (default), "MAP", or a specific numpy or cuqi array. *Optional*.
 
-
-
+    callback : callable, optional
+        A function that will be called after each sampling step. It can be useful for monitoring the sampler during sampling.
+        The function should take three arguments: the sampler object, the index of the current sampling step, the total number of requested samples. The last two arguments are integers. An example of the callback function signature is: `callback(sampler, sample_index, num_of_samples)`.
 
-
-
-
-
-            model = target[1]
-            L_sqrtprec = target[2]
-            P_mean = target[3]
-            P_sqrtprec = target[4]
-
-        # If numpy matrix convert to CUQI model
-        if isinstance(model, np.ndarray) and len(model.shape) == 2:
-            model = cuqi.model.LinearModel(model)
-
-        # Check model input
-        if not isinstance(model, cuqi.model.LinearModel):
-            raise TypeError("Model needs to be cuqi.model.LinearModel or matrix")
-
-        # Likelihood
-        L = cuqi.distribution.Gaussian(model, sqrtprec=L_sqrtprec).to_likelihood(data)
-
-        # Prior TODO: allow multiple priors stacked
-        #if isinstance(P_mean, list) and isinstance(P_sqrtprec, list):
-        #    P = cuqi.distribution.JointGaussianSqrtPrec(P_mean, P_sqrtprec)
-        #else:
-        P = cuqi.distribution.Gaussian(P_mean, sqrtprec=P_sqrtprec)
-
-        # Construct posterior
-        target = cuqi.distribution.Posterior(L, P)
-
-        super().__init__(target, x0=x0, **kwargs)
-
-        self._check_posterior()
-
-        # Modify initial guess
-        if x0 is not None:
-            self.x0 = x0
-        else:
-            self.x0 = np.zeros(self.prior.dim)
+    """
+    def __init__(self, target=None, initial_point=None, maxit=10, tol=1e-6, inner_initial_point="previous_sample", **kwargs):
+
+        super().__init__(target=target, initial_point=initial_point, **kwargs)
 
         # Other parameters
         self.maxit = maxit
-        self.tol = tol
-        self.
-
+        self.tol = tol
+        self.inner_initial_point = inner_initial_point
+
+    def _initialize(self):
+        self._precompute()
+        self._compute_map()
+
+    @property
+    def inner_initial_point(self):
+        if isinstance(self._inner_initial_point, str):
+            if self._inner_initial_point == "previous_sample":
+                return self.current_point
+            elif self._inner_initial_point == "map":
+                return self._map
+        else:
+            return self._inner_initial_point
+
+    @inner_initial_point.setter
+    def inner_initial_point(self, value):
+        is_correct_string = (isinstance(value, str) and
+                             (value.lower() == "previous_sample" or
+                              value.lower() == "map"))
+        if is_correct_string:
+            self._inner_initial_point = value.lower()
+        elif isinstance(value, (np.ndarray, cuqi.array.CUQIarray)):
+            self._inner_initial_point = value
+        else:
+            raise ValueError("Invalid value for inner_initial_point. Choose either 'previous_sample', 'MAP', or provide a numpy array/cuqi array.")
+
+    @property
+    def prior(self):
+        return self.target.prior
+
+    @property
+    def likelihood(self):
+        return self.target.likelihood
+
+    @property
+    def likelihoods(self):
+        if isinstance(self.target, cuqi.distribution.Posterior):
+            return [self.target.likelihood]
+        elif isinstance(self.target, cuqi.distribution.MultipleLikelihoodPosterior):
+            return self.target.likelihoods
+
+    @property
+    def model(self):
+        return self.target.model
+
+    @property
+    def models(self):
+        if isinstance(self.target, cuqi.distribution.Posterior):
+            return [self.target.model]
+        elif isinstance(self.target, cuqi.distribution.MultipleLikelihoodPosterior):
+            return self.target.models
+
+    def _compute_map(self):
+        sim = CGLS(self.M, self.b_tild, self.current_point, self.maxit, self.tol)
+        self._map, _ = sim.solve()
+
+    def _precompute(self):
         L1 = [likelihood.distribution.sqrtprec for likelihood in self.likelihoods]
         L2 = self.prior.sqrtprec
         L2mu = self.prior.sqrtprecTimesMean
 
         # pre-computations
-        self.n =
-        self.b_tild = np.hstack([L@likelihood.data for (L, likelihood) in zip(L1, self.likelihoods)]+ [L2mu])
-
+        self.n = self.prior.dim
+        self.b_tild = np.hstack([L@(likelihood.data - model._shift) for (L, likelihood, model) in zip(L1, self.likelihoods, self.models)]+ [L2mu]) # With shift from AffineModel
         callability = [callable(likelihood.model) for likelihood in self.likelihoods]
         notcallability = [not c for c in callability]
         if all(notcallability):
@@ -105,7 +125,7 @@ class LinearRTO(Sampler):
             # in this case, model is a function doing forward and backward operations
             def M(x, flag):
                 if flag == 1:
-                    out1 = [L @ likelihood.model.
+                    out1 = [L @ likelihood.model._forward_func_no_shift(x) for (L, likelihood) in zip(L1, self.likelihoods)] # Use forward function which excludes shift
                     out2 = L2 @ x
                     out = np.hstack(out1 + [out2])
                 elif flag == 2:
@@ -114,77 +134,42 @@ class LinearRTO(Sampler):
                     out1 = np.zeros(self.n)
                     for likelihood in self.likelihoods:
                         idx_end += len(likelihood.data)
-                        out1 += likelihood.model.
+                        out1 += likelihood.model._adjoint_func_no_shift(likelihood.distribution.sqrtprec.T@x[idx_start:idx_end])
                         idx_start = idx_end
                     out2 = L2.T @ x[idx_end:]
                     out = out1 + out2
                 return out
             self.M = M
         else:
-            raise TypeError("All likelihoods need to be callable or none need to be callable.")
-
-    @property
-    def prior(self):
-        return self.target.prior
-
-    @property
-    def likelihood(self):
-        return self.target.likelihood
-
-    @property
-    def likelihoods(self):
-        if isinstance(self.target, cuqi.distribution.Posterior):
-            return [self.target.likelihood]
-        elif isinstance(self.target, cuqi.distribution.MultipleLikelihoodPosterior):
-            return self.target.likelihoods
+            raise TypeError("All likelihoods need to be callable or none need to be callable.")
 
-
-
-
-
-
-
-        return self.target.data
-
-    def _sample(self, N, Nb):
-        Ns = N+Nb   # number of simulations
-        samples = np.empty((self.n, Ns))
-
-        # initial state
-        samples[:, 0] = self.x0
-        for s in range(Ns-1):
-            y = self.b_tild + np.random.randn(len(self.b_tild))
-            sim = CGLS(self.M, y, samples[:, s], self.maxit, self.tol, self.shift)
-            samples[:, s+1], _ = sim.solve()
-
-            self._print_progress(s+2,Ns) #s+2 is the sample number, s+1 is index assuming x0 is the first sample
-            self._call_callback(samples[:, s+1], s+1)
-
-        # remove burn-in
-        samples = samples[:, Nb:]
-
-        return samples, None, None
+    def step(self):
+        y = self.b_tild + np.random.randn(len(self.b_tild))
+        sim = CGLS(self.M, y, self.inner_initial_point, self.maxit, self.tol)
+        self.current_point, _ = sim.solve()
+        acc = 1
+        return acc
 
-    def
-
+    def tune(self, skip_len, update_count):
+        pass
 
-    def
+    def validate_target(self):
         # Check target type
         if not isinstance(self.target, (cuqi.distribution.Posterior, cuqi.distribution.MultipleLikelihoodPosterior)):
            raise ValueError(f"To initialize an object of type {self.__class__}, 'target' need to be of type 'cuqi.distribution.Posterior' or 'cuqi.distribution.MultipleLikelihoodPosterior'.")
 
        # Check Linear model and Gaussian likelihood(s)
        if isinstance(self.target, cuqi.distribution.Posterior):
-            if not isinstance(self.model, cuqi.model.
-                raise TypeError("Model needs to be linear")
+            if not isinstance(self.model, cuqi.model.AffineModel):
+                raise TypeError("Model needs to be linear or more generally affine")
 
             if not hasattr(self.likelihood.distribution, "sqrtprec"):
                 raise TypeError("Distribution in Likelihood must contain a sqrtprec attribute")
 
        elif isinstance(self.target, cuqi.distribution.MultipleLikelihoodPosterior): # Elif used for further alternatives, e.g., stacked posterior
            for likelihood in self.likelihoods:
-                if not isinstance(likelihood.model, cuqi.model.
-                    raise TypeError("Model needs to be linear")
+                if not isinstance(likelihood.model, cuqi.model.AffineModel):
+                    raise TypeError("Model needs to be linear or more generally affine")
 
                 if not hasattr(likelihood.distribution, "sqrtprec"):
                     raise TypeError("Distribution in Likelihood must contain a sqrtprec attribute")
@@ -195,60 +180,115 @@ class LinearRTO(Sampler):
 
         if not hasattr(self.prior, "sqrtprecTimesMean"):
             raise TypeError("Prior must contain a sqrtprecTimesMean attribute")
-
+
+    def _get_default_initial_point(self, dim):
+        """ Get the default initial point for the sampler. Defaults to an array of zeros. """
+        return np.zeros(dim)
 
 class RegularizedLinearRTO(LinearRTO):
     """
     Regularized Linear RTO (Randomize-Then-Optimize) sampler.
 
     Samples posterior related to the inverse problem with Gaussian likelihood and implicit Gaussian prior, and where the forward model is Linear.
+    The sampler works by repeatedly solving regularized linear least squares problems for perturbed data.
+    The solver for these optimization problems is chosen based on how the regularized is provided in the implicit Gaussian prior.
+    Currently we use the following solvers:
+    FISTA: [1] Beck, Amir, and Marc Teboulle. "A fast iterative shrinkage-thresholding algorithm for linear inverse problems." SIAM journal on imaging sciences 2.1 (2009): 183-202.
+        Used when prior.proximal is callable.
+    ADMM: [2] Boyd et al. "Distributed optimization and statistical learning via the alternating direction method of multipliers."Foundations and Trends® in Machine learning, 2011.
+        Used when prior.proximal is a list of penalty terms.
+    ScipyLinearLSQ: Wrapper for Scipy's lsq_linear for the Trust Region Reflective algorithm. Optionally used when the constraint is either "nonnegativity" or "box".
+    ScipyMinimizer: Wrapper for Scipy's minimize. Optionally used when the constraint is either "nonnegativity" or "box".
 
     Parameters
     ------------
     target : `cuqi.distribution.Posterior`
         See `cuqi.sampler.LinearRTO`
 
-
+    initial_point : `np.ndarray`
         Initial point for the sampler. *Optional*.
 
     maxit : int
-        Maximum number of iterations of the
+        Maximum number of iterations of the FISTA/ADMM/ScipyLinearLSQ/ScipyMinimizer solver. *Optional*.
+
+    inner_max_it : int
+        Maximum number of iterations of the CGLS solver used within the ADMM solver. *Optional*.
 
     stepsize : string or float
         If stepsize is a string and equals either "automatic", then the stepsize is automatically estimated based on the spectral norm.
         If stepsize is a float, then this stepsize is used.
 
+    penalty_parameter : int
+        Penalty parameter of the ADMM solver. *Optional*.
+        See [2] or `cuqi.solver.ADMM`
+
     abstol : float
-        Absolute tolerance of the
+        Absolute tolerance of the FISTA/ScipyLinearLSQ/ScipyMinimizer solver. *Optional*.
+
+    inner_abstol : float
+        Tolerance parameter for ScipyLinearLSQ's inner solve of the unbounded least-squares problem. *Optional*.
+
+    adaptive : bool
+        If True, FISTA is used as solver, otherwise ISTA is used. *Optional*.
+
+    solver : string
+        Options are "FISTA" (default for a single constraint or regularization), "ADMM" (default and the only option for multiple constraints or regularizations), "ScipyLinearLSQ" and "ScipyMinimizer". Note "ScipyLinearLSQ" and "ScipyMinimizer" can only be used with `RegularizedGaussian` of a single `box` or `nonnegativity` constraint. *Optional*.
+
+    inner_initial_point : string or np.ndarray or cuqi.array.CUQIArray
+        Initial point for the inner optimization problem. Can be "previous_sample" (default), "MAP", or a specific numpy or cuqi array. *Optional*.
 
-    callback : callable,
-
-        The
-        where `sample` is the current sample and `sample_index` is the index of the sample.
-        An example is shown in demos/demo31_callback.py.
+    callback : callable, optional
+        A function that will be called after each sampling step. It can be useful for monitoring the sampler during sampling.
+        The function should take three arguments: the sampler object, the index of the current sampling step, the total number of requested samples. The last two arguments are integers. An example of the callback function signature is: `callback(sampler, sample_index, num_of_samples)`.
 
     """
-    def __init__(self, target,
-
-        if not callable(target.prior.proximal):
-            raise TypeError("Projector needs to be callable")
+    def __init__(self, target=None, initial_point=None, maxit=100, inner_max_it=10, stepsize="automatic", penalty_parameter=10, abstol=1e-10, adaptive=True, solver=None, inner_abstol=None, inner_initial_point="previous_sample", **kwargs):
 
-        super().__init__(target
+        super().__init__(target=target, initial_point=initial_point, **kwargs)
 
         # Other parameters
         self.stepsize = stepsize
-        self.abstol = abstol
+        self.abstol = abstol
+        self.inner_abstol = inner_abstol
         self.adaptive = adaptive
-        self.
+        self.maxit = maxit
+        self.inner_max_it = inner_max_it
+        self.penalty_parameter = penalty_parameter
+        self.solver = solver
+        self.inner_initial_point = inner_initial_point
+
+    def _initialize(self):
+        super()._initialize()
+        if self.solver is None:
+            self.solver = "FISTA" if callable(self.proximal) else "ADMM"
+        if self.solver == "FISTA":
+            self._stepsize = self._choose_stepsize()
+        self._compute_map_regularized()
 
     @property
-    def
-        return self.
+    def solver(self):
+        return self._solver
+
+    @solver.setter
+    def solver(self, value):
+        if value == "ScipyLinearLSQ" or value == "ScipyMinimizer":
+            if (self.target.prior.preset["constraint"] == "nonnegativity" or self.target.prior.preset["constraint"] == "box"):
+                self._solver = value
+            else:
+                raise ValueError("ScipyLinearLSQ and ScipyMinimizer only support RegularizedGaussian with box or nonnegativity constraint.")
+        else:
+            self._solver = value
+
+    @property
+    def proximal(self):
+        return self.target.prior.proximal
+
+    def validate_target(self):
+        super().validate_target()
+        if not isinstance(self.target.prior, (cuqi.implicitprior.RegularizedGaussian, cuqi.implicitprior.RegularizedGMRF)):
+            raise TypeError("Prior needs to be RegularizedGaussian or RegularizedGMRF")
 
-    def
-        Ns = N+Nb   # number of simulations
-        samples = np.empty((self.n, Ns))
-
+    def _choose_stepsize(self):
         if isinstance(self.stepsize, str):
             if self.stepsize in ["automatic"]:
                 if not callable(self.M):
@@ -262,20 +302,49 @@ class RegularizedLinearRTO(LinearRTO):
                 raise ValueError("Stepsize choice not supported")
         else:
             _stepsize = self.stepsize
-
-
-
-
-
-
-
-
-
-
-
-
-
+        return _stepsize
+
+    @property
+    def prior(self):
+        return self.target.prior.gaussian
+
+    def _compute_map_regularized(self):
+        self._map = self._customized_step(self.b_tild, self.initial_point)
+
+    def _customized_step(self, y, x0):
+        if self.solver == "FISTA":
+            sim = FISTA(self.M, y, self.proximal,
+                        x0, maxit = self.maxit, stepsize = self._stepsize, abstol = self.abstol, adaptive = self.adaptive)
+        elif self.solver == "ADMM":
+            sim = ADMM(self.M, y, self.proximal,
+                       x0, self.penalty_parameter, maxit = self.maxit, inner_max_it = self.inner_max_it, adaptive = self.adaptive)
+        elif self.solver == "ScipyLinearLSQ":
+            A_op = sp.sparse.linalg.LinearOperator((sum([llh.distribution.dim for llh in self.likelihoods])+self.target.prior.dim, self.target.prior.dim),
+                                                   matvec=lambda x: self.M(x, 1),
+                                                   rmatvec=lambda x: self.M(x, 2)
+                                                   )
+            sim = ScipyLinearLSQ(A_op, y, self.target.prior._box_bounds,
+                                 max_iter = self.maxit,
+                                 lsmr_maxiter = self.inner_max_it,
+                                 tol = self.abstol,
+                                 lsmr_tol = self.inner_abstol)
+        elif self.solver == "ScipyMinimizer":
+            # Adapt bounds format, as scipy.minimize requires a bounds format
+            # different than that in scipy.lsq_linear.
+            bounds = [(self.target.prior._box_bounds[0][i], self.target.prior._box_bounds[1][i]) for i in range(self.target.prior.dim)]
+            # Note that the objective function is defined as 0.5*||Mx-y||^2,
+            # and the corresponding gradient (gradfunc) is given by M^T(Mx-y).
+            sim = ScipyMinimizer(lambda x: 0.5*np.sum((self.M(x, 1)-y)**2), x0, gradfunc=lambda x: self.M(self.M(x, 1) - y, 2), bounds=bounds, tol=self.abstol, options={"maxiter": self.maxit})
+        else:
+            raise ValueError("Choice of solver not supported.")
 
-
+        sol, _ = sim.solve()
+        return sol
+
+    def step(self):
+        y = self.b_tild + np.random.randn(len(self.b_tild))
 
+        self.current_point = self._customized_step(y, self.inner_initial_point)
 
+        acc = 1
+        return acc
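To make the revised interface in this file concrete, the following is a hedged usage sketch. The posterior is assembled with calls that appear verbatim in the diff (LinearModel, Gaussian with sqrtprec, to_likelihood, Posterior), and the inner_initial_point and callback arguments follow the new docstrings above. The synthetic problem data, the Gaussian prior constructed from sqrtprec only, and the sample()/get_samples() method names on the Sampler base class are assumptions not shown in this diff.

import numpy as np
import cuqi

# Synthetic linear problem (illustrative only, not part of the diff).
rng = np.random.default_rng(0)
A = rng.standard_normal((30, 10))            # m-by-n forward matrix
data = rng.standard_normal(30)               # m-dimensional measured data

# Posterior built with the same calls used in the removed constructor code above.
model = cuqi.model.LinearModel(A)
likelihood = cuqi.distribution.Gaussian(model, sqrtprec=np.eye(30)).to_likelihood(data)
prior = cuqi.distribution.Gaussian(np.zeros(10), sqrtprec=np.eye(10))
posterior = cuqi.distribution.Posterior(likelihood, prior)

# New callback signature documented above: callback(sampler, sample_index, num_of_samples).
def monitor(sampler, sample_index, num_of_samples):
    if sample_index % 50 == 0:
        print(f"LinearRTO step {sample_index}/{num_of_samples}")

# inner_initial_point selects where each inner CGLS solve starts:
# "previous_sample" (default), "MAP", or an explicit array.
sampler = cuqi.sampler.LinearRTO(posterior, maxit=50, tol=1e-8,
                                 inner_initial_point="MAP", callback=monitor)
sampler.sample(200)                  # method names assumed from the Sampler base class
samples = sampler.get_samples()

# RegularizedLinearRTO adds solver selection ("FISTA", "ADMM", "ScipyLinearLSQ",
# "ScipyMinimizer") plus penalty_parameter and inner_max_it, but per its
# validate_target it requires a RegularizedGaussian or RegularizedGMRF prior.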