CUQIpy 1.3.0.post0.dev298__py3-none-any.whl → 1.4.0.post0.dev92__py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registries.
Files changed (68)
  1. cuqi/__init__.py +2 -0
  2. cuqi/_version.py +3 -3
  3. cuqi/algebra/__init__.py +2 -0
  4. cuqi/{experimental/algebra/_randomvariable.py → algebra/_random_variable.py} +4 -4
  5. cuqi/density/_density.py +9 -1
  6. cuqi/distribution/_distribution.py +25 -16
  7. cuqi/distribution/_joint_distribution.py +99 -14
  8. cuqi/distribution/_posterior.py +9 -0
  9. cuqi/experimental/__init__.py +1 -4
  10. cuqi/experimental/_recommender.py +4 -4
  11. cuqi/geometry/__init__.py +2 -0
  12. cuqi/{experimental/geometry/_productgeometry.py → geometry/_product_geometry.py} +1 -1
  13. cuqi/implicitprior/__init__.py +1 -1
  14. cuqi/implicitprior/_restorator.py +35 -1
  15. cuqi/legacy/__init__.py +2 -0
  16. cuqi/legacy/sampler/__init__.py +11 -0
  17. cuqi/legacy/sampler/_conjugate.py +55 -0
  18. cuqi/legacy/sampler/_conjugate_approx.py +52 -0
  19. cuqi/legacy/sampler/_cwmh.py +196 -0
  20. cuqi/legacy/sampler/_gibbs.py +231 -0
  21. cuqi/legacy/sampler/_hmc.py +335 -0
  22. cuqi/legacy/sampler/_langevin_algorithm.py +198 -0
  23. cuqi/legacy/sampler/_laplace_approximation.py +184 -0
  24. cuqi/legacy/sampler/_mh.py +190 -0
  25. cuqi/legacy/sampler/_pcn.py +244 -0
  26. cuqi/legacy/sampler/_rto.py +284 -0
  27. cuqi/legacy/sampler/_sampler.py +182 -0
  28. cuqi/likelihood/_likelihood.py +1 -1
  29. cuqi/model/_model.py +225 -90
  30. cuqi/pde/__init__.py +4 -0
  31. cuqi/pde/_observation_map.py +36 -0
  32. cuqi/pde/_pde.py +52 -21
  33. cuqi/problem/_problem.py +87 -80
  34. cuqi/sampler/__init__.py +120 -8
  35. cuqi/sampler/_conjugate.py +376 -35
  36. cuqi/sampler/_conjugate_approx.py +40 -16
  37. cuqi/sampler/_cwmh.py +132 -138
  38. cuqi/{experimental/mcmc → sampler}/_direct.py +1 -1
  39. cuqi/sampler/_gibbs.py +276 -130
  40. cuqi/sampler/_hmc.py +328 -201
  41. cuqi/sampler/_langevin_algorithm.py +282 -98
  42. cuqi/sampler/_laplace_approximation.py +87 -117
  43. cuqi/sampler/_mh.py +47 -157
  44. cuqi/sampler/_pcn.py +65 -213
  45. cuqi/sampler/_rto.py +206 -140
  46. cuqi/sampler/_sampler.py +540 -135
  47. {cuqipy-1.3.0.post0.dev298.dist-info → cuqipy-1.4.0.post0.dev92.dist-info}/METADATA +1 -1
  48. cuqipy-1.4.0.post0.dev92.dist-info/RECORD +101 -0
  49. cuqi/experimental/algebra/__init__.py +0 -2
  50. cuqi/experimental/geometry/__init__.py +0 -1
  51. cuqi/experimental/mcmc/__init__.py +0 -122
  52. cuqi/experimental/mcmc/_conjugate.py +0 -396
  53. cuqi/experimental/mcmc/_conjugate_approx.py +0 -76
  54. cuqi/experimental/mcmc/_cwmh.py +0 -190
  55. cuqi/experimental/mcmc/_gibbs.py +0 -374
  56. cuqi/experimental/mcmc/_hmc.py +0 -460
  57. cuqi/experimental/mcmc/_langevin_algorithm.py +0 -382
  58. cuqi/experimental/mcmc/_laplace_approximation.py +0 -154
  59. cuqi/experimental/mcmc/_mh.py +0 -80
  60. cuqi/experimental/mcmc/_pcn.py +0 -89
  61. cuqi/experimental/mcmc/_rto.py +0 -306
  62. cuqi/experimental/mcmc/_sampler.py +0 -564
  63. cuqipy-1.3.0.post0.dev298.dist-info/RECORD +0 -100
  64. /cuqi/{experimental/algebra/_ast.py → algebra/_abstract_syntax_tree.py} +0 -0
  65. /cuqi/{experimental/algebra/_orderedset.py → algebra/_ordered_set.py} +0 -0
  66. {cuqipy-1.3.0.post0.dev298.dist-info → cuqipy-1.4.0.post0.dev92.dist-info}/WHEEL +0 -0
  67. {cuqipy-1.3.0.post0.dev298.dist-info → cuqipy-1.4.0.post0.dev92.dist-info}/licenses/LICENSE +0 -0
  68. {cuqipy-1.3.0.post0.dev298.dist-info → cuqipy-1.4.0.post0.dev92.dist-info}/top_level.txt +0 -0
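
The renames in the listing above show the 1.4.0 reorganization: the experimental namespaces are promoted to stable packages (cuqi.experimental.mcmc becomes cuqi.sampler, cuqi.experimental.algebra becomes cuqi.algebra, cuqi.experimental.geometry becomes cuqi.geometry), while the previous sampler implementations are kept under cuqi.legacy.sampler. A minimal sketch of how downstream code could follow the move, assuming the sampler classes (for example LinearRTO and PCN) are re-exported from the new cuqi.sampler package as the expanded cuqi/sampler/__init__.py suggests:

    # Sketch only: assumes LinearRTO and PCN are importable from cuqi.sampler
    # in 1.4.0, as the file moves in this diff suggest.
    try:
        from cuqi.sampler import LinearRTO, PCN            # new location (>= 1.4.0)
    except ImportError:
        from cuqi.experimental.mcmc import LinearRTO, PCN  # old location (1.3.x)
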
cuqi/experimental/mcmc/_pcn.py (deleted)
@@ -1,89 +0,0 @@
- import numpy as np
- import cuqi
- from cuqi.experimental.mcmc import Sampler
- from cuqi.array import CUQIarray
-
- class PCN(Sampler): # Refactor to Proposal-based sampler?
-
-     _STATE_KEYS = Sampler._STATE_KEYS.union({'scale', 'current_likelihood_logd', 'lambd'})
-
-     def __init__(self, target=None, scale=1.0, **kwargs):
-
-         super().__init__(target, **kwargs)
-         self.initial_scale = scale
-
-     def _initialize(self):
-         self.scale = self.initial_scale
-         self.current_likelihood_logd = self._loglikelihood(self.current_point)
-
-         # parameters used in the Robbins-Monro recursion for tuning the scale parameter
-         # see details and reference in the tune method
-         self.lambd = self.scale
-         self.star_acc = 0.44 #TODO: 0.234 # target acceptance rate
-
-     def validate_target(self):
-         if not isinstance(self.target, cuqi.distribution.Posterior):
-             raise ValueError(f"To initialize an object of type {self.__class__}, 'target' need to be of type 'cuqi.distribution.Posterior'.")
-         if not isinstance(self.prior, (cuqi.distribution.Gaussian, cuqi.distribution.Normal)):
-             raise ValueError("The prior distribution of the target need to be Gaussian")
-
-     def step(self):
-         # propose state
-         xi = self.prior.sample(1).flatten() # sample from the prior
-         x_star = np.sqrt(1-self.scale**2)*self.current_point + self.scale*xi # PCN proposal
-
-         # evaluate target
-         loglike_eval_star = self._loglikelihood(x_star)
-
-         # ratio and acceptance probability
-         ratio = loglike_eval_star - self.current_likelihood_logd # proposal is symmetric
-         alpha = min(0, ratio)
-
-         # accept/reject
-         acc = 0
-         u_theta = np.log(np.random.rand())
-         if (u_theta <= alpha):
-             self.current_point = x_star
-             self.current_likelihood_logd = loglike_eval_star
-             acc = 1
-
-         return acc
-
-     @property
-     def prior(self):
-         return self.target.prior
-
-     @property
-     def likelihood(self):
-         return self.target.likelihood
-
-     def _loglikelihood(self, x):
-         return self.likelihood.logd(x)
-
-     @property
-     def dim(self): # TODO. Check if we need this. Implemented in base class
-         if hasattr(self,'target') and hasattr(self.target,'dim'):
-             self._dim = self.target.dim
-         elif hasattr(self,'target') and isinstance(self.target,tuple) and len(self.target)==2:
-             self._dim = self.target[0].dim
-         return self._dim
-
-     def tune(self, skip_len, update_count):
-         """
-         Tune the scale parameter of the PCN sampler.
-         The tuning is based on algorithm 4 in Andrieu, Christophe, and Johannes Thoms.
-         "A tutorial on adaptive MCMC." Statistics and computing 18 (2008): 343-373.
-         Note: the tuning algorithm here is the same as the one used in MH sampler.
-         """
-
-         # average acceptance rate in the past skip_len iterations
-         hat_acc = np.mean(self._acc[-skip_len:])
-
-         # new scaling parameter zeta to be used in the Robbins-Monro recursion
-         zeta = 1/np.sqrt(update_count+1)
-
-         # Robbins-Monro recursion to ensure that the variation of lambd vanishes
-         self.lambd = np.exp(np.log(self.lambd) + zeta*(hat_acc-self.star_acc))
-
-         # update scale parameter
-         self.scale = min(self.lambd, 1)
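
The removed experimental _pcn.py above (superseded by cuqi/sampler/_pcn.py in this release) implements the preconditioned Crank-Nicolson proposal x_star = sqrt(1 - scale^2) * x + scale * xi, with xi drawn from the Gaussian prior, plus a Robbins-Monro adaptation of the scale. Since the proposal is prior-reversible, the acceptance ratio involves only the likelihood. A standalone sketch of the same two steps, assuming a zero-mean N(0, I) prior and a generic log-likelihood (an illustration, not the CUQIpy API):

    import numpy as np

    def pcn_step(x, loglike, loglike_x, scale, rng):
        # pCN proposal for a zero-mean Gaussian prior; accept with the
        # likelihood-only Metropolis ratio, as in PCN.step above.
        xi = rng.standard_normal(x.shape)
        x_star = np.sqrt(1 - scale**2) * x + scale * xi
        loglike_star = loglike(x_star)
        if np.log(rng.random()) <= min(0.0, loglike_star - loglike_x):
            return x_star, loglike_star, 1   # accept
        return x, loglike_x, 0               # reject

    def tune_scale(lambd, recent_acc, update_count, target_acc=0.44):
        # Robbins-Monro recursion mirroring PCN.tune: the step zeta decays,
        # so the adaptation of lambd (and hence of the scale) vanishes.
        zeta = 1 / np.sqrt(update_count + 1)
        lambd = np.exp(np.log(lambd) + zeta * (np.mean(recent_acc) - target_acc))
        return lambd, min(lambd, 1.0)

The scale grows when the recent acceptance rate exceeds the 0.44 target and shrinks otherwise, matching the behaviour of the removed tune method.
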
cuqi/experimental/mcmc/_rto.py (deleted)
@@ -1,306 +0,0 @@
- import scipy as sp
- from scipy.linalg.interpolative import estimate_spectral_norm
- from scipy.sparse.linalg import LinearOperator as scipyLinearOperator
- import numpy as np
- import cuqi
- from cuqi.solver import CGLS, FISTA, ADMM, ScipyLinearLSQ, ScipyMinimizer
- from cuqi.experimental.mcmc import Sampler
-
-
- class LinearRTO(Sampler):
-     """
-     Linear RTO (Randomize-Then-Optimize) sampler.
-
-     Samples posterior related to the inverse problem with Gaussian likelihood and prior, and where the forward model is linear or more generally affine.
-
-     Parameters
-     ------------
-     target : `cuqi.distribution.Posterior`, `cuqi.distribution.MultipleLikelihoodPosterior` or 5-dimensional tuple.
-         If target is of type cuqi.distribution.Posterior or cuqi.distribution.MultipleLikelihoodPosterior, it represents the posterior distribution.
-         If target is a 5-dimensional tuple, it assumes the following structure:
-         (data, model, L_sqrtprec, P_mean, P_sqrtrec)
-
-         Here:
-         data: is a m-dimensional numpy array containing the measured data.
-         model: is a m by n dimensional matrix, AffineModel or LinearModel representing the forward model.
-         L_sqrtprec: is the squareroot of the precision matrix of the Gaussian likelihood.
-         P_mean: is the prior mean.
-         P_sqrtprec: is the squareroot of the precision matrix of the Gaussian mean.
-
-     initial_point : `np.ndarray`
-         Initial point for the sampler. *Optional*.
-
-     maxit : int
-         Maximum number of iterations of the inner CGLS solver. *Optional*.
-
-     tol : float
-         Tolerance of the inner CGLS solver. *Optional*.
-
-     callback : callable, optional
-         A function that will be called after each sampling step. It can be useful for monitoring the sampler during sampling.
-         The function should take three arguments: the sampler object, the index of the current sampling step, the total number of requested samples. The last two arguments are integers. An example of the callback function signature is: `callback(sampler, sample_index, num_of_samples)`.
-
-     """
-     def __init__(self, target=None, initial_point=None, maxit=10, tol=1e-6, **kwargs):
-
-         super().__init__(target=target, initial_point=initial_point, **kwargs)
-
-         # Other parameters
-         self.maxit = maxit
-         self.tol = tol
-
-     def _initialize(self):
-         self._precompute()
-
-     @property
-     def prior(self):
-         return self.target.prior
-
-     @property
-     def likelihood(self):
-         return self.target.likelihood
-
-     @property
-     def likelihoods(self):
-         if isinstance(self.target, cuqi.distribution.Posterior):
-             return [self.target.likelihood]
-         elif isinstance(self.target, cuqi.distribution.MultipleLikelihoodPosterior):
-             return self.target.likelihoods
-
-     @property
-     def model(self):
-         return self.target.model
-
-     @property
-     def models(self):
-         if isinstance(self.target, cuqi.distribution.Posterior):
-             return [self.target.model]
-         elif isinstance(self.target, cuqi.distribution.MultipleLikelihoodPosterior):
-             return self.target.models
-
-     def _precompute(self):
-         L1 = [likelihood.distribution.sqrtprec for likelihood in self.likelihoods]
-         L2 = self.prior.sqrtprec
-         L2mu = self.prior.sqrtprecTimesMean
-
-         # pre-computations
-         self.n = self.prior.dim
-         self.b_tild = np.hstack([L@(likelihood.data - model._shift) for (L, likelihood, model) in zip(L1, self.likelihoods, self.models)]+ [L2mu]) # With shift from AffineModel
-         callability = [callable(likelihood.model) for likelihood in self.likelihoods]
-         notcallability = [not c for c in callability]
-         if all(notcallability):
-             self.M = sp.sparse.vstack([L@likelihood.model for (L, likelihood) in zip(L1, self.likelihoods)] + [L2])
-         elif all(callability):
-             # in this case, model is a function doing forward and backward operations
-             def M(x, flag):
-                 if flag == 1:
-                     out1 = [L @ likelihood.model._forward_func_no_shift(x) for (L, likelihood) in zip(L1, self.likelihoods)] # Use forward function which excludes shift
-                     out2 = L2 @ x
-                     out = np.hstack(out1 + [out2])
-                 elif flag == 2:
-                     idx_start = 0
-                     idx_end = 0
-                     out1 = np.zeros(self.n)
-                     for likelihood in self.likelihoods:
-                         idx_end += len(likelihood.data)
-                         out1 += likelihood.model._adjoint_func_no_shift(likelihood.distribution.sqrtprec.T@x[idx_start:idx_end])
-                         idx_start = idx_end
-                     out2 = L2.T @ x[idx_end:]
-                     out = out1 + out2
-                 return out
-             self.M = M
-         else:
-             raise TypeError("All likelihoods need to be callable or none need to be callable.")
-
-     def step(self):
-         y = self.b_tild + np.random.randn(len(self.b_tild))
-         sim = CGLS(self.M, y, self.current_point, self.maxit, self.tol)
-         self.current_point, _ = sim.solve()
-         acc = 1
-         return acc
-
-     def tune(self, skip_len, update_count):
-         pass
-
-     def validate_target(self):
-         # Check target type
-         if not isinstance(self.target, (cuqi.distribution.Posterior, cuqi.distribution.MultipleLikelihoodPosterior)):
-             raise ValueError(f"To initialize an object of type {self.__class__}, 'target' need to be of type 'cuqi.distribution.Posterior' or 'cuqi.distribution.MultipleLikelihoodPosterior'.")
-
-         # Check Linear model and Gaussian likelihood(s)
-         if isinstance(self.target, cuqi.distribution.Posterior):
-             if not isinstance(self.model, cuqi.model.AffineModel):
-                 raise TypeError("Model needs to be linear or more generally affine")
-
-             if not hasattr(self.likelihood.distribution, "sqrtprec"):
-                 raise TypeError("Distribution in Likelihood must contain a sqrtprec attribute")
-
-         elif isinstance(self.target, cuqi.distribution.MultipleLikelihoodPosterior): # Elif used for further alternatives, e.g., stacked posterior
-             for likelihood in self.likelihoods:
-                 if not isinstance(likelihood.model, cuqi.model.AffineModel):
-                     raise TypeError("Model needs to be linear or more generally affine")
-
-                 if not hasattr(likelihood.distribution, "sqrtprec"):
-                     raise TypeError("Distribution in Likelihood must contain a sqrtprec attribute")
-
-         # Check Gaussian prior
-         if not hasattr(self.prior, "sqrtprec"):
-             raise TypeError("prior must contain a sqrtprec attribute")
-
-         if not hasattr(self.prior, "sqrtprecTimesMean"):
-             raise TypeError("Prior must contain a sqrtprecTimesMean attribute")
-
-     def _get_default_initial_point(self, dim):
-         """ Get the default initial point for the sampler. Defaults to an array of zeros. """
-         return np.zeros(dim)
-
- class RegularizedLinearRTO(LinearRTO):
-     """
-     Regularized Linear RTO (Randomize-Then-Optimize) sampler.
-
-     Samples posterior related to the inverse problem with Gaussian likelihood and implicit Gaussian prior, and where the forward model is Linear.
-     The sampler works by repeatedly solving regularized linear least squares problems for perturbed data.
-     The solver for these optimization problems is chosen based on how the regularized is provided in the implicit Gaussian prior.
-     Currently we use the following solvers:
-         FISTA: [1] Beck, Amir, and Marc Teboulle. "A fast iterative shrinkage-thresholding algorithm for linear inverse problems." SIAM journal on imaging sciences 2.1 (2009): 183-202.
-             Used when prior.proximal is callable.
-         ADMM: [2] Boyd et al. "Distributed optimization and statistical learning via the alternating direction method of multipliers."Foundations and Trends® in Machine learning, 2011.
-             Used when prior.proximal is a list of penalty terms.
-         ScipyLinearLSQ: Wrapper for Scipy's lsq_linear for the Trust Region Reflective algorithm. Optionally used when the constraint is either "nonnegativity" or "box".
-         ScipyMinimizer: Wrapper for Scipy's minimize. Optionally used when the constraint is either "nonnegativity" or "box".
-
-     Parameters
-     ------------
-     target : `cuqi.distribution.Posterior`
-         See `cuqi.sampler.LinearRTO`
-
-     initial_point : `np.ndarray`
-         Initial point for the sampler. *Optional*.
-
-     maxit : int
-         Maximum number of iterations of the FISTA/ADMM/ScipyLinearLSQ/ScipyMinimizer solver. *Optional*.
-
-     inner_max_it : int
-         Maximum number of iterations of the CGLS solver used within the ADMM solver. *Optional*.
-
-     stepsize : string or float
-         If stepsize is a string and equals either "automatic", then the stepsize is automatically estimated based on the spectral norm.
-         If stepsize is a float, then this stepsize is used.
-
-     penalty_parameter : int
-         Penalty parameter of the ADMM solver. *Optional*.
-         See [2] or `cuqi.solver.ADMM`
-
-     abstol : float
-         Absolute tolerance of the FISTA/ScipyLinearLSQ/ScipyMinimizer solver. *Optional*.
-
-     inner_abstol : float
-         Tolerance parameter for ScipyLinearLSQ's inner solve of the unbounded least-squares problem. *Optional*.
-
-     adaptive : bool
-         If True, FISTA is used as solver, otherwise ISTA is used. *Optional*.
-
-     solver : string
-         Options are "FISTA" (default for a single constraint or regularization), "ADMM" (default and the only option for multiple constraints or regularizations), "ScipyLinearLSQ" and "ScipyMinimizer". Note "ScipyLinearLSQ" and "ScipyMinimizer" can only be used with `RegularizedGaussian` of a single `box` or `nonnegativity` constraint. *Optional*.
-
-     callback : callable, optional
-         A function that will be called after each sampling step. It can be useful for monitoring the sampler during sampling.
-         The function should take three arguments: the sampler object, the index of the current sampling step, the total number of requested samples. The last two arguments are integers. An example of the callback function signature is: `callback(sampler, sample_index, num_of_samples)`.
-
-     """
-     def __init__(self, target=None, initial_point=None, maxit=100, inner_max_it=10, stepsize="automatic", penalty_parameter=10, abstol=1e-10, adaptive=True, solver=None, inner_abstol=None, **kwargs):
-
-         super().__init__(target=target, initial_point=initial_point, **kwargs)
-
-         # Other parameters
-         self.stepsize = stepsize
-         self.abstol = abstol
-         self.inner_abstol = inner_abstol
-         self.adaptive = adaptive
-         self.maxit = maxit
-         self.inner_max_it = inner_max_it
-         self.penalty_parameter = penalty_parameter
-         self.solver = solver
-
-     def _initialize(self):
-         super()._initialize()
-         if self.solver is None:
-             self.solver = "FISTA" if callable(self.proximal) else "ADMM"
-         if self.solver == "FISTA":
-             self._stepsize = self._choose_stepsize()
-
-     @property
-     def solver(self):
-         return self._solver
-
-     @solver.setter
-     def solver(self, value):
-         if value == "ScipyLinearLSQ" or value == "ScipyMinimizer":
-             if (self.target.prior.preset["constraint"] == "nonnegativity" or self.target.prior.preset["constraint"] == "box"):
-                 self._solver = value
-             else:
-                 raise ValueError("ScipyLinearLSQ and ScipyMinimizer only support RegularizedGaussian with box or nonnegativity constraint.")
-         else:
-             self._solver = value
-
-     @property
-     def proximal(self):
-         return self.target.prior.proximal
-
-     def validate_target(self):
-         super().validate_target()
-         if not isinstance(self.target.prior, (cuqi.implicitprior.RegularizedGaussian, cuqi.implicitprior.RegularizedGMRF)):
-             raise TypeError("Prior needs to be RegularizedGaussian or RegularizedGMRF")
-
-     def _choose_stepsize(self):
-         if isinstance(self.stepsize, str):
-             if self.stepsize in ["automatic"]:
-                 if not callable(self.M):
-                     M_op = scipyLinearOperator(self.M.shape, matvec = lambda v: self.M@v, rmatvec = lambda w: self.M.T@w)
-                 else:
-                     M_op = scipyLinearOperator((len(self.b_tild), self.n), matvec = lambda v: self.M(v,1), rmatvec = lambda w: self.M(w,2))
-
-                 _stepsize = 0.99/(estimate_spectral_norm(M_op)**2)
-                 # print(f"Estimated stepsize for regularized Linear RTO: {_stepsize}")
-             else:
-                 raise ValueError("Stepsize choice not supported")
-         else:
-             _stepsize = self.stepsize
-         return _stepsize
-
-     @property
-     def prior(self):
-         return self.target.prior.gaussian
-
-     def step(self):
-         y = self.b_tild + np.random.randn(len(self.b_tild))
-
-         if self.solver == "FISTA":
-             sim = FISTA(self.M, y, self.proximal,
-                         self.current_point, maxit = self.maxit, stepsize = self._stepsize, abstol = self.abstol, adaptive = self.adaptive)
-         elif self.solver == "ADMM":
-             sim = ADMM(self.M, y, self.proximal,
-                        self.current_point, self.penalty_parameter, maxit = self.maxit, inner_max_it = self.inner_max_it, adaptive = self.adaptive)
-         elif self.solver == "ScipyLinearLSQ":
-             A_op = sp.sparse.linalg.LinearOperator((sum([llh.distribution.dim for llh in self.likelihoods])+self.target.prior.dim, self.target.prior.dim),
-                                                    matvec=lambda x: self.M(x, 1),
-                                                    rmatvec=lambda x: self.M(x, 2)
-                                                    )
-             sim = ScipyLinearLSQ(A_op, y, self.target.prior._box_bounds,
-                                  max_iter = self.maxit,
-                                  lsmr_maxiter = self.inner_max_it,
-                                  tol = self.abstol,
-                                  lsmr_tol = self.inner_abstol)
-         elif self.solver == "ScipyMinimizer":
-             # Adapt bounds format, as scipy.minimize requires a bounds format
-             # different than that in scipy.lsq_linear.
-             bounds = [(self.target.prior._box_bounds[0][i], self.target.prior._box_bounds[1][i]) for i in range(self.target.prior.dim)]
-             # Note that the objective function is defined as 0.5*||Mx-y||^2,
-             # and the corresponding gradient (gradfunc) is given by M^T(Mx-y).
-             sim = ScipyMinimizer(lambda x: 0.5*np.sum((self.M(x, 1)-y)**2), self.current_point, gradfunc=lambda x: self.M(self.M(x, 1) - y, 2), bounds=bounds, tol=self.abstol, options={"maxiter": self.maxit})
-         else:
-             raise ValueError("Choice of solver not supported.")
-
-         self.current_point, _ = sim.solve()
-         acc = 1
-         return acc
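
Both classes above reduce each draw to the same recipe: perturb the stacked right-hand side b_tild with standard normal noise and (approximately) solve a least-squares problem in the stacked square-root-precision system M. A standalone sketch of one LinearRTO-style draw for a dense linear model with noise covariance sigma^2 I and prior N(mu, delta^-1 I) (an illustration under these assumptions, not the CUQIpy API):

    import numpy as np

    def linear_rto_draw(A, b, sigma, delta, mu, rng):
        m, n = A.shape
        M = np.vstack([A / sigma, np.sqrt(delta) * np.eye(n)])     # stacked sqrt-precision system
        b_tild = np.concatenate([b / sigma, np.sqrt(delta) * mu])  # stacked right-hand side
        y = b_tild + rng.standard_normal(m + n)                    # randomize ...
        x, *_ = np.linalg.lstsq(M, y, rcond=None)                  # ... then optimize
        return x

When the least-squares problem is solved exactly, each call returns an exact sample from the Gaussian posterior; LinearRTO replaces the dense solve with the iterative CGLS solver shown above, and RegularizedLinearRTO adds the prior's proximal operator or constraints and switches to FISTA, ADMM, or the SciPy-based solvers.
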