CUQIpy 1.3.0__py3-none-any.whl → 1.4.0.post0.dev61__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. cuqi/__init__.py +1 -0
  2. cuqi/_version.py +3 -3
  3. cuqi/density/_density.py +9 -1
  4. cuqi/distribution/__init__.py +1 -1
  5. cuqi/distribution/_beta.py +1 -1
  6. cuqi/distribution/_cauchy.py +2 -2
  7. cuqi/distribution/_distribution.py +24 -15
  8. cuqi/distribution/_joint_distribution.py +97 -12
  9. cuqi/distribution/_posterior.py +9 -0
  10. cuqi/distribution/_truncated_normal.py +3 -3
  11. cuqi/distribution/_uniform.py +36 -2
  12. cuqi/experimental/__init__.py +1 -1
  13. cuqi/experimental/_recommender.py +216 -0
  14. cuqi/experimental/geometry/_productgeometry.py +3 -3
  15. cuqi/geometry/_geometry.py +12 -1
  16. cuqi/implicitprior/__init__.py +1 -1
  17. cuqi/implicitprior/_regularizedGaussian.py +40 -4
  18. cuqi/implicitprior/_restorator.py +35 -1
  19. cuqi/legacy/__init__.py +2 -0
  20. cuqi/legacy/sampler/__init__.py +11 -0
  21. cuqi/legacy/sampler/_conjugate.py +55 -0
  22. cuqi/legacy/sampler/_conjugate_approx.py +52 -0
  23. cuqi/legacy/sampler/_cwmh.py +196 -0
  24. cuqi/legacy/sampler/_gibbs.py +231 -0
  25. cuqi/legacy/sampler/_hmc.py +335 -0
  26. cuqi/legacy/sampler/_langevin_algorithm.py +198 -0
  27. cuqi/legacy/sampler/_laplace_approximation.py +184 -0
  28. cuqi/legacy/sampler/_mh.py +190 -0
  29. cuqi/legacy/sampler/_pcn.py +244 -0
  30. cuqi/{experimental/mcmc → legacy/sampler}/_rto.py +134 -152
  31. cuqi/legacy/sampler/_sampler.py +182 -0
  32. cuqi/likelihood/_likelihood.py +1 -1
  33. cuqi/model/_model.py +1248 -357
  34. cuqi/pde/__init__.py +4 -0
  35. cuqi/pde/_observation_map.py +36 -0
  36. cuqi/pde/_pde.py +133 -32
  37. cuqi/problem/_problem.py +88 -82
  38. cuqi/sampler/__init__.py +120 -8
  39. cuqi/sampler/_conjugate.py +376 -35
  40. cuqi/sampler/_conjugate_approx.py +40 -16
  41. cuqi/sampler/_cwmh.py +132 -138
  42. cuqi/{experimental/mcmc → sampler}/_direct.py +1 -1
  43. cuqi/sampler/_gibbs.py +269 -130
  44. cuqi/sampler/_hmc.py +328 -201
  45. cuqi/sampler/_langevin_algorithm.py +282 -98
  46. cuqi/sampler/_laplace_approximation.py +87 -117
  47. cuqi/sampler/_mh.py +47 -157
  48. cuqi/sampler/_pcn.py +56 -211
  49. cuqi/sampler/_rto.py +206 -140
  50. cuqi/sampler/_sampler.py +540 -135
  51. cuqi/solver/_solver.py +6 -2
  52. cuqi/testproblem/_testproblem.py +2 -3
  53. cuqi/utilities/__init__.py +3 -1
  54. cuqi/utilities/_utilities.py +94 -12
  55. {CUQIpy-1.3.0.dist-info → cuqipy-1.4.0.post0.dev61.dist-info}/METADATA +6 -4
  56. cuqipy-1.4.0.post0.dev61.dist-info/RECORD +102 -0
  57. {CUQIpy-1.3.0.dist-info → cuqipy-1.4.0.post0.dev61.dist-info}/WHEEL +1 -1
  58. CUQIpy-1.3.0.dist-info/RECORD +0 -100
  59. cuqi/experimental/mcmc/__init__.py +0 -123
  60. cuqi/experimental/mcmc/_conjugate.py +0 -345
  61. cuqi/experimental/mcmc/_conjugate_approx.py +0 -76
  62. cuqi/experimental/mcmc/_cwmh.py +0 -193
  63. cuqi/experimental/mcmc/_gibbs.py +0 -318
  64. cuqi/experimental/mcmc/_hmc.py +0 -464
  65. cuqi/experimental/mcmc/_langevin_algorithm.py +0 -392
  66. cuqi/experimental/mcmc/_laplace_approximation.py +0 -156
  67. cuqi/experimental/mcmc/_mh.py +0 -80
  68. cuqi/experimental/mcmc/_pcn.py +0 -89
  69. cuqi/experimental/mcmc/_sampler.py +0 -566
  70. cuqi/experimental/mcmc/_utilities.py +0 -17
  71. {CUQIpy-1.3.0.dist-info → cuqipy-1.4.0.post0.dev61.dist-info/licenses}/LICENSE +0 -0
  72. {CUQIpy-1.3.0.dist-info → cuqipy-1.4.0.post0.dev61.dist-info}/top_level.txt +0 -0
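
Taken together, the renames and deletions above describe one refactor: the samplers formerly under `cuqi.experimental.mcmc` now populate `cuqi.sampler`, while the pre-1.4.0 sampler implementations are retained under a new `cuqi.legacy.sampler` package. A minimal sketch of the implied import migration, assuming the class names carry over unchanged (not verified against the released wheel):

.. code-block:: python

    # CUQIpy 1.3.0 -- this module is deleted in 1.4.0 (files 59-70 above):
    # from cuqi.experimental.mcmc import MH, ULA, MALA, UGLA

    # CUQIpy 1.4.0 -- assumed new home of the refactored samplers (files 38-50 above):
    from cuqi.sampler import MH, ULA, MALA, UGLA

    # Assumed escape hatch for code that depends on the old implementations:
    from cuqi.legacy.sampler import MH as LegacyMH

The three hunks below show the deleted `cuqi/experimental/mcmc` modules in full.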
cuqi/experimental/mcmc/_langevin_algorithm.py
@@ -1,392 +0,0 @@
- import numpy as np
- import cuqi
- from cuqi.experimental.mcmc import Sampler
- from cuqi.implicitprior import RestorationPrior, MoreauYoshidaPrior
- from cuqi.array import CUQIarray
- from copy import copy
-
- class ULA(Sampler): # Refactor to Proposal-based sampler?
-     """Unadjusted Langevin algorithm (ULA) (Roberts and Tweedie, 1996)
-
-     It approximately samples a distribution given its logpdf gradient based on
-     the Langevin diffusion dL_t = dW_t + 1/2*Nabla target.logd(L_t)dt, where
-     W_t is the `dim`-dimensional standard Brownian motion.
-     ULA results from the Euler-Maruyama discretization of this Langevin stochastic
-     differential equation (SDE).
-
-     For more details see: Roberts, G. O., & Tweedie, R. L. (1996). Exponential convergence
-     of Langevin distributions and their discrete approximations. Bernoulli, 341-363.
-
-     Parameters
-     ----------
-
-     target : `cuqi.distribution.Distribution`
-         The target distribution to sample. Must have logd and gradient methods. Custom logpdfs
-         and gradients are supported by using a :class:`cuqi.distribution.UserDefinedDistribution`.
-
-     initial_point : ndarray
-         Initial parameters. *Optional*
-
-     scale : float
-         The Langevin diffusion discretization time step (in practice, scale must
-         be smaller than 1/L, where L is the Lipschitz constant of the gradient of the
-         log target density, logd).
-
-     callback : callable, *Optional*
-         If set, this function will be called after every sample.
-         The signature of the callback function is `callback(sample, sample_index)`,
-         where `sample` is the current sample and `sample_index` is the index of the sample.
-         An example is shown in demos/demo31_callback.py.
-
-
-     Example
-     -------
-     .. code-block:: python
-
-         # Parameters
-         dim = 5 # Dimension of distribution
-         mu = np.arange(dim) # Mean of Gaussian
-         std = 1 # Standard deviation of Gaussian
-
-         # Logpdf function
-         logpdf_func = lambda x: -1/(std**2)*np.sum((x-mu)**2)
-         gradient_func = lambda x: -2/(std**2)*(x - mu)
-
-         # Define distribution from logpdf and gradient as UserDefinedDistribution
-         target = cuqi.distribution.UserDefinedDistribution(dim=dim, logpdf_func=logpdf_func,
-                                                            gradient_func=gradient_func)
-
-         # Set up sampler
-         sampler = cuqi.experimental.mcmc.ULA(target, scale=1/dim**2)
-
-         # Sample
-         sampler.sample(2000)
-
-     A Deblur example can be found in demos/demo27_ULA.py
-     # TODO: update demo once sampler merged
-     """
-
-     _STATE_KEYS = Sampler._STATE_KEYS.union({'scale', 'current_target_grad'})
-
-     def __init__(self, target=None, scale=1.0, **kwargs):
-
-         super().__init__(target, **kwargs)
-         self.initial_scale = scale
-
-     def _initialize(self):
-         self.scale = self.initial_scale
-         self.current_target_grad = self._eval_target_grad(self.current_point)
-
-     def validate_target(self):
-         try:
-             self._eval_target_grad(np.ones(self.dim))
-             pass
-         except (NotImplementedError, AttributeError):
-             raise ValueError("The target needs to have a gradient method")
-
-     def _eval_target_logd(self, x):
-         return None
-
-     def _eval_target_grad(self, x):
-         return self.target.gradient(x)
-
-     def _accept_or_reject(self, x_star, target_eval_star, target_grad_star):
-         """
-         Accepts the proposed state and updates the sampler's state accordingly, i.e.,
-         current_point, current_target_eval, and current_target_grad_eval.
-
-         Parameters
-         ----------
-         x_star :
-             The proposed state
-
-         target_eval_star:
-             The log likelihood evaluated at x_star
-
-         target_grad_star:
-             The gradient of the log likelihood evaluated at x_star
-
-         Returns
-         -------
-         scalar
-             1 (accepted)
-         """
-
-         self.current_point = x_star
-         self.current_target_grad = target_grad_star
-         acc = 1
-
-         return acc
-
-     def step(self):
-         # propose state
-         xi = cuqi.distribution.Normal(mean=np.zeros(self.dim), std=np.sqrt(self.scale)).sample()
-         x_star = self.current_point + 0.5*self.scale*self.current_target_grad + xi
-
-         # evaluate target
-         target_eval_star = self._eval_target_logd(x_star)
-         target_grad_star = self._eval_target_grad(x_star)
-
-         # accept or reject proposal
-         acc = self._accept_or_reject(x_star, target_eval_star, target_grad_star)
-
-         return acc
-
-     def tune(self, skip_len, update_count):
-         pass
-
-
- class MALA(ULA): # Refactor to Proposal-based sampler?
-     """ Metropolis-adjusted Langevin algorithm (MALA) (Roberts and Tweedie, 1996)
-
-     Samples a distribution given its logd and gradient (up to a constant) based on
-     the Langevin diffusion dL_t = dW_t + 1/2*Nabla target.logd(L_t)dt, where
-     W_t is the `dim`-dimensional standard Brownian motion.
-     A sample is first proposed by ULA and is then accepted or rejected according
-     to a Metropolis–Hastings step.
-     This accept-reject step allows us to remove the asymptotic bias of ULA.
-
-     For more details see: Roberts, G. O., & Tweedie, R. L. (1996). Exponential convergence
-     of Langevin distributions and their discrete approximations. Bernoulli, 341-363.
-
-     Parameters
-     ----------
-
-     target : `cuqi.distribution.Distribution`
-         The target distribution to sample. Must have logpdf and gradient methods. Custom logpdfs
-         and gradients are supported by using a :class:`cuqi.distribution.UserDefinedDistribution`.
-
-     initial_point : ndarray
-         Initial parameters. *Optional*
-
-     scale : float
-         The Langevin diffusion discretization time step (in practice, scale must
-         be smaller than 1/L, where L is the Lipschitz constant of the gradient of the
-         log target density, logd).
-
-     callback : callable, *Optional*
-         If set, this function will be called after every sample.
-         The signature of the callback function is `callback(sample, sample_index)`,
-         where `sample` is the current sample and `sample_index` is the index of the sample.
-         An example is shown in demos/demo31_callback.py.
-
-
-     Example
-     -------
-     .. code-block:: python
-
-         # Parameters
-         dim = 5 # Dimension of distribution
-         mu = np.arange(dim) # Mean of Gaussian
-         std = 1 # Standard deviation of Gaussian
-
-         # Logpdf function
-         logpdf_func = lambda x: -1/(std**2)*np.sum((x-mu)**2)
-         gradient_func = lambda x: -2/(std**2)*(x-mu)
-
-         # Define distribution from logpdf as UserDefinedDistribution (sample and gradients also supported)
-         target = cuqi.distribution.UserDefinedDistribution(dim=dim, logpdf_func=logpdf_func,
-                                                            gradient_func=gradient_func)
-
-         # Set up sampler
-         sampler = cuqi.experimental.mcmc.MALA(target, scale=1/5**2)
-
-         # Sample
-         sampler.sample(2000)
-
-     A Deblur example can be found in demos/demo28_MALA.py
-     # TODO: update demo once sampler merged
-     """
-
-     _STATE_KEYS = ULA._STATE_KEYS.union({'current_target_logd'})
-
-     def _initialize(self):
-         super()._initialize()
-         self.current_target_logd = self.target.logd(self.current_point)
-
-     def _eval_target_logd(self, x):
-         return self.target.logd(x)
-
-     def _accept_or_reject(self, x_star, target_eval_star, target_grad_star):
-         """
-         Accepts the proposed state according to a Metropolis step and updates
-         the sampler's state accordingly, i.e., current_point, current_target_eval,
-         and current_target_grad_eval.
-
-         Parameters
-         ----------
-         x_star :
-             The proposed state
-
-         target_eval_star:
-             The log likelihood evaluated at x_star
-
-         target_grad_star:
-             The gradient of the log likelihood evaluated at x_star
-
-         Returns
-         -------
-         scalar
-             1 if accepted, 0 otherwise
-         """
-         log_target_ratio = target_eval_star - self.current_target_logd
-         log_prop_ratio = self._log_proposal(self.current_point, x_star, target_grad_star) \
-             - self._log_proposal(x_star, self.current_point, self.current_target_grad)
-         log_alpha = min(0, log_target_ratio + log_prop_ratio)
-
-         # accept/reject with Metropolis
-         acc = 0
-         log_u = np.log(np.random.rand())
-         if (log_u <= log_alpha) and \
-            (not np.isnan(target_eval_star)) and \
-            (not np.isinf(target_eval_star)):
-             self.current_point = x_star
-             self.current_target_logd = target_eval_star
-             self.current_target_grad = target_grad_star
-             acc = 1
-         return acc
-
-     def tune(self, skip_len, update_count):
-         pass
-
-     def _log_proposal(self, theta_star, theta_k, g_logpi_k):
-         mu = theta_k + ((self.scale)/2)*g_logpi_k
-         misfit = theta_star - mu
-         return -0.5*((1/(self.scale))*(misfit.T @ misfit))
-
-
- class MYULA(ULA):
-     """Moreau-Yoshida Unadjusted Langevin algorithm (MYULA) (Durmus et al., 2018)
-
-     Samples a smoothed target distribution given its smoothed logpdf gradient.
-     It is based on the Langevin diffusion dL_t = dW_t + 1/2*Nabla target.logd(L_t)dt,
-     where W_t is a `dim`-dimensional standard Brownian motion.
-     It targets a differentiable density (partially) smoothed by the Moreau-Yoshida
-     envelope. The smoothed target density can be made arbitrarily close to the
-     true unsmoothed target density.
-
-     For more details see: Durmus, Alain, Eric Moulines, and Marcelo Pereyra.
-     "Efficient Bayesian
-     computation by proximal Markov chain Monte Carlo: when Langevin meets Moreau."
-     SIAM Journal on Imaging Sciences 11.1 (2018): 473-506.
-
-     Parameters
-     ----------
-
-     target : `cuqi.distribution.Distribution`
-         The target distribution to sample from. The target distribution results from
-         a differentiable likelihood and a prior of type RestorationPrior.
-
-     initial_point : ndarray
-         Initial parameters. *Optional*
-
-     scale : float
-         The Langevin diffusion discretization time step (in practice, scale must
-         be smaller than 1/L, where L is the Lipschitz constant of the gradient of the
-         log target density, logd).
-
-     smoothing_strength : float
-         This parameter controls the smoothing strength of MYULA.
-
-     callback : callable, *Optional*
-         If set, this function will be called after every sample.
-         The signature of the callback function is `callback(sample, sample_index)`,
-         where `sample` is the current sample and `sample_index` is the index of
-         the sample.
-         An example is shown in demos/demo31_callback.py.
-
-     A Deblur example can be found in demos/howtos/myula.py
-     # TODO: update demo once sampler merged
-     """
-     def __init__(self, target=None, scale=1.0, smoothing_strength=0.1, **kwargs):
-         self.smoothing_strength = smoothing_strength
-         super().__init__(target=target, scale=scale, **kwargs)
-
-     @Sampler.target.setter
-     def target(self, value):
-         """ Set the target density. Runs validation of the target. """
-         self._target = value
-
-         if self._target is not None:
-             # Create a smoothed target
-             self._smoothed_target = self._create_smoothed_target(value)
-
-             # Validate the target
-             self.validate_target()
-
-     def _create_smoothed_target(self, value):
-         """ Create a smoothed target using a Moreau-Yoshida envelope. """
-         copied_value = copy(value)
-         if isinstance(copied_value.prior, RestorationPrior):
-             # Access the prior name
-             name = value.prior.name
-             copied_value.prior = MoreauYoshidaPrior(
-                 copied_value.prior,
-                 self.smoothing_strength,
-                 name=name)
-         return copied_value
-
-     def validate_target(self):
-         # Call ULA target validation
-         super().validate_target()
-
-         # Additional validation for MYULA target
-         if isinstance(self.target.prior, MoreauYoshidaPrior):
-             raise ValueError(("The prior is already smoothed, apply"
-                               " ULA when using a MoreauYoshidaPrior."))
-         if not hasattr(self.target.prior, "restore"):
-             raise NotImplementedError(
-                 ("Using MYULA with a prior that does not have a restore method"
-                  " is not supported.")
-             )
-
-     def _eval_target_grad(self, x):
-         return self._smoothed_target.gradient(x)
-
- class PnPULA(MYULA):
-     """Plug-and-Play Unadjusted Langevin algorithm (PnP-ULA)
-     (Laumont et al., 2022)
-
-     Samples a smoothed target distribution given its smoothed logpdf gradient based on
-     the Langevin diffusion dL_t = dW_t + 1/2*Nabla target.logd(L_t)dt, where W_t is
-     a `dim`-dimensional standard Brownian motion.
-     It targets a differentiable density (partially) smoothed by a convolution
-     with a Gaussian kernel with zero mean and smoothing_strength variance. The
-     smoothed target density can be made arbitrarily close to the
-     true unsmoothed target density.
-
-     For more details see: Laumont, R., Bortoli, V. D., Almansa, A., Delon, J.,
-     Durmus, A., & Pereyra, M. (2022). Bayesian imaging using plug & play priors:
-     when Langevin meets Tweedie. SIAM Journal on Imaging Sciences, 15(2), 701-737.
-
-     Parameters
-     ----------
-
-     target : `cuqi.distribution.Distribution`
-         The target distribution to sample. The target distribution results from
-         a differentiable likelihood and a prior of type RestorationPrior.
-
-     initial_point : ndarray
-         Initial parameters. *Optional*
-
-     scale : float
-         The Langevin diffusion discretization time step (in practice, a scale of
-         1/L, where L is the Lipschitz constant of the gradient of the log target density,
-         is recommended but not guaranteed to be the optimal choice).
-
-     smoothing_strength : float
-         This parameter controls the smoothing strength of PnP-ULA.
-
-
-     callback : callable, *Optional*
-         If set, this function will be called after every sample.
-         The signature of the callback function is `callback(sample, sample_index)`,
-         where `sample` is the current sample and `sample_index` is the index of
-         the sample.
-         An example is shown in demos/demo31_callback.py.
-
-     # TODO: update demo once sampler merged
-     """
-     def __init__(self, target=None, scale=1.0, smoothing_strength=0.1, **kwargs):
-         super().__init__(target=target, scale=scale,
-                          smoothing_strength=smoothing_strength, **kwargs)
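
The `step()` method of the removed ULA class above implements the Euler-Maruyama update x_{k+1} = x_k + (scale/2)*grad_logd(x_k) + sqrt(scale)*xi_k with xi_k ~ N(0, I). A self-contained NumPy sketch of that same update on the docstring's Gaussian target (variable names are illustrative, not part of the library):

.. code-block:: python

    import numpy as np

    np.random.seed(0)

    # Gaussian target from the ULA docstring example above.
    dim = 5
    mu = np.arange(dim)
    std = 1.0
    grad_logd = lambda x: -2/(std**2)*(x - mu)

    scale = 1/dim**2                # step size; keep below 1/L as the docstring notes
    x = np.zeros(dim)               # initial point
    samples = np.empty((2000, dim))
    for k in range(2000):
        xi = np.sqrt(scale)*np.random.randn(dim)   # Brownian increment dW_t
        x = x + 0.5*scale*grad_logd(x) + xi        # unadjusted Langevin step
        samples[k] = x

MALA reuses exactly this proposal but filters it through the Metropolis correction in `_accept_or_reject`, which removes the discretization bias of ULA at the cost of possible rejections.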
cuqi/experimental/mcmc/_laplace_approximation.py
@@ -1,156 +0,0 @@
- import scipy as sp
- import numpy as np
- import cuqi
- from cuqi.solver import CGLS
- from cuqi.experimental.mcmc import Sampler
-
- class UGLA(Sampler):
-     """ Unadjusted (Gaussian) Laplace Approximation sampler
-
-     Samples an approximate posterior where the prior is approximated
-     by a Gaussian distribution. The likelihood must be Gaussian.
-
-     Currently only works for LMRF priors.
-
-     The inner solver is the Conjugate Gradient Least Squares (CGLS) solver.
-
-     For more details see: Uribe, Felipe, et al. A hybrid Gibbs sampler for edge-preserving
-     tomographic reconstruction with uncertain view angles. SIAM/ASA Journal on UQ,
-     https://doi.org/10.1137/21M1412268 (2022).
-
-     Parameters
-     ----------
-     target : `cuqi.distribution.Posterior`
-         The target posterior distribution to sample.
-
-     initial_point : ndarray, *Optional*
-         Initial parameters.
-         If not provided, it defaults to zeros.
-
-     maxit : int
-         Maximum number of inner iterations for the solver when generating one sample.
-         If not provided, it defaults to 50.
-
-     tol : float
-         Tolerance for the inner solver.
-         The inner solver will stop before maxit if the convergence check reaches tol.
-         If not provided, it defaults to 1e-4.
-
-     beta : float
-         Smoothing parameter for the Gaussian approximation of the Laplace distribution.
-         A small value in the range of 1e-7 to 1e-3 is recommended, though values outside this
-         range might give better results in some cases. Generally, a larger beta value makes
-         sampling easier but results in a worse approximation. See details in Section 3.3 of the paper.
-         If not provided, it defaults to 1e-5.
-
-     callback : callable, *Optional*
-         If set, this function will be called after every sample.
-         The signature of the callback function is `callback(sample, sample_index)`,
-         where `sample` is the current sample and `sample_index` is the index of the sample.
-         An example is shown in demos/demo31_callback.py.
-     """
-     def __init__(self, target=None, initial_point=None, maxit=50, tol=1e-4, beta=1e-5, **kwargs):
-
-         super().__init__(target=target, initial_point=initial_point, **kwargs)
-
-         # Parameters
-         self.maxit = maxit
-         self.tol = tol
-         self.beta = beta
-
-     def _initialize(self):
-         self._precompute()
-
-     @property
-     def prior(self):
-         return self.target.prior
-
-     @property
-     def likelihood(self):
-         return self.target.likelihood
-
-     @property
-     def model(self):
-         return self.target.model
-
-     @property
-     def _data(self):
-         return self.target.data - self.target.model._shift
-
-     def _precompute(self):
-
-         D = self.prior._diff_op
-         n = D.shape[0]
-
-         # Gaussian approximation of LMRF prior as function of x_k
-         def Lk_fun(x_k):
-             dd = 1/np.sqrt((D @ x_k)**2 + self.beta*np.ones(n))
-             W = sp.sparse.diags(dd)
-             return W.sqrt() @ D
-         self.Lk_fun = Lk_fun
-
-         self._m = len(self._data)
-         self._L1 = self.likelihood.distribution.sqrtprec
-
-         # If prior location is scalar, repeat it to match dimensions
-         if len(self.prior.location) == 1:
-             self._priorloc = np.repeat(self.prior.location, self.dim)
-         else:
-             self._priorloc = self.prior.location
-
-         # Initial Laplace approx
-         self._L2 = Lk_fun(self.initial_point)
-         self._L2mu = self._L2@self._priorloc
-         self._b_tild = np.hstack([self._L1@self._data, self._L2mu])
-
-         # Least squares form
-         def M(x, flag):
-             if flag == 1:
-                 out1 = self._L1 @ self.model._forward_func_no_shift(x) # Use forward function which excludes shift
-                 out2 = np.sqrt(1/self.prior.scale)*(self._L2 @ x)
-                 out = np.hstack([out1, out2])
-             elif flag == 2:
-                 idx = int(self._m)
-                 out1 = self.model._adjoint_func_no_shift(self._L1.T@x[:idx])
-                 out2 = np.sqrt(1/self.prior.scale)*(self._L2.T @ x[idx:])
-                 out = out1 + out2
-             return out
-         self.M = M
-
-     def step(self):
-         # Update Laplace approximation
-         self._L2 = self.Lk_fun(self.current_point)
-         self._L2mu = self._L2@self._priorloc
-         self._b_tild = np.hstack([self._L1@self._data, self._L2mu])
-
-         # Sample from approximate posterior
-         e = np.random.randn(len(self._b_tild))
-         y = self._b_tild + e # Perturb data
-         sim = CGLS(self.M, y, self.current_point, self.maxit, self.tol)
-         self.current_point, _ = sim.solve()
-         acc = 1
-         return acc
-
-     def tune(self, skip_len, update_count):
-         pass
-
-     def validate_target(self):
-         # Check target type
-         if not isinstance(self.target, cuqi.distribution.Posterior):
-             raise ValueError(f"To initialize an object of type {self.__class__}, 'target' needs to be of type 'cuqi.distribution.Posterior'.")
-
-         # Check affine model
-         if not isinstance(self.likelihood.model, cuqi.model.AffineModel):
-             raise TypeError("Model needs to be affine or linear")
-
-         # Check Gaussian likelihood
-         if not hasattr(self.likelihood.distribution, "sqrtprec"):
-             raise TypeError("Distribution in Likelihood must contain a sqrtprec attribute")
-
-         # Check that prior is LMRF
-         if not isinstance(self.prior, cuqi.distribution.LMRF):
-             raise ValueError('Unadjusted Gaussian Laplace approximation (UGLA) requires LMRF prior')
-
-     def _get_default_initial_point(self, dim):
-         """ Get the default initial point for the sampler. Defaults to an array of zeros. """
-         return np.zeros(dim)
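
The `validate_target` method above pins down what a UGLA target must look like: a `Posterior` built from an affine/linear model, a Gaussian likelihood with `sqrtprec`, and an LMRF prior. A hedged end-to-end sketch satisfying those checks, written against the `cuqi.sampler` location implied by the file list; the test-problem setup and the `warmup`/`sample`/`get_samples` driver calls follow the usual CUQIpy pattern but are not verified against the released wheel:

.. code-block:: python

    import cuqi

    # Forward model and data from the 1D deconvolution test problem.
    A, y_data, info = cuqi.testproblem.Deconvolution1D(phantom="square").get_components()
    n = A.domain_dim

    # The target must satisfy validate_target() above:
    # LMRF prior, Gaussian likelihood, affine/linear model.
    x = cuqi.distribution.LMRF(0, 0.01, geometry=n)
    y = cuqi.distribution.Gaussian(A @ x, 0.001)

    # Condition the joint distribution on the data to obtain the posterior.
    posterior = cuqi.distribution.JointDistribution(y, x)(y=y_data)

    sampler = cuqi.sampler.UGLA(posterior, maxit=50, tol=1e-4, beta=1e-5)
    sampler.warmup(200)
    sampler.sample(1000)
    samples = sampler.get_samples()

Each call to `step()` re-linearizes the Gaussian approximation of the LMRF prior at the current point and draws one sample via a perturbed CGLS solve, so `maxit` and `tol` trade per-sample cost against accuracy.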
cuqi/experimental/mcmc/_mh.py
@@ -1,80 +0,0 @@
- import numpy as np
- import cuqi
- from cuqi.experimental.mcmc import ProposalBasedSampler
-
-
- class MH(ProposalBasedSampler):
-     """ Metropolis-Hastings (MH) sampler.
-
-     Parameters
-     ----------
-     target : cuqi.density.Density
-         Target density or distribution.
-
-     proposal : cuqi.distribution.Distribution or callable
-         Proposal distribution. If None, a random walk MH is used (i.e., a Gaussian proposal with identity covariance).
-
-     scale : float
-         Scaling parameter for the proposal distribution.
-
-     kwargs : dict
-         Additional keyword arguments to be passed to the base class :class:`ProposalBasedSampler`.
-
-     """
-
-     _STATE_KEYS = ProposalBasedSampler._STATE_KEYS.union({'scale', '_scale_temp'})
-
-     def __init__(self, target=None, proposal=None, scale=1, **kwargs):
-         super().__init__(target, proposal=proposal, scale=scale, **kwargs)
-
-     def _initialize(self):
-         # Due to a bug? in the old MH, we must keep track of this extra variable to match behavior.
-         self._scale_temp = self.scale
-
-     def validate_target(self):
-         # Fail only when there is no log density, which is currently assumed to be the case when NaN is returned.
-         if np.isnan(self.target.logd(self._get_default_initial_point(self.dim))):
-             raise ValueError("Target does not have valid logd")
-
-     def validate_proposal(self):
-         if not isinstance(self.proposal, cuqi.distribution.Distribution):
-             raise ValueError("Proposal must be a cuqi.distribution.Distribution object")
-         if not self.proposal.is_symmetric:
-             raise ValueError("Proposal must be symmetric")
-
-     def step(self):
-         # propose state
-         xi = self.proposal.sample(1) # sample from the proposal
-         x_star = self.current_point + self.scale*xi.flatten() # MH proposal
-
-         # evaluate target
-         target_eval_star = self.target.logd(x_star)
-
-         # ratio and acceptance probability
-         ratio = target_eval_star - self.current_target_logd # proposal is symmetric
-         alpha = min(0, ratio)
-
-         # accept/reject
-         u_theta = np.log(np.random.rand())
-         acc = 0
-         if (u_theta <= alpha) and \
-            (not np.isnan(target_eval_star)) and \
-            (not np.isinf(target_eval_star)):
-             self.current_point = x_star
-             self.current_target_logd = target_eval_star
-             acc = 1
-
-         return acc
-
-     def tune(self, skip_len, update_count):
-         hat_acc = np.mean(self._acc[-skip_len:])
-
-         # d. compute new scaling parameter
-         zeta = 1/np.sqrt(update_count+1) # ensures that the variation of lambda(i) vanishes
-
-         # We use self._scale_temp here instead of self.scale in the update. This might be a bug,
-         # but it is equivalent to the old MH.
-         self._scale_temp = np.exp(np.log(self._scale_temp) + zeta*(hat_acc-0.234))
-
-         # update parameters
-         self.scale = min(self._scale_temp, 1)
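
For symmetry with the ULA/MALA docstring examples above, a hedged sketch of driving this MH sampler on the same user-defined Gaussian target. It assumes the class keeps its constructor under the new `cuqi.sampler` location, and that the base-class `warmup`/`sample`/`get_samples` driver API carries over; neither is verified against the released wheel:

.. code-block:: python

    import numpy as np
    import cuqi

    # Same user-defined Gaussian target as in the docstring examples above.
    dim = 5
    mu = np.arange(dim)
    std = 1

    target = cuqi.distribution.UserDefinedDistribution(
        dim=dim, logpdf_func=lambda x: -1/(std**2)*np.sum((x - mu)**2))

    # proposal=None gives the random-walk variant described in the docstring.
    sampler = cuqi.sampler.MH(target, scale=0.1)
    sampler.warmup(1000)   # adaptive phase: tune() drives acceptance toward 0.234
    sampler.sample(5000)   # sampling phase with the adapted scale
    samples = sampler.get_samples()

The 0.234 constant in `tune()` is the classical optimal acceptance rate for random-walk Metropolis in high dimension, and the `1/sqrt(update_count+1)` factor makes the adaptation diminishing, so the chain's transition kernel stabilizes over time.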