CUQIpy 1.1.1.post0.dev36__py3-none-any.whl → 1.4.1.post0.dev124__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of CUQIpy might be problematic.
Files changed (92)
  1. cuqi/__init__.py +2 -0
  2. cuqi/_version.py +3 -3
  3. cuqi/algebra/__init__.py +2 -0
  4. cuqi/algebra/_abstract_syntax_tree.py +358 -0
  5. cuqi/algebra/_ordered_set.py +82 -0
  6. cuqi/algebra/_random_variable.py +457 -0
  7. cuqi/array/_array.py +4 -13
  8. cuqi/config.py +7 -0
  9. cuqi/density/_density.py +9 -1
  10. cuqi/distribution/__init__.py +3 -2
  11. cuqi/distribution/_beta.py +7 -11
  12. cuqi/distribution/_cauchy.py +2 -2
  13. cuqi/distribution/_custom.py +0 -6
  14. cuqi/distribution/_distribution.py +31 -45
  15. cuqi/distribution/_gamma.py +7 -3
  16. cuqi/distribution/_gaussian.py +2 -12
  17. cuqi/distribution/_inverse_gamma.py +4 -10
  18. cuqi/distribution/_joint_distribution.py +112 -15
  19. cuqi/distribution/_lognormal.py +0 -7
  20. cuqi/distribution/{_modifiedhalfnormal.py → _modified_half_normal.py} +23 -23
  21. cuqi/distribution/_normal.py +34 -7
  22. cuqi/distribution/_posterior.py +9 -0
  23. cuqi/distribution/_truncated_normal.py +129 -0
  24. cuqi/distribution/_uniform.py +47 -1
  25. cuqi/experimental/__init__.py +2 -2
  26. cuqi/experimental/_recommender.py +216 -0
  27. cuqi/geometry/__init__.py +2 -0
  28. cuqi/geometry/_geometry.py +15 -1
  29. cuqi/geometry/_product_geometry.py +181 -0
  30. cuqi/implicitprior/__init__.py +5 -3
  31. cuqi/implicitprior/_regularized_gaussian.py +483 -0
  32. cuqi/implicitprior/{_regularizedGMRF.py → _regularized_gmrf.py} +4 -2
  33. cuqi/implicitprior/{_regularizedUnboundedUniform.py → _regularized_unbounded_uniform.py} +3 -2
  34. cuqi/implicitprior/_restorator.py +269 -0
  35. cuqi/legacy/__init__.py +2 -0
  36. cuqi/{experimental/mcmc → legacy/sampler}/__init__.py +7 -11
  37. cuqi/legacy/sampler/_conjugate.py +55 -0
  38. cuqi/legacy/sampler/_conjugate_approx.py +52 -0
  39. cuqi/legacy/sampler/_cwmh.py +196 -0
  40. cuqi/legacy/sampler/_gibbs.py +231 -0
  41. cuqi/legacy/sampler/_hmc.py +335 -0
  42. cuqi/{experimental/mcmc → legacy/sampler}/_langevin_algorithm.py +82 -111
  43. cuqi/legacy/sampler/_laplace_approximation.py +184 -0
  44. cuqi/legacy/sampler/_mh.py +190 -0
  45. cuqi/legacy/sampler/_pcn.py +244 -0
  46. cuqi/{experimental/mcmc → legacy/sampler}/_rto.py +132 -90
  47. cuqi/legacy/sampler/_sampler.py +182 -0
  48. cuqi/likelihood/_likelihood.py +9 -1
  49. cuqi/model/__init__.py +1 -1
  50. cuqi/model/_model.py +1361 -359
  51. cuqi/pde/__init__.py +4 -0
  52. cuqi/pde/_observation_map.py +36 -0
  53. cuqi/pde/_pde.py +134 -33
  54. cuqi/problem/_problem.py +93 -87
  55. cuqi/sampler/__init__.py +120 -8
  56. cuqi/sampler/_conjugate.py +376 -35
  57. cuqi/sampler/_conjugate_approx.py +40 -16
  58. cuqi/sampler/_cwmh.py +132 -138
  59. cuqi/{experimental/mcmc → sampler}/_direct.py +1 -1
  60. cuqi/sampler/_gibbs.py +288 -130
  61. cuqi/sampler/_hmc.py +328 -201
  62. cuqi/sampler/_langevin_algorithm.py +284 -100
  63. cuqi/sampler/_laplace_approximation.py +87 -117
  64. cuqi/sampler/_mh.py +47 -157
  65. cuqi/sampler/_pcn.py +65 -213
  66. cuqi/sampler/_rto.py +211 -142
  67. cuqi/sampler/_sampler.py +553 -136
  68. cuqi/samples/__init__.py +1 -1
  69. cuqi/samples/_samples.py +24 -18
  70. cuqi/solver/__init__.py +6 -4
  71. cuqi/solver/_solver.py +230 -26
  72. cuqi/testproblem/_testproblem.py +2 -3
  73. cuqi/utilities/__init__.py +6 -1
  74. cuqi/utilities/_get_python_variable_name.py +2 -2
  75. cuqi/utilities/_utilities.py +182 -2
  76. {CUQIpy-1.1.1.post0.dev36.dist-info → cuqipy-1.4.1.post0.dev124.dist-info}/METADATA +10 -6
  77. cuqipy-1.4.1.post0.dev124.dist-info/RECORD +101 -0
  78. {CUQIpy-1.1.1.post0.dev36.dist-info → cuqipy-1.4.1.post0.dev124.dist-info}/WHEEL +1 -1
  79. CUQIpy-1.1.1.post0.dev36.dist-info/RECORD +0 -92
  80. cuqi/experimental/mcmc/_conjugate.py +0 -197
  81. cuqi/experimental/mcmc/_conjugate_approx.py +0 -81
  82. cuqi/experimental/mcmc/_cwmh.py +0 -191
  83. cuqi/experimental/mcmc/_gibbs.py +0 -268
  84. cuqi/experimental/mcmc/_hmc.py +0 -470
  85. cuqi/experimental/mcmc/_laplace_approximation.py +0 -156
  86. cuqi/experimental/mcmc/_mh.py +0 -78
  87. cuqi/experimental/mcmc/_pcn.py +0 -89
  88. cuqi/experimental/mcmc/_sampler.py +0 -561
  89. cuqi/experimental/mcmc/_utilities.py +0 -17
  90. cuqi/implicitprior/_regularizedGaussian.py +0 -323
  91. {CUQIpy-1.1.1.post0.dev36.dist-info → cuqipy-1.4.1.post0.dev124.dist-info/licenses}/LICENSE +0 -0
  92. {CUQIpy-1.1.1.post0.dev36.dist-info → cuqipy-1.4.1.post0.dev124.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,269 @@ cuqi/implicitprior/_restorator.py
+ from abc import ABC, abstractmethod
+ from cuqi.distribution import Distribution
+ import numpy as np
+
+ class RestorationPrior(Distribution):
+     """
+     This class defines an implicit distribution associated with a restoration operator
+     (e.g. a denoiser). There are several works relating restoration operators to
+     priors, see
+     - Laumont et al. https://arxiv.org/abs/2103.04715
+     - Hu et al. https://openreview.net/pdf?id=x7d1qXEn1e
+     We can neither sample from this distribution nor compute its logpdf, except in
+     some cases. It allows us to apply algorithms such as MYULA and PnPULA.
+
+     Parameters
+     ----------
+     restorator : callable f(x, restoration_strength)
+         Function f that accepts input x to be restored and returns a two-element
+         tuple of the restored version of x and extra information about the
+         restoration operation. The second element can be of any type, including
+         `None` in case there is no information.
+
+     restorator_kwargs : dictionary
+         Dictionary of keyword arguments for the restorator.
+         It contains keyword arguments that will be passed to the
+         restorator f, for example algorithm parameters such as the number
+         of iterations or the stopping criterion.
+
+     potential : callable function, optional
+         The potential corresponds to the negative logpdf when it is accessible.
+         This function maps the parameter domain to the real numbers.
+         It can be provided if the user knows how to relate it to the restorator.
+         Example: if the restorator is the proximal operator of total variation (TV),
+         the potential is the TV function.
+     """
+     def __init__(self, restorator, restorator_kwargs=None, potential=None,
+                  **kwargs):
+         if restorator_kwargs is None:
+             restorator_kwargs = {}
+         self.restorator = restorator
+         self.restorator_kwargs = restorator_kwargs
+         self.potential = potential
+         super().__init__(**kwargs)
+
+     def restore(self, x, restoration_strength):
+         """This function allows us to restore the input x with the user-supplied
+         restorator. Extra information about the restoration operation is stored
+         in the `RestorationPrior` info attribute.
+
+         Parameters
+         ----------
+         x : ndarray
+             Parameter we want to restore.
+
+         restoration_strength : positive float
+             Strength of the restoration operation. In the case where the
+             restorator is a denoiser, this parameter might correspond to the
+             noise level.
+         """
+         restorator_return = self.restorator(x, restoration_strength=restoration_strength,
+                                             **self.restorator_kwargs)
+
+         if isinstance(restorator_return, tuple) and len(restorator_return) == 2:
+             solution, self.info = restorator_return
+         else:
+             raise ValueError("Unsupported return type from the user-supplied restorator function. "+
+                              "Please ensure that the restorator function returns a two-element tuple with the "+
+                              "restored solution as the first element and additional information about the "+
+                              "restoration as the second element. The second element can be of any type, "+
+                              "including `None` in case there is no particular information.")
+
+         return solution
+
+     def logpdf(self, x):
+         """The logpdf function. It returns the negative potential when a potential
+         is available and NaN otherwise, since the logpdf of the implicit prior is unknown."""
+         if self.potential is None:
+             return np.nan
+         else:
+             return -self.potential(x)
+
+     def _sample(self, N, rng=None):
+         raise NotImplementedError("The sample method is not implemented for the "
+                                   + "RestorationPrior class.")
+
+     @property
+     def _mutable_vars(self):
+         """ Returns the mutable variables of the distribution. """
+         # Currently mutable variables are not supported for user-defined
+         # distributions.
+         return []
+
+     def get_conditioning_variables(self):
+         """ Returns the conditioning variables of the distribution. """
+         # Currently conditioning variables are not supported for user-defined
+         # distributions.
+         return []
+
+
+ class MoreauYoshidaPrior(Distribution):
+     """
+     This class defines (implicit) smoothed priors to which we can apply
+     gradient-based algorithms. The smoothing is performed using
+     the Moreau-Yoshida envelope of the target prior potential.
+
+     In the following we give a detailed explanation of the
+     Moreau-Yoshida smoothing.
+
+     We consider a density such that -\log\pi(x) = g(x) with g convex, lsc,
+     proper but not differentiable. Consequently, we cannot apply any
+     algorithm requiring the gradient of g.
+     Idea:
+     We consider the Moreau envelope of g defined as
+
+     g_{smoothing_strength}(x) = inf_z 0.5*\| x-z \|_2^2/smoothing_strength + g(z).
+
+     g_{smoothing_strength} has some nice properties:
+     - g_{smoothing_strength}(x) --> g(x) as smoothing_strength --> 0 for all x
+     - \nabla g_{smoothing_strength} is 1/smoothing_strength-Lipschitz
+     - \nabla g_{smoothing_strength}(x) = (x - prox_g^{smoothing_strength}(x))/smoothing_strength for all x, with
+
+     prox_g^{smoothing_strength}(x) = argmin_z 0.5*\| x-z \|_2^2/smoothing_strength + g(z).
+
+     Consequently, we can apply any gradient-based algorithm with
+     g_{smoothing_strength} in lieu of g. These algorithms do not require the
+     full knowledge of g_{smoothing_strength} but only its gradient. The gradient
+     of g_{smoothing_strength} is fully determined by prox_g^{smoothing_strength}
+     and smoothing_strength.
+     This is important because, although there exists an explicit formula for
+     g_{smoothing_strength}, it is rarely used in practice, as it would require
+     us to solve an optimization problem each time we want to
+     estimate g_{smoothing_strength}. Furthermore, there exist cases where we don't
+     know the regularization g with which the mapping prox_g^{smoothing_strength} is
+     associated.
+
+     Remark (Proximal operators are denoisers):
+     We consider the denoising inverse problem x = u + n, with
+     n \sim \mathcal{N}(0, smoothing_strength I).
+     A mapping solving a denoising inverse problem is called a denoiser. It takes
+     the noisy observation x as an input and returns a less noisy version of x
+     which is an estimate of u.
+     We assume a prior density \pi(u) \propto exp(-g(u)).
+     Then the MAP estimate is given by
+     x_MAP = \argmin_z 0.5*\| x-z \|_2^2/smoothing_strength + g(z) = prox_g^{smoothing_strength}(x).
+     Hence proximal operators are denoisers.
+
+     Remark (Denoisers are not necessarily proximal operators): Data-driven
+     denoisers are not necessarily proximal operators
+     (see https://arxiv.org/pdf/2201.13256).
+
+     Parameters
+     ----------
+     prior : RestorationPrior
+         Prior of the RestorationPrior type. In order to stay within the MYULA
+         framework, the restorator of RestorationPrior must be a proximal operator.
+
+     smoothing_strength : float
+         Smoothing strength of the Moreau-Yoshida envelope of the prior potential.
+     """
+
+     def __init__(self, prior: RestorationPrior, smoothing_strength=0.1,
+                  **kwargs):
+         self.prior = prior
+         self.smoothing_strength = smoothing_strength
+
+         # If kwargs does not contain the geometry,
+         # we set it to the geometry of the prior, if it exists.
+         if "geometry" in kwargs:
+             raise ValueError(
+                 "The geometry parameter is not supported for the "
+                 + "MoreauYoshidaPrior class. The geometry is "
+                 + "automatically set to the geometry of the prior.")
+         try:
+             geometry = prior.geometry
+         except:
+             geometry = None
+
+         super().__init__(geometry=geometry, **kwargs)
+
+     @property
+     def geometry(self):
+         return self.prior.geometry
+
+     @geometry.setter
+     def geometry(self, value):
+         self.prior.geometry = value
+
+     @property
+     def smoothing_strength(self):
+         """ smoothing_strength of the distribution """
+         return self._smoothing_strength
+
+     @smoothing_strength.setter
+     def smoothing_strength(self, value):
+         self._smoothing_strength = value
+
+     @property
+     def prior(self):
+         """Getter for the underlying RestorationPrior."""
+         return self._prior
+
+     @prior.setter
+     def prior(self, value):
+         self._prior = value
+
+     def gradient(self, x):
+         """Gradient of the logpdf of the smoothed implicit prior, i.e. minus the
+         gradient of the Moreau-Yoshida envelope of the potential."""
+         return -(x - self.prior.restore(x, self.smoothing_strength))/self.smoothing_strength
+
+     def logpdf(self, x):
+         """The logpdf function. It returns NaN when the potential of the
+         underlying prior is unknown."""
+         if self.prior.potential is None:
+             return np.nan
+         else:
+             return -(self.prior.potential(self.prior.restore(x, self.smoothing_strength))*self.smoothing_strength +
+                      0.5*((x-self.prior.restore(x, self.smoothing_strength))**2).sum())
+
+     def _sample(self, N, rng=None):
+         raise NotImplementedError("The sample method is not implemented for the "
+                                   + f"{self.__class__.__name__} class.")
+
+     @property
+     def _mutable_vars(self):
+         """ Returns the mutable variables of the distribution. """
+         # Currently mutable variables are not supported for user-defined
+         # distributions.
+         return []
+
+     def get_conditioning_variables(self):
+         """ Returns the conditioning variables of the distribution. """
+         # Currently conditioning variables are not supported for user-defined
+         # distributions.
+         return []
+
+ class TweediePrior(MoreauYoshidaPrior):
+     """
+     Alias for MoreauYoshidaPrior following Tweedie's formula framework. TweediePrior
+     defines priors whose gradients are computed via Tweedie's identity, which links
+     MMSE (Minimum Mean Square Error) denoisers with the underlying smoothed prior, see:
+     - Laumont et al. https://arxiv.org/abs/2103.04715 or https://doi.org/10.1137/21M1406349
+
+     Tweedie's Formula
+     -----------------
+     In the context of denoising, Tweedie's identity states that for a signal x
+     corrupted by Gaussian noise:
+
+     ∇_x log p_e(x) = (D_e(x) - x) / e
+
+     where D_e(x) is the MMSE denoiser output and e is the noise variance.
+     This enables us to perform gradient-based sampling with algorithms like ULA.
+
+     At the implementation level, TweediePrior shares identical functionality with MoreauYoshidaPrior.
+     Thus, it is implemented as an alias of MoreauYoshidaPrior, meaning all methods,
+     properties, and behavior are identical. The separate name provides clarity when
+     working specifically with Tweedie's formula-based approaches.
+
+     Parameters
+     ----------
+     prior : RestorationPrior
+         Prior of the RestorationPrior type containing a denoiser/restorator.
+
+     smoothing_strength : float, default=0.1
+         Corresponds to the noise variance e in Tweedie's formula context.
+
+     See MoreauYoshidaPrior for the underlying implementation with complete documentation.
+     """
+     pass
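The docstrings above fix two contracts: the restorator must return a (restored_x, info) tuple, and MoreauYoshidaPrior.gradient evaluates -(x - prox(x))/smoothing_strength. A minimal usage sketch follows; the soft-thresholding restorator (proximal operator of the l1 norm), the potential, and the `geometry` keyword passed to RestorationPrior are illustrative assumptions, not part of this diff, and it assumes both classes are exported from cuqi.implicitprior.

    import numpy as np
    from cuqi.implicitprior import RestorationPrior, MoreauYoshidaPrior

    # Hypothetical restorator: prox of the l1 norm (soft-thresholding).
    # It must return a (restored_x, info) tuple, as RestorationPrior.restore expects.
    def l1_prox(x, restoration_strength):
        return np.sign(x) * np.maximum(np.abs(x) - restoration_strength, 0.0), None

    n = 16
    prior = RestorationPrior(l1_prox, potential=lambda x: np.sum(np.abs(x)), geometry=n)
    smoothed = MoreauYoshidaPrior(prior, smoothing_strength=0.1)

    x = np.random.randn(n)
    grad = smoothed.gradient(x)   # evaluates -(x - l1_prox(x, 0.1)) / 0.1
    logp = smoothed.logpdf(x)     # finite here because a potential was supplied

A gradient of this form is what MYULA/PnPULA-style samplers consume; they never need the smoothed potential itself, only its gradient through the restorator.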
@@ -0,0 +1,2 @@ cuqi/legacy/__init__.py
+ """ Legacy module for functionalities that are no longer supported or developed. """
+ from . import sampler
@@ -1,15 +1,11 @@ cuqi/legacy/sampler/__init__.py (moved from cuqi/experimental/mcmc/__init__.py)
- """ Re-implementation of sampler module in a more object oriented way. """
-
  from ._sampler import Sampler, ProposalBasedSampler
+ from ._conjugate import Conjugate
+ from ._conjugate_approx import ConjugateApprox
+ from ._cwmh import CWMH
+ from ._gibbs import Gibbs
+ from ._hmc import NUTS
  from ._langevin_algorithm import ULA, MALA
+ from ._laplace_approximation import UGLA
  from ._mh import MH
- from ._pcn import PCN
+ from ._pcn import pCN
  from ._rto import LinearRTO, RegularizedLinearRTO
- from ._cwmh import CWMH
- from ._laplace_approximation import UGLA
- from ._hmc import NUTS
- from ._gibbs import HybridGibbs
- from ._conjugate import Conjugate
- from ._conjugate_approx import ConjugateApprox
- from ._direct import Direct
- from ._utilities import find_valid_samplers
@@ -0,0 +1,55 @@ cuqi/legacy/sampler/_conjugate.py
+ from cuqi.distribution import Posterior, Gaussian, Gamma, GMRF
+ from cuqi.implicitprior import RegularizedGaussian, RegularizedGMRF
+ import numpy as np
+
+ class Conjugate: # TODO: Subclass from Sampler once updated
+     """ Conjugate sampler
+
+     Sampler for sampling a posterior distribution where the likelihood and prior are conjugate.
+
+     Currently supported conjugate pairs are:
+     - (Gaussian, Gamma)
+     - (GMRF, Gamma)
+     - (RegularizedGaussian, Gamma) with nonnegativity constraints only
+
+     For more information on conjugate pairs, see https://en.wikipedia.org/wiki/Conjugate_prior.
+
+     For implicit regularized Gaussians see:
+
+     [1] Everink, Jasper M., Yiqiu Dong, and Martin S. Andersen. "Bayesian inference with projected densities." SIAM/ASA Journal on Uncertainty Quantification 11.3 (2023): 1025-1043.
+
+     """
+
+     def __init__(self, target: Posterior):
+         if not isinstance(target.likelihood.distribution, (Gaussian, GMRF, RegularizedGaussian, RegularizedGMRF)):
+             raise ValueError("Conjugate sampler only works with a Gaussian-type likelihood function")
+         if not isinstance(target.prior, Gamma):
+             raise ValueError("Conjugate sampler only works with Gamma prior")
+         if not target.prior.dim == 1:
+             raise ValueError("Conjugate sampler only works with univariate Gamma prior")
+
+         if isinstance(target.likelihood.distribution, (RegularizedGaussian, RegularizedGMRF)) and (target.likelihood.distribution.preset["constraint"] not in ["nonnegativity"] or target.likelihood.distribution.preset["regularization"] is not None):
+             raise ValueError("Conjugate sampler only works with an implicit regularized Gaussian likelihood with nonnegativity constraints")
+
+         self.target = target
+
+     def step(self, x=None):
+         # Extract variables
+         b = self.target.likelihood.data                                  # observed data
+         m = self._calc_m_for_Gaussians(b)                                # effective data dimension
+         Ax = self.target.likelihood.distribution.mean                    # forward-model output
+         L = self.target.likelihood.distribution(np.array([1])).sqrtprec  # square-root precision at unit parameter
+         alpha = self.target.prior.shape                                  # Gamma shape
+         beta = self.target.prior.rate                                    # Gamma rate
+
+         # Create Gamma distribution and sample
+         dist = Gamma(shape=m/2+alpha, rate=0.5*np.linalg.norm(L@(Ax-b))**2+beta)
+
+         return dist.sample()
+
+     def _calc_m_for_Gaussians(self, b):
+         """ Helper method to calculate the m parameter for the Gaussian-Gamma conjugate pair. """
+         if isinstance(self.target.likelihood.distribution, (Gaussian, GMRF)):
+             return len(b)
+         elif isinstance(self.target.likelihood.distribution, (RegularizedGaussian, RegularizedGMRF)):
+             return np.count_nonzero(b) # See
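The `step` method draws the precision parameter directly from its closed-form conditional. A small self-contained NumPy check of that Gaussian-Gamma update (variable names are illustrative and independent of CUQIpy):

    import numpy as np

    rng = np.random.default_rng(0)
    m, alpha, beta = 50, 1.0, 1e-2        # data size and Gamma(shape, rate) hyperparameters
    Ax = np.zeros(m)                      # forward-model output (stand-in)
    b = Ax + rng.standard_normal(m)       # observed data
    L = np.eye(m)                         # sqrtprec of the likelihood at unit precision

    # Conditional used in Conjugate.step: l | b ~ Gamma(m/2 + alpha, 0.5*||L(Ax-b)||^2 + beta)
    shape = m / 2 + alpha
    rate = 0.5 * np.linalg.norm(L @ (Ax - b)) ** 2 + beta
    l_sample = rng.gamma(shape, 1.0 / rate)   # NumPy parameterizes Gamma by scale = 1/rate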
@@ -0,0 +1,52 @@ cuqi/legacy/sampler/_conjugate_approx.py
+ from cuqi.distribution import Posterior, LMRF, Gamma
+ import numpy as np
+ import scipy as sp
+
+ class ConjugateApprox: # TODO: Subclass from Sampler once updated
+     """ Approximate conjugate sampler
+
+     Sampler for sampling a posterior distribution where the likelihood and prior can be approximated
+     by a conjugate pair.
+
+     Currently supported pairs are:
+     - (LMRF, Gamma): Approximated by (Gaussian, Gamma)
+
+     For more information on conjugate pairs, see https://en.wikipedia.org/wiki/Conjugate_prior.
+
+     """
+
+
+     def __init__(self, target: Posterior):
+         if not isinstance(target.likelihood.distribution, LMRF):
+             raise ValueError("Conjugate sampler only works with an LMRF (Laplace diff) likelihood function")
+         if not isinstance(target.prior, Gamma):
+             raise ValueError("Conjugate sampler only works with Gamma prior")
+         self.target = target
+
+     def step(self, x=None):
+         # Extract variables
+         # Here we approximate the LMRF (Laplace diff) prior with a Gaussian
+
+         # Extract diff_op from target likelihood
+         D = self.target.likelihood.distribution._diff_op
+         n = D.shape[0]
+
+         # Gaussian approximation of LMRF prior as function of x_k
+         # See Uribe et al. (2022) for details
+         # Currently has a zero-mean assumption on the likelihood! TODO
+         beta = 1e-5
+         def Lk_fun(x_k):
+             dd = 1/np.sqrt((D @ x_k)**2 + beta*np.ones(n))
+             W = sp.sparse.diags(dd)
+             return W.sqrt() @ D
+
+         x = self.target.likelihood.data       # observed data
+         d = len(x)                            # data dimension
+         Lx = Lk_fun(x)@x                      # weighted differences of the data
+         alpha = self.target.prior.shape       # Gamma shape
+         beta = self.target.prior.rate         # Gamma rate
+
+         # Create Gamma distribution and sample
+         dist = Gamma(shape=d+alpha, rate=np.linalg.norm(Lx)**2+beta)
+
+         return dist.sample()
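The weights in `Lk_fun` are an IRLS-style linearization of the LMRF potential around the current point. A standalone sketch of that construction (the difference operator `D` below is a toy stand-in, not CUQIpy's internal `_diff_op`):

    import numpy as np
    import scipy.sparse as sps

    n = 10
    D = sps.diags([np.ones(n), -np.ones(n - 1)], [0, -1]).tocsr()  # toy 1D difference operator
    x_k = np.random.randn(n)
    beta = 1e-5

    dd = 1 / np.sqrt((D @ x_k) ** 2 + beta)   # per-component weights 1/sqrt((Dx_k)_i^2 + beta)
    L_k = sps.diags(np.sqrt(dd)) @ D          # same effect as W.sqrt() @ D in the sampler

    # ||L_k x_k||^2 approximates sum_i |(D x_k)_i|, which is how the LMRF term
    # is replaced by a Gaussian term in the approximate conjugate step.
    approx = np.linalg.norm(L_k @ x_k) ** 2
    exact = np.sum(np.abs(D @ x_k))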
@@ -0,0 +1,196 @@ cuqi/legacy/sampler/_cwmh.py
+ import numpy as np
+ import cuqi
+ from cuqi.legacy.sampler import ProposalBasedSampler
+
+
+ class CWMH(ProposalBasedSampler):
+     """Component-wise Metropolis-Hastings sampler.
+
+     Allows sampling of a target distribution by component-wise random-walk sampling of a proposal distribution together with an accept/reject step.
+
+     Parameters
+     ----------
+
+     target : `cuqi.distribution.Distribution` or lambda function
+         The target distribution to sample. Custom logpdfs are supported by using a :class:`cuqi.distribution.UserDefinedDistribution`.
+
+     proposal : `cuqi.distribution.Distribution` or callable method
+         The proposal to sample from. If a callable method, it should provide a single independent sample from the proposal distribution. Defaults to a Gaussian proposal. *Optional*.
+
+     scale : float
+         Scale parameter used to define the correlation between the previous and the proposed sample in the random walk. *Optional*.
+
+     x0 : ndarray
+         Initial parameters. *Optional*.
+
+     dim : int
+         Dimension of the parameter space. Required if target and proposal are callable functions. *Optional*.
+
+     callback : callable, *Optional*
+         If set, this function will be called after every sample.
+         The signature of the callback function is `callback(sample, sample_index)`,
+         where `sample` is the current sample and `sample_index` is the index of the sample.
+         An example is shown in demos/demo31_callback.py.
+
+     Example
+     -------
+     .. code-block:: python
+
+         # Parameters
+         dim = 5             # Dimension of distribution
+         mu = np.arange(dim) # Mean of Gaussian
+         std = 1             # Standard deviation of Gaussian
+
+         # Logpdf function
+         logpdf_func = lambda x: -1/(std**2)*np.sum((x-mu)**2)
+
+         # Define distribution from logpdf as UserDefinedDistribution (sample and gradients also supported as inputs to UserDefinedDistribution)
+         target = cuqi.distribution.UserDefinedDistribution(dim=dim, logpdf_func=logpdf_func)
+
+         # Set up sampler
+         sampler = cuqi.legacy.sampler.CWMH(target, scale=1)
+
+         # Sample
+         samples = sampler.sample(2000)
+
+     """
+     def __init__(self, target, proposal=None, scale=1, x0=None, dim=None, **kwargs):
+         super().__init__(target, proposal=proposal, scale=scale, x0=x0, dim=dim, **kwargs)
+
+     @ProposalBasedSampler.proposal.setter
+     def proposal(self, value):
+         fail_msg = "Proposal should be either None, cuqi.distribution.Distribution conditioned only on 'location' and 'scale', a lambda function, or cuqi.distribution.Normal conditioned only on 'mean' and 'std'"
+
+         if value is None:
+             self._proposal = cuqi.distribution.Normal(mean=lambda location: location, std=lambda scale: scale, geometry=self.dim)
+
+         elif isinstance(value, cuqi.distribution.Distribution) and sorted(value.get_conditioning_variables()) == ['location', 'scale']:
+             self._proposal = value
+
+         elif isinstance(value, cuqi.distribution.Normal) and sorted(value.get_conditioning_variables()) == ['mean', 'std']:
+             self._proposal = value(mean=lambda location: location, std=lambda scale: scale)
+
+         elif not isinstance(value, cuqi.distribution.Distribution) and callable(value):
+             self._proposal = value
+
+         else:
+             raise ValueError(fail_msg)
+
+
+     def _sample(self, N, Nb):
+         Ns = N+Nb # number of simulations
+
+         # allocation
+         samples = np.empty((self.dim, Ns))
+         target_eval = np.empty(Ns)
+         acc = np.zeros((self.dim, Ns), dtype=int)
+
+         # initial state
+         samples[:, 0] = self.x0
+         target_eval[0] = self.target.logd(self.x0)
+         acc[:, 0] = np.ones(self.dim)
+
+         # run MCMC
+         for s in range(Ns-1):
+             # run component by component
+             samples[:, s+1], target_eval[s+1], acc[:, s+1] = self.single_update(samples[:, s], target_eval[s])
+
+             self._print_progress(s+2, Ns) # s+2 is the sample number, s+1 is the index assuming x0 is the first sample
+             self._call_callback(samples[:, s+1], s+1)
+
+         # remove burn-in
+         samples = samples[:, Nb:]
+         target_eval = target_eval[Nb:]
+         acccomp = acc[:, Nb:].mean(axis=1)
+         print('\nAverage acceptance rate all components:', acccomp.mean(), '\n')
+
+         return samples, target_eval, acccomp
+
+     def _sample_adapt(self, N, Nb):
+         # this follows the vanishing adaptation Algorithm 4 in:
+         # Andrieu and Thoms (2008) - A tutorial on adaptive MCMC
+         Ns = N+Nb # number of simulations
+
+         # allocation
+         samples = np.empty((self.dim, Ns))
+         target_eval = np.empty(Ns)
+         acc = np.zeros((self.dim, Ns), dtype=int)
+
+         # initial state
+         samples[:, 0] = self.x0
+         target_eval[0] = self.target.logd(self.x0)
+         acc[:, 0] = np.ones(self.dim)
+
+         # initial adaptation params
+         Na = int(0.1*N)                                      # iterations to adapt
+         hat_acc = np.empty((self.dim, int(np.floor(Ns/Na)))) # average acceptance rate of the chains
+         lambd = np.empty((self.dim, int(np.floor(Ns/Na)+1))) # scaling parameter \in (0,1)
+         lambd[:, 0] = self.scale
+         star_acc = 0.21/self.dim + 0.23                      # target acceptance rate for the random walk
+         i, idx = 0, 0
+
+         # run MCMC
+         for s in range(Ns-1):
+             # run component by component
+             samples[:, s+1], target_eval[s+1], acc[:, s+1] = self.single_update(samples[:, s], target_eval[s])
+
+             # adapt proposal spread of each component using the acceptance rate of past samples
+             if ((s+1) % Na == 0):
+                 # evaluate average acceptance rate
+                 hat_acc[:, i] = np.mean(acc[:, idx:idx+Na], axis=1)
+
+                 # compute new scaling parameter
+                 zeta = 1/np.sqrt(i+1) # ensures that the variation of lambda(i) vanishes
+                 lambd[:, i+1] = np.exp(np.log(lambd[:, i]) + zeta*(hat_acc[:, i]-star_acc))
+
+                 # update parameters
+                 self.scale = np.minimum(lambd[:, i+1], np.ones(self.dim))
+
+                 # update counters
+                 i += 1
+                 idx += Na
+
+             # display iterations
+             self._print_progress(s+2, Ns) # s+2 is the sample number, s+1 is the index assuming x0 is the first sample
+             self._call_callback(samples[:, s+1], s+1)
+
+         # remove burn-in
+         samples = samples[:, Nb:]
+         target_eval = target_eval[Nb:]
+         acccomp = acc[:, Nb:].mean(axis=1)
+         print('\nAverage acceptance rate all components:', acccomp.mean(), '\n')
+
+         return samples, target_eval, acccomp
+
+     def single_update(self, x_t, target_eval_t):
+         if isinstance(self.proposal, cuqi.distribution.Distribution):
+             x_i_star = self.proposal(location=x_t, scale=self.scale).sample()
+         else:
+             x_i_star = self.proposal(x_t, self.scale)
+         x_star = x_t.copy()
+         acc = np.zeros(self.dim)
+
+         for j in range(self.dim):
+             # propose state
+             x_star[j] = x_i_star[j]
+
+             # evaluate target
+             target_eval_star = self.target.logd(x_star)
+
+             # ratio and acceptance probability
+             ratio = target_eval_star - target_eval_t # proposal is symmetric
+             alpha = min(0, ratio)
+
+             # accept/reject
+             u_theta = np.log(np.random.rand())
+             if (u_theta <= alpha):
+                 x_t[j] = x_i_star[j]
+                 target_eval_t = target_eval_star
+                 acc[j] = 1
+             else:
+                 pass
+                 # x_t[j] = x_t[j]
+                 # target_eval_t = target_eval_t
+             x_star = x_t.copy()
+
+         return x_t, target_eval_t, acc
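The adaptive variant `_sample_adapt` follows the vanishing-adaptation rule of Andrieu and Thoms (2008), Algorithm 4. A standalone sketch of just that per-component scale update (names and values are illustrative):

    import numpy as np

    dim = 5
    star_acc = 0.21 / dim + 0.23          # target acceptance rate for the random walk
    lambd = np.full(dim, 0.5)             # current per-component scales

    def adapt_scale(lambd, hat_acc, i):
        """One adaptation step, given the per-component acceptance rates of the last batch."""
        zeta = 1 / np.sqrt(i + 1)         # vanishing step size
        new_lambd = np.exp(np.log(lambd) + zeta * (hat_acc - star_acc))
        return np.minimum(new_lambd, 1.0) # scales are capped at 1, as in the sampler

    # A batch that accepted too often (60%) pushes the scales up toward the cap.
    lambd = adapt_scale(lambd, hat_acc=np.full(dim, 0.6), i=0)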