CUQIpy 1.3.0.post0.dev298 → 1.4.0.post0.dev61 (py3-none-any wheel)

This diff shows the differences between two publicly released versions of the package, as published to a supported registry, and is provided for informational purposes only.
Files changed (59); an import-migration sketch follows the list.
  1. cuqi/__init__.py +1 -0
  2. cuqi/_version.py +3 -3
  3. cuqi/density/_density.py +9 -1
  4. cuqi/distribution/_distribution.py +24 -15
  5. cuqi/distribution/_joint_distribution.py +96 -11
  6. cuqi/distribution/_posterior.py +9 -0
  7. cuqi/experimental/__init__.py +1 -2
  8. cuqi/experimental/_recommender.py +4 -4
  9. cuqi/implicitprior/__init__.py +1 -1
  10. cuqi/implicitprior/_restorator.py +35 -1
  11. cuqi/legacy/__init__.py +2 -0
  12. cuqi/legacy/sampler/__init__.py +11 -0
  13. cuqi/legacy/sampler/_conjugate.py +55 -0
  14. cuqi/legacy/sampler/_conjugate_approx.py +52 -0
  15. cuqi/legacy/sampler/_cwmh.py +196 -0
  16. cuqi/legacy/sampler/_gibbs.py +231 -0
  17. cuqi/legacy/sampler/_hmc.py +335 -0
  18. cuqi/legacy/sampler/_langevin_algorithm.py +198 -0
  19. cuqi/legacy/sampler/_laplace_approximation.py +184 -0
  20. cuqi/legacy/sampler/_mh.py +190 -0
  21. cuqi/legacy/sampler/_pcn.py +244 -0
  22. cuqi/legacy/sampler/_rto.py +284 -0
  23. cuqi/legacy/sampler/_sampler.py +182 -0
  24. cuqi/likelihood/_likelihood.py +1 -1
  25. cuqi/model/_model.py +212 -77
  26. cuqi/pde/__init__.py +4 -0
  27. cuqi/pde/_observation_map.py +36 -0
  28. cuqi/pde/_pde.py +52 -21
  29. cuqi/problem/_problem.py +87 -80
  30. cuqi/sampler/__init__.py +120 -8
  31. cuqi/sampler/_conjugate.py +376 -35
  32. cuqi/sampler/_conjugate_approx.py +40 -16
  33. cuqi/sampler/_cwmh.py +132 -138
  34. cuqi/{experimental/mcmc → sampler}/_direct.py +1 -1
  35. cuqi/sampler/_gibbs.py +269 -130
  36. cuqi/sampler/_hmc.py +328 -201
  37. cuqi/sampler/_langevin_algorithm.py +282 -98
  38. cuqi/sampler/_laplace_approximation.py +87 -117
  39. cuqi/sampler/_mh.py +47 -157
  40. cuqi/sampler/_pcn.py +56 -211
  41. cuqi/sampler/_rto.py +206 -140
  42. cuqi/sampler/_sampler.py +540 -135
  43. {cuqipy-1.3.0.post0.dev298.dist-info → cuqipy-1.4.0.post0.dev61.dist-info}/METADATA +1 -1
  44. {cuqipy-1.3.0.post0.dev298.dist-info → cuqipy-1.4.0.post0.dev61.dist-info}/RECORD +47 -45
  45. cuqi/experimental/mcmc/__init__.py +0 -122
  46. cuqi/experimental/mcmc/_conjugate.py +0 -396
  47. cuqi/experimental/mcmc/_conjugate_approx.py +0 -76
  48. cuqi/experimental/mcmc/_cwmh.py +0 -190
  49. cuqi/experimental/mcmc/_gibbs.py +0 -374
  50. cuqi/experimental/mcmc/_hmc.py +0 -460
  51. cuqi/experimental/mcmc/_langevin_algorithm.py +0 -382
  52. cuqi/experimental/mcmc/_laplace_approximation.py +0 -154
  53. cuqi/experimental/mcmc/_mh.py +0 -80
  54. cuqi/experimental/mcmc/_pcn.py +0 -89
  55. cuqi/experimental/mcmc/_rto.py +0 -306
  56. cuqi/experimental/mcmc/_sampler.py +0 -564
  57. {cuqipy-1.3.0.post0.dev298.dist-info → cuqipy-1.4.0.post0.dev61.dist-info}/WHEEL +0 -0
  58. {cuqipy-1.3.0.post0.dev298.dist-info → cuqipy-1.4.0.post0.dev61.dist-info}/licenses/LICENSE +0 -0
  59. {cuqipy-1.3.0.post0.dev298.dist-info → cuqipy-1.4.0.post0.dev61.dist-info}/top_level.txt +0 -0
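The moves above suggest that the samplers previously shipped under cuqi.experimental.mcmc now live in cuqi.sampler, with the previous cuqi.sampler implementations preserved under cuqi.legacy.sampler. Below is a minimal sketch of how downstream code might cope with either layout; the assumption that the class names (here CWMH) and the Samples-returning API carry over unchanged is inferred from the file moves, not confirmed by this diff.

    import numpy as np
    import cuqi

    # Same toy target as in the removed CWMH docstring
    dim = 5
    mu = np.arange(dim)
    logpdf_func = lambda x: -np.sum((x - mu)**2)
    target = cuqi.distribution.UserDefinedDistribution(dim=dim, logpdf_func=logpdf_func)

    # 1.3.x exposes the new-style samplers under cuqi.experimental.mcmc;
    # this diff removes that package, so 1.4.x presumably exposes them from
    # cuqi.sampler instead (assumption based on the file moves above).
    try:
        from cuqi.experimental.mcmc import CWMH   # 1.3.x location (removed here)
    except ImportError:
        from cuqi.sampler import CWMH             # assumed 1.4.x location

    sampler = CWMH(target, scale=1.0)
    samples = sampler.sample(500).get_samples()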
cuqi/experimental/mcmc/_conjugate.py (file removed)
@@ -1,396 +0,0 @@
- import numpy as np
- from abc import ABC, abstractmethod
- import math
- from cuqi.experimental.mcmc import Sampler
- from cuqi.distribution import Posterior, Gaussian, Gamma, GMRF, ModifiedHalfNormal
- from cuqi.implicitprior import RegularizedGaussian, RegularizedGMRF, RegularizedUnboundedUniform
- from cuqi.utilities import get_non_default_args, count_nonzero, count_within_bounds, count_constant_components_1D, count_constant_components_2D, piecewise_linear_1D_DoF
- from cuqi.geometry import Continuous1D, Continuous2D, Image2D
-
- class Conjugate(Sampler):
-     """ Conjugate sampler
-
-     Sampler for sampling a posterior distribution which is a so-called "conjugate" distribution, i.e., where the likelihood and prior are conjugate to each other - denoted as a conjugate pair.
-
-     Currently supported conjugate pairs are:
-     - (Gaussian, Gamma) where Gamma is defined on the precision parameter of the Gaussian
-     - (GMRF, Gamma) where Gamma is defined on the precision parameter of the GMRF
-     - (RegularizedGaussian, Gamma) with preset constraints only and Gamma is defined on the precision parameter of the RegularizedGaussian
-     - (RegularizedGMRF, Gamma) with preset constraints only and Gamma is defined on the precision parameter of the RegularizedGMRF
-     - (RegularizedGaussian, ModifiedHalfNormal) with most of the preset constraints and regularization
-     - (RegularizedGMRF, ModifiedHalfNormal) with most of the preset constraints and regularization
-
-     Currently the Gamma and ModifiedHalfNormal distribution must be univariate.
-
-     A conjugate pair defines implicitly a so-called conjugate distribution which can be sampled from directly.
-
-     The conjugate parameter is the parameter that both the likelihood and prior PDF depend on.
-
-     For more information on conjugacy and conjugate distributions see https://en.wikipedia.org/wiki/Conjugate_prior.
-
-     For implicit regularized Gaussians and the corresponding conjugacy relations, see:
-
-     Section 3.3 from [1] Everink, Jasper M., Yiqiu Dong, and Martin S. Andersen. "Bayesian inference with projected densities." SIAM/ASA Journal on Uncertainty Quantification 11.3 (2023): 1025-1043.
-     Section 4 from [2] Everink, Jasper M., Yiqiu Dong, and Martin S. Andersen. "Sparse Bayesian inference with regularized Gaussian distributions." Inverse Problems 39.11 (2023): 115004.
-
-     """
-
-     def _initialize(self):
-         pass
-
-     @Sampler.target.setter # Overwrite the target setter to set the conjugate pair
-     def target(self, value):
-         """ Set the target density. Runs validation of the target. """
-         self._target = value
-         if self._target is not None:
-             self._set_conjugatepair()
-             self.validate_target()
-
-     def validate_target(self):
-         self._ensure_target_is_posterior()
-         self._conjugatepair.validate_target()
-
-     def step(self):
-         self.current_point = self._conjugatepair.sample()
-         return 1 # Returns acceptance rate of 1
-
-     def tune(self, skip_len, update_count):
-         pass # No tuning required for conjugate sampler
-
-     def _ensure_target_is_posterior(self):
-         """ Ensure that the target is a Posterior distribution. """
-         if not isinstance(self.target, Posterior):
-             raise TypeError("Conjugate sampler requires a target of type Posterior")
-
-     def _set_conjugatepair(self):
-         """ Set the conjugate pair based on the likelihood and prior. This requires target to be set. """
-         self._ensure_target_is_posterior()
-         if isinstance(self.target.likelihood.distribution, (Gaussian, GMRF)) and isinstance(self.target.prior, Gamma):
-             self._conjugatepair = _GaussianGammaPair(self.target)
-         elif isinstance(self.target.likelihood.distribution, RegularizedUnboundedUniform) and isinstance(self.target.prior, Gamma):
-             # Check RegularizedUnboundedUniform before RegularizedGaussian and RegularizedGMRF due to the first inheriting from the second.
-             self._conjugatepair = _RegularizedUnboundedUniformGammaPair(self.target)
-         elif isinstance(self.target.likelihood.distribution, (RegularizedGaussian, RegularizedGMRF)) and isinstance(self.target.prior, Gamma):
-             self._conjugatepair = _RegularizedGaussianGammaPair(self.target)
-         elif isinstance(self.target.likelihood.distribution, (RegularizedGaussian, RegularizedGMRF)) and isinstance(self.target.prior, ModifiedHalfNormal):
-             self._conjugatepair = _RegularizedGaussianModifiedHalfNormalPair(self.target)
-         else:
-             raise ValueError(f"Conjugacy is not defined for likelihood {type(self.target.likelihood.distribution)} and prior {type(self.target.prior)}, in CUQIpy")
-
-     def conjugate_distribution(self):
-         return self._conjugatepair.conjugate_distribution()
-
-     def __repr__(self):
-         msg = super().__repr__()
-         if hasattr(self, "_conjugatepair"):
-             msg += f"\n Conjugate pair:\n\t {type(self._conjugatepair).__name__.removeprefix('_')}"
-         return msg
-
- class _ConjugatePair(ABC):
-     """ Abstract base class for conjugate pairs (likelihood, prior) used in the Conjugate sampler. """
-
-     def __init__(self, target):
-         self.target = target
-
-     @abstractmethod
-     def validate_target(self):
-         """ Validate the target distribution for the conjugate pair. """
-         pass
-
-     @abstractmethod
-     def conjugate_distribution(self):
-         """ Returns the posterior distribution in the form of a CUQIpy distribution """
-         pass
-
-     def sample(self):
-         """ Sample from the conjugate distribution. """
-         return self.conjugate_distribution().sample()
-
-
- class _GaussianGammaPair(_ConjugatePair):
-     """ Implementation for the Gaussian-Gamma conjugate pair."""
-
-     def validate_target(self):
-         if self.target.prior.dim != 1:
-             raise ValueError("Gaussian-Gamma conjugacy only works with univariate Gamma prior")
-
-         key_value_pairs = _get_conjugate_parameter(self.target)
-         if len(key_value_pairs) != 1:
-             raise ValueError(f"Multiple references to conjugate parameter {self.target.prior.name} found in likelihood. Only one occurance is supported.")
-         for key, value in key_value_pairs:
-             if key == "cov":
-                 if not _check_conjugate_parameter_is_scalar_linear_reciprocal(value):
-                     raise ValueError("Gaussian-Gamma conjugate pair defined via covariance requires cov: lambda x : s/x for the conjugate parameter")
-             elif key == "prec":
-                 if not _check_conjugate_parameter_is_scalar_linear(value):
-                     raise ValueError("Gaussian-Gamma conjugate pair defined via precision requires prec: lambda x : s*x for the conjugate parameter")
-             else:
-                 raise ValueError(f"RegularizedGaussian-ModifiedHalfNormal conjugacy does not support the conjugate parameter {self.target.prior.name} in the {key} attribute. Only cov and prec")
-
-     def conjugate_distribution(self):
-         # Extract variables
-         b = self.target.likelihood.data # mu
-         m = len(b) # n
-         Ax = self.target.likelihood.distribution.mean # x_i
-         L = self.target.likelihood.distribution(np.array([1])).sqrtprec # L
-         alpha = self.target.prior.shape # alpha
-         beta = self.target.prior.rate # beta
-
-         # Create Gamma distribution and sample
-         return Gamma(shape=m/2 + alpha, rate=.5 * np.linalg.norm(L @ (Ax - b))**2 + beta)
-
-
- class _RegularizedGaussianGammaPair(_ConjugatePair):
-     """Implementation for the Regularized Gaussian-Gamma conjugate pair using the conjugacy rules from [1], Section 3.3."""
-
-     def validate_target(self):
-         if self.target.prior.dim != 1:
-             raise ValueError("RegularizedGaussian-Gamma conjugacy only works with univariate ModifiedHalfNormal prior")
-
-         # Raises error if preset is not supported
-         _compute_sparsity_level(self.target)
-
-         key_value_pairs = _get_conjugate_parameter(self.target)
-         if len(key_value_pairs) != 1:
-             raise ValueError(f"Multiple references to conjugate parameter {self.target.prior.name} found in likelihood. Only one occurance is supported.")
-         for key, value in key_value_pairs:
-             if key == "cov":
-                 if not _check_conjugate_parameter_is_scalar_linear_reciprocal(value):
-                     raise ValueError("Regularized Gaussian-Gamma conjugacy defined via covariance requires cov: lambda x : s/x for the conjugate parameter")
-             elif key == "prec":
-                 if not _check_conjugate_parameter_is_scalar_linear(value):
-                     raise ValueError("Regularized Gaussian-Gamma conjugacy defined via precision requires prec: lambda x : s*x for the conjugate parameter")
-             else:
-                 raise ValueError(f"RegularizedGaussian-ModifiedHalfNormal conjugacy does not support the conjugate parameter {self.target.prior.name} in the {key} attribute. Only cov and prec")
-
-     def conjugate_distribution(self):
-         # Extract variables
-         b = self.target.likelihood.data # mu
-         m = _compute_sparsity_level(self.target)
-         Ax = self.target.likelihood.distribution.mean # x_i
-         L = self.target.likelihood.distribution(np.array([1])).sqrtprec # L
-         alpha = self.target.prior.shape # alpha
-         beta = self.target.prior.rate # beta
-
-         # Create Gamma distribution and sample
-         return Gamma(shape=m/2 + alpha, rate=.5 * np.linalg.norm(L @ (Ax - b))**2 + beta)
-
-
- class _RegularizedUnboundedUniformGammaPair(_ConjugatePair):
-     """Implementation for the RegularizedUnboundedUniform-ModifiedHalfNormal conjugate pair using the conjugacy rules from [2], Section 4."""
-
-     def validate_target(self):
-         if self.target.prior.dim != 1:
-             raise ValueError("RegularizedUnboundedUniform-Gamma conjugacy only works with univariate Gamma prior")
-
-         # Raises error if preset is not supported
-         _compute_sparsity_level(self.target)
-
-         key_value_pairs = _get_conjugate_parameter(self.target)
-         if len(key_value_pairs) != 1:
-             raise ValueError(f"Multiple references to conjugate parameter {self.target.prior.name} found in likelihood. Only one occurance is supported.")
-         for key, value in key_value_pairs:
-             if key == "strength":
-                 if not _check_conjugate_parameter_is_scalar_linear(value):
-                     raise ValueError("RegularizedUnboundedUniform-Gamma conjugacy defined via strength requires strength: lambda x : s*x for the conjugate parameter")
-             else:
-                 raise ValueError(f"RegularizedUnboundedUniform-Gamma conjugacy does not support the conjugate parameter {self.target.prior.name} in the {key} attribute. Only strength is supported")
-
-     def conjugate_distribution(self):
-         # Extract prior variables
-         alpha = self.target.prior.shape
-         beta = self.target.prior.rate
-
-         # Compute likelihood quantities
-         x = self.target.likelihood.data
-         m = _compute_sparsity_level(self.target)
-
-         reg_op = self.target.likelihood.distribution._regularization_oper
-         reg_strength = self.target.likelihood.distribution(np.array([1])).strength
-         fx = reg_strength*np.linalg.norm(reg_op@x, ord = 1)
-
-         # Create Gamma distribution
-         return Gamma(shape=m/2 + alpha, rate=fx + beta)
-
- class _RegularizedGaussianModifiedHalfNormalPair(_ConjugatePair):
-     """Implementation for the Regularized Gaussian-ModifiedHalfNormal conjugate pair using the conjugacy rules from [2], Section 4."""
-
-     def validate_target(self):
-         if self.target.prior.dim != 1:
-             raise ValueError("RegularizedGaussian-ModifiedHalfNormal conjugacy only works with univariate ModifiedHalfNormal prior")
-
-         # Raises error if preset is not supported
-         _compute_sparsity_level(self.target)
-
-         key_value_pairs = _get_conjugate_parameter(self.target)
-         if len(key_value_pairs) != 2:
-             raise ValueError(f"Incorrect number of references to conjugate parameter {self.target.prior.name} found in likelihood. Found {len(key_value_pairs)} times, but needs to occur in prec or cov, and in strength")
-         for key, value in key_value_pairs:
-             if key == "strength":
-                 if not _check_conjugate_parameter_is_scalar_linear(value):
-                     raise ValueError("RegularizedGaussian-ModifiedHalfNormal conjugacy defined via strength requires strength: lambda x : s*x for the conjugate parameter")
-             elif key == "prec":
-                 if not _check_conjugate_parameter_is_scalar_quadratic(value):
-                     raise ValueError("RegularizedGaussian-ModifiedHalfNormal conjugacy defined via precision requires prec: lambda x : s*x for the conjugate parameter")
-             elif key == "cov":
-                 if not _check_conjugate_parameter_is_scalar_quadratic_reciprocal(value):
-                     raise ValueError("RegularizedGaussian-ModifiedHalfNormal conjugacy defined via covariance requires cov: lambda x : s/x for the conjugate parameter")
-             else:
-                 raise ValueError(f"RegularizedGaussian-ModifiedHalfNormal conjugacy does not support the conjugate parameter {self.target.prior.name} in the {key} attribute. Only cov, prec and strength are supported")
-
-
-     def conjugate_distribution(self):
-         # Extract prior variables
-         alpha = self.target.prior.alpha
-         beta = self.target.prior.beta
-         gamma = self.target.prior.gamma
-
-         # Compute likelihood variables
-         x = self.target.likelihood.data
-         mu = self.target.likelihood.distribution.mean
-         L = self.target.likelihood.distribution(np.array([1])).sqrtprec
-
-         m = _compute_sparsity_level(self.target)
-
-         reg_op = self.target.likelihood.distribution._regularization_oper
-         reg_strength = self.target.likelihood.distribution(np.array([1])).strength
-         fx = reg_strength*np.linalg.norm(reg_op@x, ord = 1)
-
-         # Compute parameters of conjugate distribution
-         conj_alpha = m + alpha
-         conj_beta = 0.5*np.linalg.norm(L @ (mu - x))**2 + beta
-         conj_gamma = -fx + gamma
-
-         # Create conjugate distribution
-         return ModifiedHalfNormal(conj_alpha, conj_beta, conj_gamma)
-
-
- def _compute_sparsity_level(target):
-     """Computes the sparsity level in accordance with Section 4 from [2],
-     this can be interpreted as the number of degrees of freedom, that is,
-     the number of components n minus the dimension the of the subdifferential of the regularized.
-     """
-     x = target.likelihood.data
-
-     constraint = target.likelihood.distribution.preset["constraint"]
-     regularization = target.likelihood.distribution.preset["regularization"]
-
-     # There is no reference for some of these conjugacy rules
-     if constraint == "nonnegativity":
-         if regularization in [None, "l1"]:
-             # Number of non-zero components in x
-             return count_nonzero(x)
-         elif regularization == "tv" and isinstance(target.likelihood.distribution.geometry, Continuous1D):
-             # Number of non-zero constant components in x
-             return count_constant_components_1D(x, lower = 0.0)
-         elif regularization == "tv" and isinstance(target.likelihood.distribution.geometry, (Continuous2D, Image2D)):
-             # Number of non-zero constant components in x
-             return count_constant_components_2D(target.likelihood.distribution.geometry.par2fun(x), lower = 0.0)
-     elif constraint == "box":
-         bounds = target.likelihood.distribution._box_bounds
-         if regularization is None:
-             # Number of components in x that are strictly between the lower and upper bound
-             return count_within_bounds(x, bounds[0], bounds[1])
-         elif regularization == "l1":
-             # Number of components in x that are strictly between the lower and upper bound and are not zero
-             return count_within_bounds(x, bounds[0], bounds[1], exception = 0.0)
-         elif regularization == "tv" and isinstance(target.likelihood.distribution.geometry, Continuous1D):
-             # Number of constant components in x between are strictly between the lower and upper bound
-             return count_constant_components_1D(x, lower = bounds[0], upper = bounds[1])
-         elif regularization == "tv" and isinstance(target.likelihood.distribution.geometry, (Continuous2D, Image2D)):
-             # Number of constant components in x between are strictly between the lower and upper bound
-             return count_constant_components_2D(target.likelihood.distribution.geometry.par2fun(x), lower = bounds[0], upper = bounds[1])
-     elif constraint in ["increasing", "decreasing"]:
-         if regularization is None:
-             # Number of constant components in x
-             return count_constant_components_1D(x)
-         elif regularization == "l1":
-             # Number of constant components in x that are not zero
-             return count_constant_components_1D(x, exception = 0.0)
-         elif regularization == "tv" and isinstance(target.likelihood.distribution.geometry, Continuous1D):
-             # Number of constant components in x
-             return count_constant_components_1D(x)
-         # Increasing and decreasing cannot be done in 2D
-     elif constraint in ["convex", "concave"]:
-         if regularization is None:
-             # Number of piecewise linear components in x
-             return piecewise_linear_1D_DoF(x)
-         elif regularization == "l1":
-             # Number of piecewise linear components in x that are not zero
-             return piecewise_linear_1D_DoF(x, exception_zero = True)
-         elif regularization == "tv" and isinstance(target.likelihood.distribution.geometry, Continuous1D):
-             # Number of piecewise linear components in x that are not flat
-             return piecewise_linear_1D_DoF(x, exception_flat = True)
-         # convex and concave has only been implemented in 1D
-     elif constraint == None:
-         if regularization == "l1":
-             # Number of non-zero components in x
-             return count_nonzero(x)
-         elif regularization == "tv" and isinstance(target.likelihood.distribution.geometry, Continuous1D):
-             # Number of non-zero constant components in x
-             return count_constant_components_1D(x)
-         elif regularization == "tv" and isinstance(target.likelihood.distribution.geometry, (Continuous2D, Image2D)):
-             # Number of non-zero constant components in x
-             return count_constant_components_2D(target.likelihood.distribution.geometry.par2fun(x))
-
-     raise ValueError("RegularizedGaussian preset constraint and regularization choice is currently not supported with conjugacy.")
-
-
- def _get_conjugate_parameter(target):
-     """Extract the conjugate parameter name (e.g. d), and returns the mutable variable that is defined by the conjugate parameter, e.g. cov and its value e.g. lambda d:1/d"""
-     par_name = target.prior.name
-     mutable_likelihood_vars = target.likelihood.distribution.get_mutable_variables()
-
-     found_parameter_pairs = []
-
-     for var_key in mutable_likelihood_vars:
-         attr = getattr(target.likelihood.distribution, var_key)
-         if callable(attr) and par_name in get_non_default_args(attr):
-             found_parameter_pairs.append((var_key, attr))
-     if len(found_parameter_pairs) == 0:
-         raise ValueError(f"Unable to find conjugate parameter {par_name} in likelihood function for conjugate sampler with target {target}")
-     return found_parameter_pairs
-
- def _check_conjugate_parameter_is_scalar_identity(f):
-     """Tests whether a function (scalar to scalar) is the identity (lambda x: x)."""
-     test_values = [1.0, 10.0, 100.0]
-     return all(np.allclose(f(x), x) for x in test_values)
-
- def _check_conjugate_parameter_is_scalar_reciprocal(f):
-     """Tests whether a function (scalar to scalar) is the reciprocal (lambda x : 1.0/x)."""
-     return all(math.isclose(f(x), 1.0 / x) for x in [1.0, 10.0, 100.0])
-
- def _check_conjugate_parameter_is_scalar_linear(f):
-     """
-     Tests whether a function (scalar to scalar) is linear (lambda x: s*x for some s).
-     The tests checks whether the function is zero and some finite differences are constant.
-     """
-     test_values = [1.0, 10.0, 100.0]
-     h = 1e-2
-     finite_diffs = [(f(x + h*x)-f(x))/(h*x) for x in test_values]
-     return np.isclose(f(0.0), 0.0) and all(np.allclose(c, finite_diffs[0]) for c in finite_diffs[1:])
-
- def _check_conjugate_parameter_is_scalar_linear_reciprocal(f):
-     """
-     Tests whether a function (scalar to scalar) is a constant times the inverse of the input (lambda x: s/x for some s).
-     The tests checks whether the the reciprocal of the function has constant finite differences.
-     """
-     g = lambda x : 1.0/f(x)
-     test_values = [1.0, 10.0, 100.0]
-     h = 1e-2
-     finite_diffs = [(g(x + h*x)-g(x))/(h*x) for x in test_values]
-     return all(np.allclose(c, finite_diffs[0]) for c in finite_diffs[1:])
-
- def _check_conjugate_parameter_is_scalar_quadratic(f):
-     """
-     Tests whether a function (scalar to scalar) is linear (lambda x: s*x**2 for some s).
-     The tests checks whether the function divided by the parameter is linear
-     """
-     return _check_conjugate_parameter_is_scalar_linear(lambda x: f(x)/x if x != 0.0 else f(0.0))
-
- def _check_conjugate_parameter_is_scalar_quadratic_reciprocal(f):
-     """
-     Tests whether a function (scalar to scalar) is linear (lambda x: s*x**-2 for some s).
-     The tests checks whether the function divided by the parameter is the reciprical of a linear function.
-     """
-     return _check_conjugate_parameter_is_scalar_linear_reciprocal(lambda x: f(x)/x)
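For reference, the Gamma update computed by the removed _GaussianGammaPair.conjugate_distribution can be written out with plain NumPy. Below is a minimal sketch of the same formula on made-up data; the variable names mirror the removed code and nothing here touches the CUQIpy API.

    import numpy as np

    rng = np.random.default_rng(0)

    # Toy stand-ins for the quantities the removed code reads off the posterior
    m = 20                                   # data dimension, len(b)
    Ax = rng.standard_normal(m)              # likelihood mean
    b = Ax + 0.1 * rng.standard_normal(m)    # observed data
    L = np.eye(m)                            # square root of the unscaled precision
    alpha, beta = 1.0, 1e-4                  # Gamma prior shape and rate

    # Conjugate Gamma posterior on the precision parameter:
    #   shape = m/2 + alpha,  rate = 0.5*||L(Ax - b)||^2 + beta
    shape = m / 2 + alpha
    rate = 0.5 * np.linalg.norm(L @ (Ax - b))**2 + beta
    precision_sample = rng.gamma(shape, 1.0 / rate)  # NumPy parameterizes by scale = 1/rate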
cuqi/experimental/mcmc/_conjugate_approx.py (file removed)
@@ -1,76 +0,0 @@
- import numpy as np
- from cuqi.experimental.mcmc import Conjugate
- from cuqi.experimental.mcmc._conjugate import _ConjugatePair, _get_conjugate_parameter, _check_conjugate_parameter_is_scalar_reciprocal
- from cuqi.distribution import LMRF, Gamma
- import scipy as sp
-
- class ConjugateApprox(Conjugate):
-     """ Approximate Conjugate sampler
-
-     Sampler for sampling a posterior distribution where the likelihood and prior can be approximated
-     by a conjugate pair.
-
-     Currently supported pairs are:
-     - (LMRF, Gamma): Approximated by (Gaussian, Gamma) where Gamma is defined on the inverse of the scale parameter of the LMRF distribution.
-
-     Gamma distribution must be univariate.
-
-     LMRF likelihood must have zero mean.
-
-     For more details on conjugacy see :class:`Conjugate`.
-
-     """
-
-     def _set_conjugatepair(self):
-         """ Set the conjugate pair based on the likelihood and prior. This requires target to be set. """
-         if isinstance(self.target.likelihood.distribution, LMRF) and isinstance(self.target.prior, Gamma):
-             self._conjugatepair = _LMRFGammaPair(self.target)
-         else:
-             raise ValueError(f"Conjugacy is not defined for likelihood {type(self.target.likelihood.distribution)} and prior {type(self.target.prior)}, in CUQIpy")
-
-
- class _LMRFGammaPair(_ConjugatePair):
-     """ Implementation of the conjugate pair (LMRF, Gamma) """
-
-     def validate_target(self):
-         if not self.target.prior.dim == 1:
-             raise ValueError("Approximate conjugate sampler only works with univariate Gamma prior")
-
-         if np.sum(self.target.likelihood.distribution.location) != 0:
-             raise ValueError("Approximate conjugate sampler only works with zero mean LMRF likelihood")
-
-         key_value_pairs = _get_conjugate_parameter(self.target)
-         if len(key_value_pairs) != 1:
-             raise ValueError(f"Multiple references to conjugate parameter {self.target.prior.name} found in likelihood. Only one occurance is supported.")
-         for key, value in key_value_pairs:
-             if key == "scale":
-                 if not _check_conjugate_parameter_is_scalar_reciprocal(value):
-                     raise ValueError("Approximate conjugate sampler only works with Gamma prior on the inverse of the scale parameter of the LMRF likelihood")
-             else:
-                 raise ValueError(f"No approximate conjugacy defined for likelihood {type(self.target.likelihood.distribution)} and prior {type(self.target.prior)}, in CUQIpy")
-
-     def conjugate_distribution(self):
-         # Extract variables
-         # Here we approximate the LMRF with a Gaussian
-
-         # Extract diff_op from target likelihood
-         D = self.target.likelihood.distribution._diff_op
-         n = D.shape[0]
-
-         # Gaussian approximation of LMRF prior as function of x_k
-         # See Uribe et al. (2022) for details
-         # Current has a zero mean assumption on likelihood! TODO
-         beta=1e-5
-         def Lk_fun(x_k):
-             dd = 1/np.sqrt((D @ x_k)**2 + beta*np.ones(n))
-             W = sp.sparse.diags(dd)
-             return W.sqrt() @ D
-
-         x = self.target.likelihood.data #x
-         d = len(x) #d
-         Lx = Lk_fun(x)@x #Lx
-         alpha = self.target.prior.shape #alpha
-         beta = self.target.prior.rate #beta
-
-         # Create Gamma distribution and sample
-         return Gamma(shape=d+alpha, rate=np.linalg.norm(Lx)**2+beta)
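The approximate pair above replaces the LMRF with a Gaussian whose square-root precision is rebuilt from the current point (an IRLS-style weighting), and then applies the usual Gamma update. Below is a standalone sketch of that weighting with a toy first-order difference operator; D, the signal, and the prior parameters are illustrative stand-ins, not CUQIpy objects.

    import numpy as np

    rng = np.random.default_rng(1)
    n = 50
    x = np.cumsum(rng.standard_normal(n))      # toy signal

    # First-order finite-difference operator standing in for the LMRF's _diff_op
    D = np.eye(n) - np.eye(n, k=1)

    beta_smooth = 1e-5                         # smoothing constant used in the removed code
    dd = 1.0 / np.sqrt((D @ x)**2 + beta_smooth)
    L_k = np.diag(np.sqrt(dd)) @ D             # Gaussian approximation of the LMRF at x

    # Gamma update mirroring the removed _LMRFGammaPair.conjugate_distribution
    alpha, rate_prior = 1.0, 1e-4              # Gamma prior shape and rate
    shape = n + alpha
    rate = np.linalg.norm(L_k @ x)**2 + rate_prior
    inv_scale_sample = rng.gamma(shape, 1.0 / rate)  # Gamma acts on the inverse LMRF scale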
cuqi/experimental/mcmc/_cwmh.py (file removed)
@@ -1,190 +0,0 @@
- import numpy as np
- import cuqi
- from cuqi.experimental.mcmc import ProposalBasedSampler
- from cuqi.array import CUQIarray
- from numbers import Number
-
- class CWMH(ProposalBasedSampler):
-     """Component-wise Metropolis Hastings sampler.
-
-     Allows sampling of a target distribution by a component-wise random-walk
-     sampling of a proposal distribution along with an accept/reject step.
-
-     Parameters
-     ----------
-
-     target : `cuqi.distribution.Distribution` or lambda function
-         The target distribution to sample. Custom logpdfs are supported by using
-         a :class:`cuqi.distribution.UserDefinedDistribution`.
-
-     proposal : `cuqi.distribution.Distribution` or callable method
-         The proposal to sample from. If a callable method it should provide a
-         single independent sample from proposal distribution. Defaults to a
-         Gaussian proposal. *Optional*.
-
-     scale : float or ndarray
-         Scale parameter used to define correlation between previous and proposed
-         sample in random-walk. *Optional*. If float, the same scale is used for
-         all dimensions. If ndarray, a (possibly) different scale is used for
-         each dimension.
-
-     initial_point : ndarray
-         Initial parameters. *Optional*
-
-     callback : callable, optional
-         A function that will be called after each sampling step. It can be useful for monitoring the sampler during sampling.
-         The function should take three arguments: the sampler object, the index of the current sampling step, the total number of requested samples. The last two arguments are integers. An example of the callback function signature is: `callback(sampler, sample_index, num_of_samples)`.
-
-     kwargs : dict
-         Additional keyword arguments to be passed to the base class
-         :class:`ProposalBasedSampler`.
-
-     Example
-     -------
-     .. code-block:: python
-         import numpy as np
-         import cuqi
-         # Parameters
-         dim = 5 # Dimension of distribution
-         mu = np.arange(dim) # Mean of Gaussian
-         std = 1 # standard deviation of Gaussian
-
-         # Logpdf function
-         logpdf_func = lambda x: -1/(std**2)*np.sum((x-mu)**2)
-
-         # Define distribution from logpdf as UserDefinedDistribution (sample
-         # and gradients also supported as inputs to UserDefinedDistribution)
-         target = cuqi.distribution.UserDefinedDistribution(
-             dim=dim, logpdf_func=logpdf_func)
-
-         # Set up sampler
-         sampler = cuqi.experimental.mcmc.CWMH(target, scale=1)
-
-         # Sample
-         samples = sampler.sample(2000).get_samples()
-
-     """
-
-     _STATE_KEYS = ProposalBasedSampler._STATE_KEYS.union(['_scale_temp'])
-
-     def __init__(self, target:cuqi.density.Density=None, proposal=None, scale=1,
-                  initial_point=None, **kwargs):
-         super().__init__(target, proposal=proposal, scale=scale,
-                          initial_point=initial_point, **kwargs)
-
-     def _initialize(self):
-         if isinstance(self.scale, Number):
-             self.scale = np.ones(self.dim)*self.scale
-         self._acc = [np.ones((self.dim))] # Overwrite acc from ProposalBasedSampler with list of arrays
-
-         # Handling of temporary scale parameter due to possible bug in old CWMH
-         self._scale_temp = self.scale.copy()
-
-     @property
-     def scale(self):
-         """ Get the scale parameter. """
-         return self._scale
-
-     @scale.setter
-     def scale(self, value):
-         """ Set the scale parameter. """
-         if self._is_initialized and isinstance(value, Number):
-             value = np.ones(self.dim)*value
-         self._scale = value
-
-     def validate_target(self):
-         if not isinstance(self.target, cuqi.density.Density):
-             raise ValueError(
-                 "Target should be an instance of "+\
-                 f"{cuqi.density.Density.__class__.__name__}")
-         # Fail when there is no log density, which is currently assumed to be the case in case NaN is returned.
-         if np.isnan(self.target.logd(self._get_default_initial_point(self.dim))):
-             raise ValueError("Target does not have valid logd")
-
-     def validate_proposal(self):
-         if not isinstance(self.proposal, cuqi.distribution.Distribution):
-             raise ValueError("Proposal must be a cuqi.distribution.Distribution object")
-         if not self.proposal.is_symmetric:
-             raise ValueError("Proposal must be symmetric")
-
-     @property
-     def proposal(self):
-         if self._proposal is None:
-             self._proposal = cuqi.distribution.Normal(
-                 mean=lambda location: location,
-                 std=lambda scale: scale,
-                 geometry=self.dim,
-             )
-         return self._proposal
-
-     @proposal.setter
-     def proposal(self, value):
-         self._proposal = value
-
-     def step(self):
-         # Initialize x_t which is used to store the current CWMH sample
-         x_t = self.current_point.copy()
-
-         # Initialize x_star which is used to store the proposed sample by
-         # updating the current sample component-by-component
-         x_star = self.current_point.copy()
-
-         # Propose a sample x_all_components from the proposal distribution
-         # for all the components
-         target_eval_t = self.current_target_logd
-         if isinstance(self.proposal,cuqi.distribution.Distribution):
-             x_all_components = self.proposal(
-                 location= self.current_point, scale=self.scale).sample()
-         else:
-             x_all_components = self.proposal(self.current_point, self.scale)
-
-         # Initialize acceptance rate
-         acc = np.zeros(self.dim)
-
-         # Loop over all the components of the sample and accept/reject
-         # each component update.
-         for j in range(self.dim):
-             # propose state x_star by updating the j-th component
-             x_star[j] = x_all_components[j]
-
-             # evaluate target
-             target_eval_star = self.target.logd(x_star)
-
-             # compute Metropolis acceptance ratio
-             alpha = min(0, target_eval_star - target_eval_t)
-
-             # accept/reject
-             u_theta = np.log(np.random.rand())
-             if (u_theta <= alpha) and \
-                     (not np.isnan(target_eval_star)) and \
-                     (not np.isinf(target_eval_star)):
-                 x_t[j] = x_all_components[j]
-                 target_eval_t = target_eval_star
-                 acc[j] = 1
-
-             x_star = x_t.copy()
-
-         self.current_target_logd = target_eval_t
-         self.current_point = x_t
-
-         return acc
-
-     def tune(self, skip_len, update_count):
-         # Store update_count in variable i for readability
-         i = update_count
-
-         # Optimal acceptance rate for CWMH
-         star_acc = 0.21/self.dim + 0.23
-
-         # Mean of acceptance rate over the last skip_len samples
-         hat_acc = np.mean(self._acc[i*skip_len:(i+1)*skip_len], axis=0)
-
-         # Compute new intermediate scaling parameter scale_temp
-         # Factor zeta ensures that the variation of the scale update vanishes
-         zeta = 1/np.sqrt(update_count+1)
-         scale_temp = np.exp(
-             np.log(self._scale_temp) + zeta*(hat_acc-star_acc))
-
-         # Update the scale parameter
-         self.scale = np.minimum(scale_temp, np.ones(self.dim))
-         self._scale_temp = scale_temp
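The scale adaptation in the removed CWMH.tune nudges a per-component scale toward the target acceptance rate 0.21/dim + 0.23 with a step size that decays as 1/sqrt(i+1), then clips the scale at one. Below is a standalone sketch of that rule on dummy acceptance indicators; the loop and data are illustrative only, not part of the sampler API.

    import numpy as np

    rng = np.random.default_rng(2)
    dim = 5
    skip_len = 50

    star_acc = 0.21 / dim + 0.23        # target acceptance rate from the removed code
    scale_temp = np.ones(dim)           # intermediate (unclipped) scale
    scale = np.ones(dim)

    for i in range(10):                 # i plays the role of update_count
        # Dummy per-component acceptance indicators for the last skip_len steps
        acc_block = (rng.random((skip_len, dim)) < 0.3).astype(float)
        hat_acc = acc_block.mean(axis=0)

        zeta = 1.0 / np.sqrt(i + 1)     # step size that vanishes over time
        scale_temp = np.exp(np.log(scale_temp) + zeta * (hat_acc - star_acc))
        scale = np.minimum(scale_temp, np.ones(dim))  # clip the scale at 1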