CUQIpy 1.0.0.post0.dev305__tar.gz → 1.0.0.post0.dev352__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of CUQIpy might be problematic. Click here for more details.

Files changed (115) hide show
  1. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/CUQIpy.egg-info/PKG-INFO +1 -1
  2. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/CUQIpy.egg-info/SOURCES.txt +5 -0
  3. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/PKG-INFO +1 -1
  4. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/_version.py +3 -3
  5. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/distribution/__init__.py +1 -0
  6. cuqipy-1.0.0.post0.dev352/cuqi/distribution/_modifiedhalfnormal.py +184 -0
  7. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/experimental/mcmc/__init__.py +4 -0
  8. cuqipy-1.0.0.post0.dev352/cuqi/experimental/mcmc/_conjugate.py +77 -0
  9. cuqipy-1.0.0.post0.dev352/cuqi/experimental/mcmc/_conjugate_approx.py +75 -0
  10. cuqipy-1.0.0.post0.dev352/cuqi/experimental/mcmc/_direct.py +28 -0
  11. cuqipy-1.0.0.post0.dev352/cuqi/experimental/mcmc/_gibbs.py +267 -0
  12. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/experimental/mcmc/_laplace_approximation.py +1 -1
  13. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/experimental/mcmc/_mh.py +1 -1
  14. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/experimental/mcmc/_rto.py +1 -1
  15. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/experimental/mcmc/_sampler.py +14 -3
  16. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/tests/test_distribution.py +22 -0
  17. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/tests/test_distributions_shape.py +3 -2
  18. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/CUQIpy.egg-info/dependency_links.txt +0 -0
  19. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/CUQIpy.egg-info/requires.txt +0 -0
  20. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/CUQIpy.egg-info/top_level.txt +0 -0
  21. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/LICENSE +0 -0
  22. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/README.md +0 -0
  23. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/__init__.py +0 -0
  24. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/_messages.py +0 -0
  25. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/array/__init__.py +0 -0
  26. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/array/_array.py +0 -0
  27. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/config.py +0 -0
  28. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/data/__init__.py +0 -0
  29. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/data/_data.py +0 -0
  30. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/data/astronaut.npz +0 -0
  31. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/data/camera.npz +0 -0
  32. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/data/cat.npz +0 -0
  33. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/data/cookie.png +0 -0
  34. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/data/satellite.mat +0 -0
  35. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/density/__init__.py +0 -0
  36. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/density/_density.py +0 -0
  37. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/diagnostics.py +0 -0
  38. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/distribution/_beta.py +0 -0
  39. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/distribution/_cauchy.py +0 -0
  40. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/distribution/_cmrf.py +0 -0
  41. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/distribution/_custom.py +0 -0
  42. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/distribution/_distribution.py +0 -0
  43. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/distribution/_gamma.py +0 -0
  44. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/distribution/_gaussian.py +0 -0
  45. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/distribution/_gmrf.py +0 -0
  46. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/distribution/_inverse_gamma.py +0 -0
  47. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/distribution/_joint_distribution.py +0 -0
  48. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/distribution/_laplace.py +0 -0
  49. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/distribution/_lmrf.py +0 -0
  50. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/distribution/_lognormal.py +0 -0
  51. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/distribution/_normal.py +0 -0
  52. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/distribution/_posterior.py +0 -0
  53. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/distribution/_uniform.py +0 -0
  54. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/experimental/__init__.py +0 -0
  55. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/experimental/mcmc/_cwmh.py +0 -0
  56. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/experimental/mcmc/_hmc.py +0 -0
  57. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/experimental/mcmc/_langevin_algorithm.py +0 -0
  58. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/experimental/mcmc/_pcn.py +0 -0
  59. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/geometry/__init__.py +0 -0
  60. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/geometry/_geometry.py +0 -0
  61. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/implicitprior/__init__.py +0 -0
  62. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/implicitprior/_regularizedGMRF.py +0 -0
  63. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/implicitprior/_regularizedGaussian.py +0 -0
  64. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/likelihood/__init__.py +0 -0
  65. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/likelihood/_likelihood.py +0 -0
  66. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/model/__init__.py +0 -0
  67. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/model/_model.py +0 -0
  68. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/operator/__init__.py +0 -0
  69. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/operator/_operator.py +0 -0
  70. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/pde/__init__.py +0 -0
  71. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/pde/_pde.py +0 -0
  72. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/problem/__init__.py +0 -0
  73. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/problem/_problem.py +0 -0
  74. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/sampler/__init__.py +0 -0
  75. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/sampler/_conjugate.py +0 -0
  76. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/sampler/_conjugate_approx.py +0 -0
  77. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/sampler/_cwmh.py +0 -0
  78. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/sampler/_gibbs.py +0 -0
  79. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/sampler/_hmc.py +0 -0
  80. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/sampler/_langevin_algorithm.py +0 -0
  81. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/sampler/_laplace_approximation.py +0 -0
  82. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/sampler/_mh.py +0 -0
  83. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/sampler/_pcn.py +0 -0
  84. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/sampler/_rto.py +0 -0
  85. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/sampler/_sampler.py +0 -0
  86. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/samples/__init__.py +0 -0
  87. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/samples/_samples.py +0 -0
  88. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/solver/__init__.py +0 -0
  89. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/solver/_solver.py +0 -0
  90. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/testproblem/__init__.py +0 -0
  91. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/testproblem/_testproblem.py +0 -0
  92. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/utilities/__init__.py +0 -0
  93. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/utilities/_get_python_variable_name.py +0 -0
  94. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/cuqi/utilities/_utilities.py +0 -0
  95. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/pyproject.toml +0 -0
  96. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/requirements.txt +0 -0
  97. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/setup.cfg +0 -0
  98. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/setup.py +0 -0
  99. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/tests/test_MRFs.py +0 -0
  100. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/tests/test_abstract_distribution_density.py +0 -0
  101. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/tests/test_bayesian_inversion.py +0 -0
  102. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/tests/test_density.py +0 -0
  103. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/tests/test_geometry.py +0 -0
  104. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/tests/test_implicit_priors.py +0 -0
  105. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/tests/test_joint_distribution.py +0 -0
  106. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/tests/test_likelihood.py +0 -0
  107. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/tests/test_model.py +0 -0
  108. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/tests/test_pde.py +0 -0
  109. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/tests/test_posterior.py +0 -0
  110. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/tests/test_problem.py +0 -0
  111. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/tests/test_sampler.py +0 -0
  112. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/tests/test_samples.py +0 -0
  113. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/tests/test_solver.py +0 -0
  114. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/tests/test_testproblem.py +0 -0
  115. {cuqipy-1.0.0.post0.dev305 → cuqipy-1.0.0.post0.dev352}/tests/test_utilities.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: CUQIpy
3
- Version: 1.0.0.post0.dev305
3
+ Version: 1.0.0.post0.dev352
4
4
  Summary: Computational Uncertainty Quantification for Inverse problems in Python
5
5
  Maintainer-email: "Nicolai A. B. Riis" <nabr@dtu.dk>, "Jakob S. Jørgensen" <jakj@dtu.dk>, "Amal M. Alghamdi" <amaal@dtu.dk>, Chao Zhang <chaz@dtu.dk>
6
6
  License: Apache License
@@ -38,12 +38,17 @@ cuqi/distribution/_joint_distribution.py
38
38
  cuqi/distribution/_laplace.py
39
39
  cuqi/distribution/_lmrf.py
40
40
  cuqi/distribution/_lognormal.py
41
+ cuqi/distribution/_modifiedhalfnormal.py
41
42
  cuqi/distribution/_normal.py
42
43
  cuqi/distribution/_posterior.py
43
44
  cuqi/distribution/_uniform.py
44
45
  cuqi/experimental/__init__.py
45
46
  cuqi/experimental/mcmc/__init__.py
47
+ cuqi/experimental/mcmc/_conjugate.py
48
+ cuqi/experimental/mcmc/_conjugate_approx.py
46
49
  cuqi/experimental/mcmc/_cwmh.py
50
+ cuqi/experimental/mcmc/_direct.py
51
+ cuqi/experimental/mcmc/_gibbs.py
47
52
  cuqi/experimental/mcmc/_hmc.py
48
53
  cuqi/experimental/mcmc/_langevin_algorithm.py
49
54
  cuqi/experimental/mcmc/_laplace_approximation.py
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: CUQIpy
3
- Version: 1.0.0.post0.dev305
3
+ Version: 1.0.0.post0.dev352
4
4
  Summary: Computational Uncertainty Quantification for Inverse problems in Python
5
5
  Maintainer-email: "Nicolai A. B. Riis" <nabr@dtu.dk>, "Jakob S. Jørgensen" <jakj@dtu.dk>, "Amal M. Alghamdi" <amaal@dtu.dk>, Chao Zhang <chaz@dtu.dk>
6
6
  License: Apache License
@@ -8,11 +8,11 @@ import json
8
8
 
9
9
  version_json = '''
10
10
  {
11
- "date": "2024-05-27T07:34:21+0200",
11
+ "date": "2024-06-19T09:05:37+0200",
12
12
  "dirty": false,
13
13
  "error": null,
14
- "full-revisionid": "1274ccfff1cb9663b547dd2aeb9c86c72dccb2ae",
15
- "version": "1.0.0.post0.dev305"
14
+ "full-revisionid": "1e1deddbe7925206fdaef6a2d9bf8ce9e33ad73d",
15
+ "version": "1.0.0.post0.dev352"
16
16
  }
17
17
  ''' # END VERSION_JSON
18
18
 
@@ -3,6 +3,7 @@ from ._beta import Beta
3
3
  from ._cauchy import Cauchy
4
4
  from ._cmrf import CMRF
5
5
  from ._gamma import Gamma
6
+ from ._modifiedhalfnormal import ModifiedHalfNormal
6
7
  from ._gaussian import Gaussian, JointGaussianSqrtPrec
7
8
  from ._gmrf import GMRF
8
9
  from ._inverse_gamma import InverseGamma
@@ -0,0 +1,184 @@
1
import numpy as np
import scipy.stats as sps
import scipy.special as special
from cuqi.distribution import Distribution
from cuqi.utilities import force_ndarray

class ModifiedHalfNormal(Distribution):
    """
    Represents a modified half-normal (MHN) distribution, a three-parameter family of distributions generalizing the Gamma distribution.
    The distribution is continuous with pdf
    f(x; alpha, beta, gamma) propto x^(alpha-1) * exp(-beta * x^2 + gamma * x)

    The MHN generalizes the half-normal distribution, because
    f(x; 1, beta, 0) propto exp(-beta * x^2)

    The MHN generalizes the gamma distribution because
    f(x; alpha, 0, -gamma) propto x^(alpha-1) * exp(- gamma * x)

    Reference:
    [1] Sun, et al. "The Modified-Half-Normal distribution: Properties and an efficient sampling scheme." Communications in Statistics-Theory and Methods

    Parameters
    ----------
    alpha : float
        The polynomial exponent parameter of the MHN distribution. Must be positive.

    beta : float
        The quadratic exponential parameter of the MHN distribution. Must be positive.

    gamma : float
        The linear exponential parameter of the MHN distribution.

    """
    def __init__(self, alpha=None, beta=None, gamma=None, is_symmetric=False, **kwargs):
        # Init from abstract distribution class
        super().__init__(is_symmetric=is_symmetric, **kwargs)

        self._alpha = alpha
        self._beta = beta
        self._gamma = gamma

    @property
    def alpha(self):
        """ The polynomial exponent parameter of the MHN distribution. Must be positive. """
        return self._alpha

    @alpha.setter
    def alpha(self, value):
        # BUG FIX: this setter was previously named `shape` and assigned
        # self._shape, so `alpha` could never be updated through the property.
        self._alpha = force_ndarray(value, flatten=True)

    @property
    def beta(self):
        """ The quadratic exponential parameter of the MHN distribution. Must be positive. """
        # BUG FIX: previously returned self._alpha (copy-paste error).
        return self._beta

    @beta.setter
    def beta(self, value):
        self._beta = force_ndarray(value, flatten=True)

    @property
    def gamma(self):
        """ The linear exponential parameter of the MHN distribution. """
        # BUG FIX: previously returned self._alpha (copy-paste error).
        return self._gamma

    @gamma.setter
    def gamma(self, value):
        self._gamma = force_ndarray(value, flatten=True)

    def logpdf(self, x):
        """ Unnormalized log-density: sum of (alpha-1)*log(x) - beta*x^2 + gamma*x over elements. """
        return np.sum((self.alpha - 1)*np.log(x) - self.beta * x * x + self.gamma * x)

    def _gradient_scalar(self, val):
        # Derivative of the unnormalized log-density w.r.t. a scalar input.
        # The distribution is supported on x > 0 only, so return NaN outside.
        if val <= 0.0:
            return np.nan
        return (self.alpha - 1)/val - 2*self.beta*val + self.gamma

    def _gradient(self, val, *args, **kwargs):
        # Element-wise gradient of the log-density at each entry of `val`.
        if hasattr(self.alpha, '__iter__'):
            return np.array([self._gradient_scalar(v) for v in val])
        else:
            # NOTE(review): this branch replicates each scalar gradient `dim`
            # times, producing a (len(val), dim) array — confirm this is the
            # intended shape for scalar parameters.
            return np.array([self.dim*[self._gradient_scalar(v)] for v in val])

    def _MHN_sample_gamma_proposal(self, alpha, beta, gamma, rng, delta=None):
        """
        Sample from a modified half-normal distribution using a Gamma distribution proposal.
        """
        if delta is None:
            delta = beta + (gamma*gamma - gamma*np.sqrt(gamma*gamma + 8*beta*alpha))/(4*alpha)

        # Rejection sampling: propose T ~ Gamma(alpha/2, 1/delta), X = sqrt(T),
        # accept with the log-ratio below (Algorithm from [1]).
        while True:
            T = rng.gamma(alpha/2, 1.0/delta)
            X = np.sqrt(T)
            U = rng.uniform()
            if X > 0 and np.log(U) < -(beta-delta)*T + gamma*X - gamma*gamma/(4*(beta-delta)):
                return X

    def _MHN_sample_normal_proposal(self, alpha, beta, gamma, mu, rng):
        """
        Sample from a modified half-normal distribution using a Normal/Gaussian distribution proposal.
        """
        if mu is None:
            mu = (gamma + np.sqrt(gamma*gamma + 8*beta*(alpha - 1)))/(4*beta)

        while True:
            X = rng.normal(mu, np.sqrt(0.5/beta))
            U = rng.uniform()
            if X > 0 and np.log(U) < (alpha-1)*np.log(X) - np.log(mu) + (2*beta*mu-gamma)*(mu-X):
                return X

    def _MHN_sample_positive_gamma_1(self, alpha, beta, gamma, rng):
        """
        Sample from a modified half-normal distribution, assuming alpha is greater than one and gamma is positive.
        """
        if gamma <= 0.0:
            raise ValueError("gamma needs to be positive")

        if alpha <= 1.0:
            raise ValueError("alpha needs to be greater than 1.0")

        # Decide whether to use Normal or sqrt(Gamma) proposals for the
        # acceptance-rejection scheme by comparing the normalizing bounds
        # K1 (normal proposal) and K2 (gamma proposal).
        mu = (gamma + np.sqrt(gamma*gamma + 8*beta*(alpha - 1)))/(4*beta)
        K1 = 2*np.sqrt(np.pi)
        K1 *= np.power((np.sqrt(beta)*(alpha-1))/(2*beta*mu-gamma), alpha - 1)
        K1 *= np.exp(-(alpha-1)+beta*mu*mu)

        delta = beta + (gamma*gamma - gamma*np.sqrt(gamma*gamma + 8*beta*alpha))/(4*alpha)
        K2 = np.power(beta/delta, 0.5*alpha)
        K2 *= special.gamma(alpha/2.0)
        K2 *= np.exp(gamma*gamma/(4*(beta-delta)))

        if K2 > K1: # Use normal proposal
            return self._MHN_sample_normal_proposal(alpha, beta, gamma, mu, rng)
        else: # Use sqrt(gamma) proposal
            return self._MHN_sample_gamma_proposal(alpha, beta, gamma, rng, delta)

    def _MHN_sample_negative_gamma(self, alpha, beta, gamma, rng, m=None):
        """
        Sample from a modified half-normal distribution, assuming gamma is negative.
        The argument 'm' is the matching point, see Algorithm 3 from [1] for details.
        """
        if gamma > 0.0:
            raise ValueError("gamma needs to be negative")

        if m is None:
            if alpha <= 1.0:
                m = 1.0
            else:
                m = "mode"

        # The acceptance rate of this choice is at least 0.5*sqrt(2) approx 70.7 percent, according to Theorem 4 from [1].
        if isinstance(m, str) and m.lower() == "mode":
            m = (gamma + np.sqrt(gamma*gamma + 8*beta*alpha))/(4*beta)

        while True:
            val1 = (beta*m-gamma)/(2*beta*m-gamma)
            val2 = m*(beta*m-gamma)
            T = rng.gamma(alpha*val1, 1.0/val2)
            X = m*np.power(T,val1)
            U = rng.uniform()
            if np.log(U) < val2*T-beta*X*X+gamma*X:
                return X

    def _MHN_sample(self, alpha, beta, gamma, m=None, rng=None):
        """
        Sample from a modified half-normal distribution using an algorithm from [1].
        """
        # BUG FIX: compare with `is None` instead of `== None` (identity,
        # not equality, is the correct test for the None singleton).
        if rng is None:
            rng = np.random

        if gamma <= 0.0:
            return self._MHN_sample_negative_gamma(alpha, beta, gamma, m=m, rng=rng)

        if alpha > 1:
            return self._MHN_sample_positive_gamma_1(alpha, beta, gamma, rng=rng)

        return self._MHN_sample_gamma_proposal(alpha, beta, gamma, rng=rng)

    def _sample(self, N, rng=None):
        # Draw N i.i.d. samples; parameters may be scalars or indexable arrays.
        if hasattr(self.alpha, '__getitem__'):
            return np.array([self._MHN_sample(self.alpha[i], self.beta[i], self.gamma[i], rng=rng) for i in range(N)])
        else:
            return np.array([self._MHN_sample(self.alpha, self.beta, self.gamma, rng=rng) for i in range(N)])
@@ -8,3 +8,7 @@ from ._rto import LinearRTONew, RegularizedLinearRTONew
8
8
  from ._cwmh import CWMHNew
9
9
  from ._laplace_approximation import UGLANew
10
10
  from ._hmc import NUTSNew
11
+ from ._gibbs import HybridGibbsNew
12
+ from ._conjugate import ConjugateNew
13
+ from ._conjugate_approx import ConjugateApproxNew
14
+ from ._direct import DirectNew
@@ -0,0 +1,77 @@
1
import numpy as np
from cuqi.experimental.mcmc import SamplerNew
from cuqi.distribution import Posterior, Gaussian, Gamma, GMRF
from cuqi.implicitprior import RegularizedGaussian, RegularizedGMRF

class ConjugateNew(SamplerNew):
    """ Conjugate sampler

    Sampler for sampling a posterior distribution where the likelihood and prior are conjugate.

    Currently supported conjugate pairs are:
    - (Gaussian, Gamma) where Gamma is defined on the precision parameter of the Gaussian
    - (GMRF, Gamma) where Gamma is defined on the precision parameter of the GMRF
    - (RegularizedGaussian, Gamma) with nonnegativity constraints only and Gamma is defined on the precision parameter of the RegularizedGaussian
    - (RegularizedGMRF, Gamma) with nonnegativity constraints only and Gamma is defined on the precision parameter of the RegularizedGMRF

    Gamma distribution must be univariate.

    Currently, the sampler does NOT automatically check that the conjugate distributions are defined on the correct parameters.

    For more information on conjugate pairs, see https://en.wikipedia.org/wiki/Conjugate_prior.

    For implicit regularized Gaussians see:

    [1] Everink, Jasper M., Yiqiu Dong, and Martin S. Andersen. "Bayesian inference with projected densities." SIAM/ASA Journal on Uncertainty Quantification 11.3 (2023): 1025-1043.

    """
    def _initialize(self):
        # Conjugate sampling is stateless; nothing to set up.
        pass

    def validate_target(self):
        """ Validate that the target is a Posterior with a supported Gaussian-type
        likelihood and a univariate Gamma prior; raise TypeError/ValueError otherwise. """
        if not isinstance(self.target, Posterior):
            raise TypeError("Conjugate sampler requires a target of type Posterior")

        if not isinstance(self.target.likelihood.distribution, (Gaussian, GMRF, RegularizedGaussian, RegularizedGMRF)):
            raise ValueError("Conjugate sampler only works with a Gaussian-type likelihood function")

        if not isinstance(self.target.prior, Gamma):
            raise ValueError("Conjugate sampler only works with Gamma prior")

        if not self.target.prior.dim == 1:
            raise ValueError("Conjugate sampler only works with univariate Gamma prior")

        if isinstance(self.target.likelihood.distribution, (RegularizedGaussian, RegularizedGMRF)) and self.target.likelihood.distribution.preset not in ["nonnegativity"]:
            raise ValueError("Conjugate sampler only works with implicit regularized Gaussian likelihood with nonnegativity constraints")

    def step(self):
        """ Draw one exact sample from the Gamma conditional of the precision parameter. """
        # Extract variables
        b = self.target.likelihood.data                               # observed data (mu)
        m = self._calc_m_for_Gaussians(b)                             # effective number of observations (n)
        Ax = self.target.likelihood.distribution.mean                 # model output (x_i)
        L = self.target.likelihood.distribution(np.array([1])).sqrtprec  # sqrt-precision at unit parameter (L)
        alpha = self.target.prior.shape                               # prior shape (alpha)
        beta = self.target.prior.rate                                 # prior rate (beta)

        # Conjugate update: Gamma(m/2 + alpha, ||L(Ax-b)||^2/2 + beta)
        dist = Gamma(shape=m/2+alpha, rate=.5*np.linalg.norm(L@(Ax-b))**2+beta)

        self.current_point = dist.sample()

    def tune(self, skip_len, update_count):
        # Conjugate sampling is exact; there is nothing to tune.
        pass

    def _calc_m_for_Gaussians(self, b):
        """ Helper method to calculate m parameter for Gaussian-Gamma conjugate pair.

        Classically m defines the number of observations in the Gaussian likelihood function.

        However, for implicit regularized Gaussians, m is the number of non-zero elements in the data vector b see [1].

        """
        dist = self.target.likelihood.distribution
        if isinstance(dist, (Gaussian, GMRF)):
            return len(b)
        elif isinstance(dist, (RegularizedGaussian, RegularizedGMRF)):
            return np.count_nonzero(b)
        # BUG FIX: previously fell through and silently returned None for an
        # unexpected distribution type; fail loudly instead.
        raise TypeError(
            "Cannot compute m for likelihood distribution of type "
            f"{type(dist).__name__}")
@@ -0,0 +1,75 @@
1
import numpy as np
from cuqi.experimental.mcmc import SamplerNew
from cuqi.distribution import Posterior, LMRF, Gamma
import scipy as sp

class ConjugateApproxNew(SamplerNew):
    """ Approximate Conjugate sampler

    Sampler for sampling a posterior distribution where the likelihood and prior can be approximated
    by a conjugate pair.

    Currently supported pairs are:
    - (LMRF, Gamma): Approximated by (Gaussian, Gamma) where Gamma is defined on the inverse of the scale parameter of the LMRF distribution.

    Gamma distribution must be univariate.

    LMRF likelihood must have zero mean.

    Currently, the sampler does NOT automatically check that the conjugate distributions are defined on the correct parameters.

    For more information on conjugate pairs, see https://en.wikipedia.org/wiki/Conjugate_prior.

    """

    def _initialize(self):
        # Approximate conjugate sampling is stateless; nothing to set up.
        pass

    def validate_target(self):
        """ Validate that the target is a Posterior with a zero-mean LMRF likelihood
        and a univariate Gamma prior; raise TypeError/ValueError otherwise. """
        if not isinstance(self.target, Posterior):
            raise TypeError("Approximate conjugate sampler requires a target of type Posterior")

        if not isinstance(self.target.likelihood.distribution, LMRF):
            raise ValueError("Approximate conjugate sampler only works with LMRF likelihood function")

        if not isinstance(self.target.prior, Gamma):
            raise ValueError("Approximate conjugate sampler only works with Gamma prior")

        if not self.target.prior.dim == 1:
            raise ValueError("Approximate conjugate sampler only works with univariate Gamma prior")

        if np.sum(self.target.likelihood.distribution.location) != 0:
            raise ValueError("Approximate conjugate sampler only works with zero mean LMRF likelihood")

    def step(self):
        """ Draw one sample from the Gamma conditional obtained by approximating
        the LMRF likelihood with a Gaussian. """
        # Extract diff_op from target likelihood
        D = self.target.likelihood.distribution._diff_op
        n = D.shape[0]

        # Gaussian approximation of LMRF prior as function of x_k
        # See Uribe et al. (2022) for details
        # Current has a zero mean assumption on likelihood! TODO
        # BUG FIX: the smoothing constant was previously also named `beta`,
        # which was later rebound to the prior rate; since Lk_fun captures the
        # name by closure, any later call would silently use the wrong value.
        # A distinct name removes the shadowing hazard (behavior unchanged).
        smoothing = 1e-5
        def Lk_fun(x_k):
            # Diagonal IRLS-style weights: 1/sqrt((Dx)^2 + smoothing)
            dd = 1/np.sqrt((D @ x_k)**2 + smoothing*np.ones(n))
            W = sp.sparse.diags(dd)
            return W.sqrt() @ D

        x = self.target.likelihood.data      # current point (x)
        d = len(x)                           # dimension (d)
        Lx = Lk_fun(x)@x                     # weighted differences (Lx)
        alpha = self.target.prior.shape      # prior shape (alpha)
        beta = self.target.prior.rate        # prior rate (beta)

        # Conjugate-style update: Gamma(d + alpha, ||Lx||^2 + beta)
        dist = Gamma(shape=d+alpha, rate=np.linalg.norm(Lx)**2+beta)

        self.current_point = dist.sample()

    def tune(self, skip_len, update_count):
        # Sampling from the approximate conditional is exact; nothing to tune.
        pass
@@ -0,0 +1,28 @@
1
from cuqi.experimental.mcmc import SamplerNew

class DirectNew(SamplerNew):
    """ Direct sampler

    This sampler is used to sample from a target distribution directly. It simply calls the sample method of the target object to generate a sample.

    Parameters
    ----------
    target : Distribution
        The target distribution to sample from.

    """

    def _initialize(self):
        # Direct sampling is stateless; nothing to set up.
        pass

    def validate_target(self):
        """ Validate that the target supports direct sampling by attempting one draw. """
        # BUG FIX: narrowed a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit) and chained the original error so the
        # real cause of the failure is preserved.
        try:
            self.target.sample()
        except Exception as e:
            raise TypeError("Direct sampler requires a target with a sample method.") from e

    def step(self):
        # One step is simply one independent draw from the target.
        self.current_point = self.target.sample()

    def tune(self, skip_len, update_count):
        # Direct sampling is exact; there is nothing to tune.
        pass
@@ -0,0 +1,267 @@
1
+ from cuqi.distribution import JointDistribution
2
+ from cuqi.experimental.mcmc import SamplerNew
3
+ from cuqi.samples import Samples
4
+ from typing import Dict
5
+ import numpy as np
6
+ import warnings
7
+
8
# Fall back to a no-op progressbar when the optional progressbar2 package is
# not installed, so sampling still works (just without a progress display).
try:
    from progressbar import progressbar
except ImportError:
    def progressbar(iterable, **kwargs):
        # Warn once per call site; return the iterable unchanged.
        warnings.warn("Module mcmc: Progressbar not found. Install progressbar2 to get sampling progress.")
        return iterable
14
+
15
+ # Not subclassed from SamplerNew as Gibbs handles multiple samplers and samples multiple parameters
16
+ # Similar approach as for JointDistribution
17
class HybridGibbsNew:
    """
    Hybrid Gibbs sampler for sampling a joint distribution.

    Gibbs sampling samples the variables of the distribution sequentially,
    one variable at a time. When a variable represents a random vector, the
    whole vector is sampled simultaneously.

    The sampling of each variable is done by sampling from the conditional
    distribution of that variable given the values of the other variables.
    This is often a very efficient way of sampling from a joint distribution
    if the conditional distributions are easy to sample from.

    Hybrid Gibbs sampler is a generalization of the Gibbs sampler where the
    conditional distributions are sampled using different MCMC samplers.

    When the conditionals are sampled exactly, the samples from the Gibbs
    sampler converge to the joint distribution. See e.g.
    Gelman et al. "Bayesian Data Analysis" (2014), Third Edition
    for more details.

    In each Gibbs step, the corresponding sampler has the initial_point
    and initial_scale (if applicable) set to the value of the previous step
    and the sampler is reinitialized. This means that the sampling is not
    fully stateful at this point. This means samplers like NUTS will lose
    their internal state between Gibbs steps.

    Parameters
    ----------
    target : cuqi.distribution.JointDistribution
        Target distribution to sample from.

    sampling_strategy : dict
        Dictionary of sampling strategies for each variable.
        Keys are variable names.
        Values are sampler objects.

    num_sampling_steps : dict, *optional*
        Dictionary of number of sampling steps for each variable.
        The sampling steps are defined as the number of times the sampler
        will call its step method in each Gibbs step.
        Default is 1 for all variables.

    Example
    -------
    .. code-block:: python

        import cuqi
        import numpy as np

        # Model and data
        A, y_obs, probinfo = cuqi.testproblem.Deconvolution1D(phantom='square').get_components()
        n = A.domain_dim

        # Define distributions
        d = cuqi.distribution.Gamma(1, 1e-4)
        l = cuqi.distribution.Gamma(1, 1e-4)
        x = cuqi.distribution.GMRF(np.zeros(n), lambda d: d)
        y = cuqi.distribution.Gaussian(A, lambda l: 1/l)

        # Combine into a joint distribution and create posterior
        joint = cuqi.distribution.JointDistribution(d, l, x, y)
        posterior = joint(y=y_obs)

        # Define sampling strategy
        sampling_strategy = {
            'x': cuqi.experimental.mcmc.LinearRTONew(maxit=15),
            'd': cuqi.experimental.mcmc.ConjugateNew(),
            'l': cuqi.experimental.mcmc.ConjugateNew(),
        }

        # Define Gibbs sampler
        sampler = cuqi.experimental.mcmc.HybridGibbsNew(posterior, sampling_strategy)

        # Run sampler
        samples = sampler.sample(Ns=1000, Nb=200)

        # Plot results
        samples['x'].plot_ci(exact=probinfo.exactSolution)
        samples['d'].plot_trace(figsize=(8,2))
        samples['l'].plot_trace(figsize=(8,2))

    """

    def __init__(self, target: JointDistribution, sampling_strategy: Dict[str, SamplerNew], num_sampling_steps: Dict[str, int] = None):

        # Store target and allow conditioning to reduce to a single density
        self.target = target() # Create a copy of target distribution (to avoid modifying the original)

        # Store sampler instances (again as a copy to avoid modifying the original)
        self.samplers = sampling_strategy.copy()

        # Store number of sampling steps for each parameter
        self.num_sampling_steps = num_sampling_steps

        # Store parameter names
        self.par_names = self.target.get_parameter_names()

        # Initialize sampler (after target is set)
        self._initialize()

    def _initialize(self):
        """ Initialize sampler """

        # Initial points
        self.current_samples = self._get_initial_points()

        # Initialize sampling steps
        self._initialize_num_sampling_steps()

        # Allocate samples
        self._allocate_samples()

        # Set targets
        self._set_targets()

        # Initialize the samplers
        self._initialize_samplers()

        # Run over pre-sample methods for samplers that have it
        # TODO. Some samplers (NUTS) seem to require to run _pre_warmup before _pre_sample
        # This is not ideal and should be fixed in the future
        for sampler in self.samplers.values():
            self._pre_warmup_and_pre_sample_sampler(sampler)

        # Validate all targets for samplers.
        self.validate_targets()

    # ------------ Public methods ------------
    def validate_targets(self):
        """ Validate each of the conditional targets used in the Gibbs steps """
        if not isinstance(self.target, JointDistribution):
            raise ValueError('Target distribution must be a JointDistribution.')
        for sampler in self.samplers.values():
            sampler.validate_target()

    def sample(self, Ns) -> 'HybridGibbsNew':
        """ Sample from the joint distribution using Gibbs sampling.

        Returns self to allow chaining, e.g. sampler.warmup(Nb).sample(Ns).
        """
        for _ in progressbar(range(Ns)):
            self.step()
            self._store_samples()
        # Return self as promised by the annotation (was missing previously)
        return self

    def warmup(self, Nb) -> 'HybridGibbsNew':
        """ Warmup (tune) the Gibbs sampler.

        Returns self to allow chaining, e.g. sampler.warmup(Nb).sample(Ns).
        """
        for idx in progressbar(range(Nb)):
            self.step()
            self.tune(idx)
            self._store_samples()
        # Return self as promised by the annotation (was missing previously)
        return self

    def get_samples(self) -> Dict[str, Samples]:
        """ Return stored samples as a dict of parameter name -> Samples. """
        samples_object = {}
        for par_name in self.par_names:
            samples_array = np.array(self.samples[par_name]).T
            samples_object[par_name] = Samples(samples_array, self.target.get_density(par_name).geometry)
        return samples_object

    def step(self):
        """ Sequentially go through all parameters and sample them conditionally on each other """

        # Sample from each conditional distribution
        for par_name in self.par_names:

            # Set target for current parameter
            self._set_target(par_name)

            # Get sampler
            sampler = self.samplers[par_name]

            # Set initial parameters using current point and scale (subset of state)
            # This does not store the full state from e.g. NUTS sampler
            # But works on samplers like MH, PCN, ULA, MALA, LinearRTO, UGLA, CWMH
            # that only use initial_point and initial_scale
            sampler.initial_point = self.current_samples[par_name]
            if hasattr(sampler, 'initial_scale'): sampler.initial_scale = sampler.scale

            # Reinitialize sampler
            # This makes the sampler lose all of its state.
            # This is only OK because we set the initial values above from the previous state
            sampler.reinitialize()

            # Run pre_warmup and pre_sample methods for sampler
            # TODO. Some samplers (NUTS) seem to require to run _pre_warmup before _pre_sample
            self._pre_warmup_and_pre_sample_sampler(sampler)

            # Take MCMC steps
            for _ in range(self.num_sampling_steps[par_name]):
                sampler.step()

            # Extract samples (Ensure even 1-dimensional samples are 1D arrays)
            self.current_samples[par_name] = sampler.current_point.reshape(-1)

    def tune(self, idx):
        """ Tune each of the samplers """
        for par_name in self.par_names:
            self.samplers[par_name].tune(skip_len=1, update_count=idx)

    # ------------ Private methods ------------
    def _initialize_samplers(self):
        """ Initialize samplers """
        for sampler in self.samplers.values():
            sampler.initialize()

    def _initialize_num_sampling_steps(self):
        """ Initialize the number of sampling steps for each sampler. Defaults to 1 if not set by user """

        if self.num_sampling_steps is None:
            self.num_sampling_steps = {par_name: 1 for par_name in self.par_names}

        for par_name in self.par_names:
            if par_name not in self.num_sampling_steps:
                self.num_sampling_steps[par_name] = 1


    def _pre_warmup_and_pre_sample_sampler(self, sampler):
        """ Run a sampler's _pre_warmup and _pre_sample hooks if it has them """
        if hasattr(sampler, '_pre_warmup'): sampler._pre_warmup()
        if hasattr(sampler, '_pre_sample'): sampler._pre_sample()

    def _set_targets(self):
        """ Set targets for all samplers using the current samples """
        par_names = self.par_names
        for par_name in par_names:
            self._set_target(par_name)

    def _set_target(self, par_name):
        """ Set target conditional distribution for a single parameter using the current samples """
        # Get all other conditional parameters other than the current parameter and update the target
        # This defines - from a joint p(x,y,z) - the conditional distribution p(x|y,z) or p(y|x,z) or p(z|x,y)
        conditional_params = {par_name_: self.current_samples[par_name_] for par_name_ in self.par_names if par_name_ != par_name}
        self.samplers[par_name].target = self.target(**conditional_params)

    def _allocate_samples(self):
        """ Allocate memory for samples """
        samples = {}
        for par_name in self.par_names:
            samples[par_name] = []
        self.samples = samples

    def _get_initial_points(self):
        """ Get initial points for each parameter """
        initial_points = {}
        for par_name in self.par_names:
            # Prefer a density-provided init_point; otherwise default to ones
            if hasattr(self.target.get_density(par_name), 'init_point'):
                initial_points[par_name] = self.target.get_density(par_name).init_point
            else:
                initial_points[par_name] = np.ones(self.target.get_density(par_name).dim)
        return initial_points

    def _store_samples(self):
        """ Append the current samples to the per-parameter sample lists """
        for par_name in self.par_names:
            self.samples[par_name].append(self.current_samples[par_name])
@@ -49,7 +49,7 @@ class UGLANew(SamplerNew):
49
49
  where `sample` is the current sample and `sample_index` is the index of the sample.
50
50
  An example is shown in demos/demo31_callback.py.
51
51
  """
52
- def __init__(self, target, initial_point=None, maxit=50, tol=1e-4, beta=1e-5, **kwargs):
52
+ def __init__(self, target=None, initial_point=None, maxit=50, tol=1e-4, beta=1e-5, **kwargs):
53
53
 
54
54
  super().__init__(target=target, initial_point=initial_point, **kwargs)
55
55
 
@@ -24,7 +24,7 @@ class MHNew(ProposalBasedSamplerNew):
24
24
 
25
25
  _STATE_KEYS = ProposalBasedSamplerNew._STATE_KEYS.union({'scale', '_scale_temp'})
26
26
 
27
- def __init__(self, target, proposal=None, scale=1, **kwargs):
27
+ def __init__(self, target=None, proposal=None, scale=1, **kwargs):
28
28
  super().__init__(target, proposal=proposal, scale=scale, **kwargs)
29
29
 
30
30
  def _initialize(self):
@@ -189,7 +189,7 @@ class RegularizedLinearRTONew(LinearRTONew):
189
189
  An example is shown in demos/demo31_callback.py.
190
190
 
191
191
  """
192
- def __init__(self, target, initial_point=None, maxit=100, stepsize="automatic", abstol=1e-10, adaptive=True, **kwargs):
192
+ def __init__(self, target=None, initial_point=None, maxit=100, stepsize="automatic", abstol=1e-10, adaptive=True, **kwargs):
193
193
 
194
194
  super().__init__(target=target, initial_point=initial_point, **kwargs)
195
195
 
@@ -43,7 +43,7 @@ class SamplerNew(ABC):
43
43
  _HISTORY_KEYS = {'_samples', '_acc'}
44
44
  """ Set of keys for the history dictionary. """
45
45
 
46
- def __init__(self, target: cuqi.density.Density, initial_point=None, callback=None):
46
+ def __init__(self, target:cuqi.density.Density=None, initial_point=None, callback=None):
47
47
  """ Initializer for abstract base class for all samplers.
48
48
 
49
49
  Any subclassing samplers should simply store input parameters as part of the __init__ method.
@@ -102,8 +102,18 @@ class SamplerNew(ABC):
102
102
  pass
103
103
 
104
104
  @abstractmethod
105
- def tune(self):
106
- """ Tune the parameters of the sampler. This method is called after each step of the warmup phase. """
105
+ def tune(self, skip_len, update_count):
106
+ """ Tune the parameters of the sampler. This method is called after each step of the warmup phase.
107
+
108
+ Parameters
109
+ ----------
110
+ skip_len : int
111
+ Defines the number of steps in between tuning (i.e. the tuning interval).
112
+
113
+ update_count : int
114
+ The number of times tuning has been performed. Can be used for internal bookkeeping.
115
+
116
+ """
107
117
  pass
108
118
 
109
119
  @abstractmethod
@@ -385,6 +395,7 @@ class SamplerNew(ABC):
385
395
  """ Return a string representation of the sampler. """
386
396
  if self.target is None:
387
397
  return f"Sampler: {self.__class__.__name__} \n Target: None"
398
+ self._ensure_initialized()
388
399
  state = self.get_state()
389
400
  msg = f" Sampler: \n\t {self.__class__.__name__} \n Target: \n \t {self.target} \n Current state: \n"
390
401
  # Sort keys alphabetically
@@ -758,3 +758,25 @@ def test_Gaussian_from_linear_operator_sqrtprec():
758
758
  y_from_dense = cuqi.distribution.Gaussian(mean = np.zeros(N), sqrtprec = sqrtprec.todense())
759
759
 
760
760
  assert np.allclose(y_from_dense.logpdf(np.ones(N)), y_from_sparse.logpdf(np.ones(N)))
761
+
762
@pytest.mark.parametrize("alpha, beta, gamma, expected",[
    (1.0, 2.0, 3.0, [0.77974597, 0.77361298, 0.5422682, 0.81054637, 1.35205349]),
    (128.0, 3.0, -4.0, [1.02250649, 1.05735833, 0.98794758, 1.04533337, 1.00500678])
])
def test_MHN_sample_regression(alpha, beta, gamma, expected):
    """ Regression: ModifiedHalfNormal sampling with a fixed seed reproduces stored values. """
    random_state = np.random.RandomState(0)
    distribution = cuqi.distribution.ModifiedHalfNormal(alpha, beta, gamma)
    drawn = distribution.sample(5, rng=random_state).samples
    assert np.allclose(drawn, np.array(expected))
771
+
772
@pytest.mark.parametrize("alpha, beta, gamma, expected_logpdf, expected_gradient",[
    (1.0, 2.0, 3.0, -40.0, [[-1.], [-3.], [-5.], [-7.], [-9.]]),
    (64.0, 3.0, -4.0, -2258.388020204731, [[-1.], [-160.5], [-299.], [-432.25], [-563.4 ]])
])
def test_MHN_regression(alpha, beta, gamma, expected_logpdf, expected_gradient):
    """ Regression: ModifiedHalfNormal logpdf and gradient match stored values.

    logpdf and _gradient are deterministic, so no random state is needed
    (removed an unused RandomState local).
    """
    dist = cuqi.distribution.ModifiedHalfNormal(alpha, beta, gamma)
    point = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    logpdf = dist.logpdf(point)
    gradient = dist._gradient(point)
    assert np.allclose(logpdf, np.array(expected_logpdf))
    assert np.allclose(gradient, np.array(expected_gradient))
@@ -17,7 +17,7 @@ ignore_list = [
17
17
 
18
18
  # Define cases to skip (these are TODO)
19
19
  skip_logd = [
20
- cuqi.distribution.Gamma # Missing force_ndarray
20
+ cuqi.distribution.Gamma, # Missing force_ndarray
21
21
  ]
22
22
  skip_sample = [
23
23
  cuqi.distribution.Gamma, # Missing force_ndarray
@@ -67,7 +67,8 @@ def test_multivariate_scalar_vars_logd(dist):
67
67
  val = np.random.randn(5)
68
68
  assert np.allclose(
69
69
  dist_from_vec.logd(val),
70
- dist_from_dim.logd(val)
70
+ dist_from_dim.logd(val),
71
+ equal_nan=True
71
72
  )
72
73
 
73
74