CUQIpy 1.2.0.post0.dev42.tar.gz → 1.2.0.post0.dev109.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (119)
  1. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/CUQIpy.egg-info/PKG-INFO +1 -1
  2. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/CUQIpy.egg-info/SOURCES.txt +1 -0
  3. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/PKG-INFO +1 -1
  4. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/_version.py +3 -3
  5. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/distribution/__init__.py +1 -0
  6. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/distribution/_normal.py +34 -0
  7. cuqipy-1.2.0.post0.dev109/cuqi/distribution/_truncated_normal.py +129 -0
  8. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/experimental/mcmc/_rto.py +2 -2
  9. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/sampler/_rto.py +2 -2
  10. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/solver/__init__.py +1 -0
  11. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/solver/_solver.py +169 -4
  12. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/tests/test_distribution.py +67 -0
  13. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/tests/test_solver.py +47 -3
  14. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/CUQIpy.egg-info/dependency_links.txt +0 -0
  15. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/CUQIpy.egg-info/requires.txt +0 -0
  16. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/CUQIpy.egg-info/top_level.txt +0 -0
  17. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/LICENSE +0 -0
  18. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/README.md +0 -0
  19. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/__init__.py +0 -0
  20. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/_messages.py +0 -0
  21. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/array/__init__.py +0 -0
  22. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/array/_array.py +0 -0
  23. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/config.py +0 -0
  24. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/data/__init__.py +0 -0
  25. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/data/_data.py +0 -0
  26. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/data/astronaut.npz +0 -0
  27. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/data/camera.npz +0 -0
  28. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/data/cat.npz +0 -0
  29. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/data/cookie.png +0 -0
  30. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/data/satellite.mat +0 -0
  31. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/density/__init__.py +0 -0
  32. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/density/_density.py +0 -0
  33. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/diagnostics.py +0 -0
  34. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/distribution/_beta.py +0 -0
  35. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/distribution/_cauchy.py +0 -0
  36. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/distribution/_cmrf.py +0 -0
  37. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/distribution/_custom.py +0 -0
  38. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/distribution/_distribution.py +0 -0
  39. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/distribution/_gamma.py +0 -0
  40. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/distribution/_gaussian.py +0 -0
  41. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/distribution/_gmrf.py +0 -0
  42. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/distribution/_inverse_gamma.py +0 -0
  43. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/distribution/_joint_distribution.py +0 -0
  44. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/distribution/_laplace.py +0 -0
  45. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/distribution/_lmrf.py +0 -0
  46. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/distribution/_lognormal.py +0 -0
  47. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/distribution/_modifiedhalfnormal.py +0 -0
  48. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/distribution/_posterior.py +0 -0
  49. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/distribution/_smoothed_laplace.py +0 -0
  50. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/distribution/_uniform.py +0 -0
  51. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/experimental/__init__.py +0 -0
  52. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/experimental/mcmc/__init__.py +0 -0
  53. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/experimental/mcmc/_conjugate.py +0 -0
  54. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/experimental/mcmc/_conjugate_approx.py +0 -0
  55. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/experimental/mcmc/_cwmh.py +0 -0
  56. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/experimental/mcmc/_direct.py +0 -0
  57. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/experimental/mcmc/_gibbs.py +0 -0
  58. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/experimental/mcmc/_hmc.py +0 -0
  59. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/experimental/mcmc/_langevin_algorithm.py +0 -0
  60. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/experimental/mcmc/_laplace_approximation.py +0 -0
  61. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/experimental/mcmc/_mh.py +0 -0
  62. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/experimental/mcmc/_pcn.py +0 -0
  63. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/experimental/mcmc/_sampler.py +0 -0
  64. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/experimental/mcmc/_utilities.py +0 -0
  65. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/geometry/__init__.py +0 -0
  66. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/geometry/_geometry.py +0 -0
  67. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/implicitprior/__init__.py +0 -0
  68. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/implicitprior/_regularizedGMRF.py +0 -0
  69. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/implicitprior/_regularizedGaussian.py +0 -0
  70. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/implicitprior/_regularizedUnboundedUniform.py +0 -0
  71. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/likelihood/__init__.py +0 -0
  72. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/likelihood/_likelihood.py +0 -0
  73. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/model/__init__.py +0 -0
  74. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/model/_model.py +0 -0
  75. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/operator/__init__.py +0 -0
  76. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/operator/_operator.py +0 -0
  77. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/pde/__init__.py +0 -0
  78. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/pde/_pde.py +0 -0
  79. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/problem/__init__.py +0 -0
  80. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/problem/_problem.py +0 -0
  81. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/sampler/__init__.py +0 -0
  82. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/sampler/_conjugate.py +0 -0
  83. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/sampler/_conjugate_approx.py +0 -0
  84. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/sampler/_cwmh.py +0 -0
  85. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/sampler/_gibbs.py +0 -0
  86. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/sampler/_hmc.py +0 -0
  87. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/sampler/_langevin_algorithm.py +0 -0
  88. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/sampler/_laplace_approximation.py +0 -0
  89. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/sampler/_mh.py +0 -0
  90. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/sampler/_pcn.py +0 -0
  91. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/sampler/_sampler.py +0 -0
  92. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/samples/__init__.py +0 -0
  93. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/samples/_samples.py +0 -0
  94. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/testproblem/__init__.py +0 -0
  95. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/testproblem/_testproblem.py +0 -0
  96. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/utilities/__init__.py +0 -0
  97. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/utilities/_get_python_variable_name.py +0 -0
  98. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/utilities/_utilities.py +0 -0
  99. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/pyproject.toml +0 -0
  100. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/requirements.txt +0 -0
  101. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/setup.cfg +0 -0
  102. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/setup.py +0 -0
  103. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/tests/test_MRFs.py +0 -0
  104. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/tests/test_abstract_distribution_density.py +0 -0
  105. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/tests/test_bayesian_inversion.py +0 -0
  106. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/tests/test_density.py +0 -0
  107. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/tests/test_distributions_shape.py +0 -0
  108. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/tests/test_geometry.py +0 -0
  109. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/tests/test_implicit_priors.py +0 -0
  110. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/tests/test_joint_distribution.py +0 -0
  111. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/tests/test_likelihood.py +0 -0
  112. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/tests/test_model.py +0 -0
  113. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/tests/test_pde.py +0 -0
  114. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/tests/test_posterior.py +0 -0
  115. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/tests/test_problem.py +0 -0
  116. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/tests/test_sampler.py +0 -0
  117. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/tests/test_samples.py +0 -0
  118. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/tests/test_testproblem.py +0 -0
  119. {cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/tests/test_utilities.py +0 -0
{cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/CUQIpy.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: CUQIpy
-Version: 1.2.0.post0.dev42
+Version: 1.2.0.post0.dev109
 Summary: Computational Uncertainty Quantification for Inverse problems in Python
 Maintainer-email: "Nicolai A. B. Riis" <nabr@dtu.dk>, "Jakob S. Jørgensen" <jakj@dtu.dk>, "Amal M. Alghamdi" <amaal@dtu.dk>, Chao Zhang <chaz@dtu.dk>
 License: Apache License
{cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/CUQIpy.egg-info/SOURCES.txt
@@ -42,6 +42,7 @@ cuqi/distribution/_modifiedhalfnormal.py
 cuqi/distribution/_normal.py
 cuqi/distribution/_posterior.py
 cuqi/distribution/_smoothed_laplace.py
+cuqi/distribution/_truncated_normal.py
 cuqi/distribution/_uniform.py
 cuqi/experimental/__init__.py
 cuqi/experimental/mcmc/__init__.py
{cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: CUQIpy
-Version: 1.2.0.post0.dev42
+Version: 1.2.0.post0.dev109
 Summary: Computational Uncertainty Quantification for Inverse problems in Python
 Maintainer-email: "Nicolai A. B. Riis" <nabr@dtu.dk>, "Jakob S. Jørgensen" <jakj@dtu.dk>, "Amal M. Alghamdi" <amaal@dtu.dk>, Chao Zhang <chaz@dtu.dk>
 License: Apache License
{cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/_version.py
@@ -8,11 +8,11 @@ import json

 version_json = '''
 {
- "date": "2024-10-11T13:31:57+0300",
+ "date": "2024-11-08T11:08:22+0100",
  "dirty": false,
  "error": null,
- "full-revisionid": "85645ceb5eccff9ab309005d62defa609b9f53b2",
- "version": "1.2.0.post0.dev42"
+ "full-revisionid": "17570092caf729244ade9c6d647cfa1d2b9ef5f0",
+ "version": "1.2.0.post0.dev109"
 }
 ''' # END VERSION_JSON

{cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/distribution/__init__.py
@@ -12,6 +12,7 @@ from ._laplace import Laplace
 from ._smoothed_laplace import SmoothedLaplace
 from ._lognormal import Lognormal
 from ._normal import Normal
+from ._truncated_normal import TruncatedNormal
 from ._posterior import Posterior
 from ._uniform import Uniform
 from ._custom import UserDefinedDistribution, DistributionGallery
{cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/distribution/_normal.py
@@ -1,5 +1,8 @@
 import numpy as np
+import numbers
 from scipy.special import erf
+from cuqi.geometry import _get_identity_geometries
+from cuqi.utilities import force_ndarray
 from cuqi.distribution import Distribution

 class Normal(Distribution):
@@ -27,6 +30,24 @@ class Normal(Distribution):
         self.mean = mean
         self.std = std

+    @property
+    def mean(self):
+        """ Mean of the distribution """
+        return self._mean
+
+    @mean.setter
+    def mean(self, value):
+        self._mean = force_ndarray(value, flatten=True)
+
+    @property
+    def std(self):
+        """ Std of the distribution """
+        return self._std
+
+    @std.setter
+    def std(self, value):
+        self._std = force_ndarray(value, flatten=True)
+
     def pdf(self, x):
         return np.prod(1/(self.std*np.sqrt(2*np.pi))*np.exp(-0.5*((x-self.mean)/self.std)**2))

@@ -36,6 +57,19 @@ class Normal(Distribution):
     def cdf(self, x):
         return np.prod(0.5*(1 + erf((x-self.mean)/(self.std*np.sqrt(2)))))

+    def _gradient(self, val, *args, **kwargs):
+        if not type(self.geometry) in _get_identity_geometries():
+            raise NotImplementedError("Gradient not implemented for distribution {} with geometry {}".format(self,self.geometry))
+        if not callable(self.mean):
+            return -(val-self.mean)/(self.std**2)
+        elif hasattr(self.mean, "gradient"): # for likelihood
+            model = self.mean
+            dev = val - model.forward(*args, **kwargs)
+            print(dev)
+            return model.gradient(1.0/(np.array(self.std)) @ dev, *args, **kwargs)
+        else:
+            raise NotImplementedError("Gradient not implemented for distribution {} with location {}".format(self,self.mean))
+
     def _sample(self,N=1, rng=None):

         """
cuqipy-1.2.0.post0.dev109/cuqi/distribution/_truncated_normal.py (new file)
@@ -0,0 +1,129 @@
+import numpy as np
+from scipy.special import erf
+from cuqi.utilities import force_ndarray
+from cuqi.distribution import Distribution
+from cuqi.distribution import Normal
+
+class TruncatedNormal(Distribution):
+    """
+    Truncated Normal probability distribution.
+
+    Generates instance of cuqi.distribution.TruncatedNormal.
+    It allows the user to specify upper and lower bounds on random variables
+    represented by a Normal distribution. This distribution is suitable for a
+    small dimension setup (e.g. `dim`=3 or 4). Using TruncatedNormal
+    Distribution with a larger dimension can lead to a high rejection rate when
+    used within MCMC samplers.
+
+    The variables of this distribution are iid.
+
+
+    Parameters
+    ------------
+    mean : float or array_like of floats
+        mean of distribution
+    std : float or array_like of floats
+        standard deviation
+    low : float or array_like of floats
+        lower bound of the distribution
+    high : float or array_like of floats
+        upper bound of the distribution
+
+    Example
+    -----------
+    .. code-block:: python
+
+        #Generate Normal with mean 0, standard deviation 1 and bounds [-2,2]
+        p = cuqi.distribution.TruncatedNormal(mean=0, std=1, low=-2, high=2)
+        samples = p.sample(5000)
+    """
+    def __init__(self, mean=None, std=None, low=-np.Inf, high=np.Inf, is_symmetric=False, **kwargs):
+        # Init from abstract distribution class
+        super().__init__(is_symmetric=is_symmetric, **kwargs)
+
+        # Init specific to this distribution
+        self.mean = mean
+        self.std = std
+        self.low = low
+        self.high = high
+
+        # Init underlying normal distribution
+        self._normal = Normal(self.mean, self.std, is_symmetric=True, **kwargs)
+
+    @property
+    def mean(self):
+        """ Mean of the distribution """
+        return self._mean
+
+    @mean.setter
+    def mean(self, value):
+        self._mean = force_ndarray(value, flatten=True)
+        if hasattr(self, '_normal'):
+            self._normal.mean = self._mean
+
+    @property
+    def std(self):
+        """ Std of the distribution """
+        return self._std
+
+    @std.setter
+    def std(self, value):
+        self._std = force_ndarray(value, flatten=True)
+        if hasattr(self, '_normal'):
+            self._normal.std = self._std
+
+    @property
+    def low(self):
+        """ Lower bound of the distribution """
+        return self._low
+
+    @low.setter
+    def low(self, value):
+        self._low = force_ndarray(value, flatten=True)
+
+    @property
+    def high(self):
+        """ Higher bound of the distribution """
+        return self._high
+
+    @high.setter
+    def high(self, value):
+        self._high = force_ndarray(value, flatten=True)
+
+    def logpdf(self, x):
+        """
+        Computes the unnormalized logpdf at the given values of x.
+        """
+        # the unnormalized logpdf
+        # check if x falls in the range between np.array a and b
+        if np.any(x < self.low) or np.any(x > self.high):
+            return -np.Inf
+        else:
+            return self._normal.logpdf(x)
+
+    def _gradient(self, x, *args, **kwargs):
+        """
+        Computes the gradient of the unnormalized logpdf at the given values of x.
+        """
+        # check if x falls in the range between np.array a and b
+        if np.any(x < self.low) or np.any(x > self.high):
+            return np.NaN*np.ones_like(x)
+        else:
+            return self._normal.gradient(x, *args, **kwargs)
+
+    def _sample(self, N=1, rng=None):
+        """
+        Generates random samples from the distribution.
+        """
+        max_iter = 1e9 # maximum number of trials to avoid infinite loop
+        samples = []
+        for i in range(int(max_iter)):
+            if len(samples) == N:
+                break
+            sample = self._normal.sample(1,rng)
+            if np.all(sample >= self.low) and np.all(sample <= self.high):
+                samples.append(sample)
+            # raise a error if the number of iterations exceeds max_iter
+            if i == max_iter-1:
+                raise RuntimeError("Failed to generate {} samples within {} iterations".format(N, max_iter))
+        return np.array(samples).T.reshape(-1,N)
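
A minimal usage sketch of the new TruncatedNormal, adapted from the docstring example above and from the tests added in tests/test_distribution.py (assumes a build that ships this file):

    import numpy as np
    import cuqi

    # Truncated standard normal restricted to [-2, 2]
    p = cuqi.distribution.TruncatedNormal(mean=0, std=1, low=-2, high=2)
    samples = p.sample(5000)          # rejection sampling against the bounds
    print(p.logpdf(np.array([0.5])))  # finite; equals the untruncated Normal logpdf (unnormalized)
    print(p.logpdf(np.array([3.0])))  # -inf, outside the bounds
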
{cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/experimental/mcmc/_rto.py
@@ -235,8 +235,8 @@ class RegularizedLinearRTO(LinearRTO):

     def step(self):
         y = self.b_tild + np.random.randn(len(self.b_tild))
-        sim = FISTA(self.M, y, self.current_point, self.proximal,
-                    maxit = self.maxit, stepsize = self._stepsize, abstol = self.abstol, adaptive = self.adaptive)
+        sim = FISTA(self.M, y, self.proximal,
+                    self.current_point, maxit = self.maxit, stepsize = self._stepsize, abstol = self.abstol, adaptive = self.adaptive)
         self.current_point, _ = sim.solve()
         acc = 1
         return acc
{cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/sampler/_rto.py
@@ -267,8 +267,8 @@ class RegularizedLinearRTO(LinearRTO):
         samples[:, 0] = self.x0
         for s in range(Ns-1):
             y = self.b_tild + np.random.randn(len(self.b_tild))
-            sim = FISTA(self.M, y, samples[:, s], self.proximal,
-                        maxit = self.maxit, stepsize = _stepsize, abstol = self.abstol, adaptive = self.adaptive)
+            sim = FISTA(self.M, y, self.proximal,
+                        samples[:, s], maxit = self.maxit, stepsize = _stepsize, abstol = self.abstol, adaptive = self.adaptive)
             samples[:, s+1], _ = sim.solve()

             self._print_progress(s+2,Ns) #s+2 is the sample number, s+1 is index assuming x0 is the first sample
{cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/solver/__init__.py
@@ -7,6 +7,7 @@ from ._solver import (
     LM,
     PDHG,
     FISTA,
+    ADMM,
     ProjectNonnegative,
     ProjectBox,
     ProximalL1
{cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/cuqi/solver/_solver.py
@@ -584,8 +584,8 @@ class FISTA(object):
     ----------
     A : ndarray or callable f(x,*args).
     b : ndarray.
-    x0 : ndarray. Initial guess.
     proximal : callable f(x, gamma) for proximal mapping.
+    x0 : ndarray. Initial guess.
     maxit : The maximum number of iterations.
     stepsize : The stepsize of the gradient step.
     abstol : The numerical tolerance for convergence checks.
@@ -606,11 +606,11 @@ class FISTA(object):
         b = rng.standard_normal(m)
         stepsize = 0.99/(sp.linalg.interpolative.estimate_spectral_norm(A)**2)
         x0 = np.zeros(n)
-        fista = FISTA(A, b, x0, proximal = ProximalL1, stepsize = stepsize, maxit = 100, abstol=1e-12, adaptive = True)
+        fista = FISTA(A, b, proximal = ProximalL1, x0, stepsize = stepsize, maxit = 100, abstol=1e-12, adaptive = True)
         sol, _ = fista.solve()

     """
-    def __init__(self, A, b, x0, proximal, maxit=100, stepsize=1e0, abstol=1e-14, adaptive = True):
+    def __init__(self, A, b, proximal, x0, maxit=100, stepsize=1e0, abstol=1e-14, adaptive = True):

         self.A = A
         self.b = b
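
The FISTA constructor now takes the proximal operator before the initial guess, i.e. FISTA(A, b, proximal, x0, ...) instead of FISTA(A, b, x0, proximal, ...); the RegularizedLinearRTO call sites above were updated accordingly. A sketch of a positional call in the new order, modeled on the updated test_FISTA further down (the random problem data and stepsize are illustrative):

    import numpy as np
    from cuqi.solver import FISTA, ProximalL1

    rng = np.random.default_rng(seed=0)
    m, n = 10, 5
    A = rng.standard_normal((m, n))
    b = rng.standard_normal(m)
    x0 = np.zeros(n)
    stepsize = 0.99/np.linalg.norm(A, 2)**2   # conservative gradient stepsize

    # New argument order: proximal operator before the initial guess
    sol, _ = FISTA(A, b, ProximalL1, x0, stepsize=stepsize, maxit=100, abstol=1e-12, adaptive=True).solve()
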
@@ -650,8 +650,157 @@ class FISTA(object):
             x_new = x_new + ((k-1)/(k+2))*(x_new - x_old)

             x = x_new.copy()
+
+class ADMM(object):
+    """Alternating Direction Method of Multipliers for solving regularized linear least squares problems of the form:
+    Minimize ||Ax-b||^2 + sum_i f_i(L_i x),
+    where the sum ranges from 1 to an arbitrary n. See definition of the parameter `penalty_terms` below for more details about f_i and L_i
+
+    Reference:
+    [1] Boyd et al. "Distributed optimization and statistical learning via the alternating direction method of multipliers." Foundations and Trends® in Machine learning, 2011.
+
+
+    Parameters
+    ----------
+    A : ndarray or callable
+        Represents a matrix or a function that performs matrix-vector multiplications.
+        When A is a callable, it accepts arguments (x, flag) where:
+        - flag=1 indicates multiplication of A with vector x, that is A @ x.
+        - flag=2 indicates multiplication of the transpose of A with vector x, that is A.T @ x.
+    b : ndarray.
+    penalty_terms : List of tuples (callable proximal operator of f_i, linear operator L_i)
+        Each callable proximal operator f_i accepts two arguments (x, p) and should return the minimizer of p/2||x-z||^2 + f(x) over z for some f.
+    x0 : ndarray. Initial guess.
+    penalty_parameter : Trade-off between linear least squares and regularization term in the solver iterates. Denoted as "rho" in [1].
+    maxit : The maximum number of iterations.
+    adaptive : Whether to adaptively update the penalty_parameter each iteration such that the primal and dual residual norms are of the same order of magnitude. Based on [1], Subsection 3.4.1
+
+    Example
+    -----------
+    .. code-block:: python
+
+        from cuqi.solver import ADMM, ProximalL1, ProjectNonnegative
+        import numpy as np
+
+        rng = np.random.default_rng()
+
+        m, n, k = 10, 5, 4
+        A = rng.standard_normal((m, n))
+        b = rng.standard_normal(m)
+        L = rng.standard_normal((k, n))
+
+        x0 = np.zeros(n)
+        admm = ADMM(A, b, x0, penalty_terms = [(ProximalL1, L), (lambda z, _ : ProjectNonnegative(z), np.eye(n))], tradeoff = 10)
+        sol, _ = admm.solve()
+
+    """
+
+    def __init__(self, A, b, penalty_terms, x0, penalty_parameter = 10, maxit = 100, inner_max_it = 10, adaptive = True):
+
+        self.A = A
+        self.b = b
+        self.x_cur = x0
+
+        dual_len = [penalty[1].shape[0] for penalty in penalty_terms]
+        self.z_cur = [np.zeros(l) for l in dual_len]
+        self.u_cur = [np.zeros(l) for l in dual_len]
+        self.n = penalty_terms[0][1].shape[1]
+
+        self.rho = penalty_parameter
+        self.maxit = maxit
+        self.inner_max_it = inner_max_it
+        self.adaptive = adaptive
+
+        self.penalty_terms = penalty_terms
+
+        self.p = len(self.penalty_terms)
+        self._big_matrix = None
+        self._big_vector = None
+
+    def solve(self):
+        """
+        Solves the regularized linear least squares problem using ADMM in scaled form. Based on [1], Subsection 3.1.1
+        """
+        z_new = self.p*[0]
+        u_new = self.p*[0]
+
+        # Iterating
+        for i in range(self.maxit):
+            self._iteration_pre_processing()
+
+            # Main update (Least Squares)
+            solver = CGLS(self._big_matrix, self._big_vector, self.x_cur, self.inner_max_it)
+            x_new, _ = solver.solve()
+
+            # Regularization update
+            for j, penalty in enumerate(self.penalty_terms):
+                z_new[j] = penalty[0](penalty[1]@x_new + self.u_cur[j], 1.0/self.rho)
+
+            res_primal = 0.0
+            # Dual update
+            for j, penalty in enumerate(self.penalty_terms):
+                r_partial = penalty[1]@x_new - z_new[j]
+                res_primal += LA.norm(r_partial)**2
+
+                u_new[j] = self.u_cur[j] + r_partial
+
+            res_dual = 0.0
+            for j, penalty in enumerate(self.penalty_terms):
+                res_dual += LA.norm(penalty[1].T@(z_new[j] - self.z_cur[j]))**2
+
+            # Adaptive approach based on [1], Subsection 3.4.1
+            if self.adaptive:
+                if res_dual > 1e2*res_primal:
+                    self.rho *= 0.5 # More regularization
+                elif res_primal > 1e2*res_dual:
+                    self.rho *= 2.0 # More data fidelity
+
+            self.x_cur, self.z_cur, self.u_cur = x_new, z_new.copy(), u_new
+
+        return self.x_cur, i
+
+    def _iteration_pre_processing(self):
+        """ Preprocessing
+        Every iteration of ADMM requires solving a linear least squares system of the form
+            minimize 1/(rho) \|Ax-b\|_2^2 + sum_{i=1}^{p} \|penalty[1]x - (y - u)\|_2^2
+        To solve this, all linear least squares terms are combined into a single big term
+        with matrix big_matrix and data big_vector.
+
+        The matrix only needs to be updated when rho changes, i.e., when the adaptive option is used.
+        The data vector needs to be updated every iteration.
+        """
+
+        self._big_vector = np.hstack([np.sqrt(1/self.rho)*self.b] + [self.z_cur[i] - self.u_cur[i] for i in range(self.p)])
+
+        # Check whether matrix needs to be updated
+        if self._big_matrix is not None and not self.adaptive:
+            return
+
+        # Update big_matrix
+        if callable(self.A):
+            def matrix_eval(x, flag):
+                if flag == 1:
+                    out1 = np.sqrt(1/self.rho)*self.A(x, 1)
+                    out2 = [penalty[1]@x for penalty in self.penalty_terms]
+                    out = np.hstack([out1] + out2)
+                elif flag == 2:
+                    idx_start = len(x)
+                    idx_end = len(x)
+                    out1 = np.zeros(self.n)
+                    for _, t in reversed(self.penalty_terms):
+                        idx_start -= t.shape[0]
+                        out1 += t.T@x[idx_start:idx_end]
+                        idx_end = idx_start
+                    out2 = np.sqrt(1/self.rho)*self.A(x[:idx_end], 2)
+                    out = out1 + out2
+                return out
+            self._big_matrix = matrix_eval
+        else:
+            self._big_matrix = np.vstack([np.sqrt(1/self.rho)*self.A] + [penalty[1] for penalty in self.penalty_terms])
+
+
 def ProjectNonnegative(x):
     """(Euclidean) projection onto the nonnegative orthant.
@@ -678,6 +827,22 @@ def ProjectBox(x, lower = None, upper = None):

     return np.minimum(np.maximum(x, lower), upper)

+def ProjectHalfspace(x, a, b):
+    """(Euclidean) projection onto the halfspace defined {z|<a,z> <= b}.
+
+    Parameters
+    ----------
+    x : array_like.
+    a : array_like.
+    b : array_like.
+    """
+
+    ax_b = np.inner(a,x) - b
+    if ax_b <= 0:
+        return x
+    else:
+        return x - (ax_b/np.inner(a,a))*a
+
 def ProximalL1(x, gamma):
     """(Euclidean) proximal operator of the \|x\|_1 norm.
     Also known as the shrinkage or soft thresholding operator.
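
For reference, a small sketch of what the new ProjectHalfspace does. It is defined in cuqi/solver/_solver.py but, as of this diff, not re-exported in cuqi/solver/__init__.py, so the sketch imports it from the private module:

    import numpy as np
    from cuqi.solver._solver import ProjectHalfspace

    a, b = np.array([1.0, 0.0]), 1.0
    print(ProjectHalfspace(np.array([0.5, 2.0]), a, b))  # already in {z : <a,z> <= b}: returned unchanged
    print(ProjectHalfspace(np.array([3.0, 2.0]), a, b))  # projected onto the boundary <a,z> = b: [1. 2.]
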
@@ -687,4 +852,4 @@ def ProximalL1(x, gamma):
     x : array_like.
     gamma : scale parameter.
     """
-    return np.multiply(np.sign(x), np.maximum(np.abs(x)-gamma, 0))
+    return np.multiply(np.sign(x), np.maximum(np.abs(x)-gamma, 0))
{cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/tests/test_distribution.py
@@ -28,6 +28,73 @@ def test_Normal_sample_regression(mean,var,expected):
     target = np.array(expected).T
     assert np.allclose( samples.samples, target)

+@pytest.mark.parametrize("mean,std,points,expected",[
+    (0,1,[-1,0,1],[1,0,-1]),
+    (np.array([0,0]),np.array([1,1]),[[-1,0],[0,0],[0,-1]], [[1,0],[0,0],[0,1]])])
+def test_Normal_gradient(mean,std,points,expected):
+    p = cuqi.distribution.Normal(mean,std)
+    for point, grad in zip(points, expected):
+        assert np.allclose(p.gradient(point), grad)
+
+@pytest.mark.parametrize("mean,std,low,high,points",[(0.0,
+                                                      1.0,
+                                                      -1.0,
+                                                      1.0,
+                                                      [-1.5, -0.5, 0.5, 1.5]),
+                                                     (np.array([0.0, 0.0]),
+                                                      np.array([1.0, 1.0]),
+                                                      np.array([-1.0, -1.0]),
+                                                      np.array([1.0, 1.0]),
+                                                      [np.array([-0.5, 0.0]),
+                                                       np.array([0.5, 0.0]),
+                                                       np.array([-2.0, 0.0]),
+                                                       np.array([2.0, 0.0])])])
+def test_TruncatedNormal_logpdf(mean,std,low,high,points):
+    x_trun = cuqi.distribution.TruncatedNormal(mean,std,low=low,high=high)
+    x = cuqi.distribution.Normal(mean,std)
+    for point in points:
+        if np.all(point >= low) and np.all(point <= high):
+            assert x_trun.logpdf(point) == approx(x.logpdf(point))
+        else:
+            assert np.isneginf(x_trun.logpdf(point))
+
+@pytest.mark.parametrize("mean,std,low,high,points",[(0.0,
+                                                      1.0,
+                                                      -1.0,
+                                                      1.0,
+                                                      [-1.5, -0.5, 0.5, 1.5]),
+                                                     (np.array([0.0, 0.0]),
+                                                      np.array([1.0, 1.0]),
+                                                      np.array([-1.0, -1.0]),
+                                                      np.array([1.0, 1.0]),
+                                                      [np.array([-0.5, 0.0]),
+                                                       np.array([0.5, 0.0]),
+                                                       np.array([-2.0, 0.0]),
+                                                       np.array([2.0, 0.0])])])
+def test_TruncatedNormal_gradient(mean,std,low,high,points):
+    x_trun = cuqi.distribution.TruncatedNormal(mean,std,low=low,high=high)
+    x = cuqi.distribution.Normal(mean,std)
+    for point in points:
+        if np.all(point >= low) and np.all(point <= high):
+            assert np.all(x_trun.gradient(point) == approx(x.gradient(point)))
+        else:
+            assert np.all(np.isnan(x_trun.gradient(point)))
+
+@pytest.mark.parametrize("mean,std,low,high",[(0.0,
+                                               1.0,
+                                               -1.0,
+                                               1.0),
+                                              (np.array([0.0, 0.0]),
+                                               np.array([1.0, 1.0]),
+                                               np.array([-1.0, -1.0]),
+                                               np.array([1.0, 1.0]))])
+def test_TruncatedNormal_sampling(mean,std,low,high):
+    x = cuqi.distribution.TruncatedNormal(mean,std,low=low,high=high)
+    samples = x.sample(10000).samples
+    for i in range(samples.shape[1]):
+        sample = samples[:,i]
+        assert np.all(sample >= low) and np.all(sample <= high)
+
 def test_Gaussian_mean():
     mean = np.array([0, 0])
     std = np.array([1, 1])
{cuqipy-1.2.0.post0.dev42 → cuqipy-1.2.0.post0.dev109}/tests/test_solver.py
@@ -1,7 +1,7 @@
 import numpy as np
 import scipy as sp

-from cuqi.solver import CGLS, LM, FISTA, ProximalL1
+from cuqi.solver import CGLS, LM, FISTA, ADMM, ProximalL1, ProjectNonnegative
 from scipy.optimize import lsq_linear

@@ -54,8 +54,52 @@ def test_FISTA():

     stepsize = 0.99/(sp.linalg.interpolative.estimate_spectral_norm(A)**2)
     x0 = np.zeros(n)
-    sol, _ = FISTA(A, b, x0, proximal = ProximalL1, stepsize = stepsize, maxit = 100, abstol=1e-12, adaptive = True).solve()
+    sol, _ = FISTA(A, b, ProximalL1, x0, stepsize = stepsize, maxit = 100, abstol=1e-12, adaptive = True).solve()

     ref_sol = np.array([-1.83273787e-03, -1.72094582e-13, 0.0, -3.35835639e-01, -1.27795593e-01])
     # Compare
-    assert np.allclose(sol, ref_sol, atol=1e-4)
+    assert np.allclose(sol, ref_sol, atol=1e-4)
+
+def test_ADMM_matrix_form():
+    # Parameters
+    rng = np.random.default_rng(seed = 42)
+    m, n = 10, 5
+    A = rng.standard_normal((m, n))
+    b = rng.standard_normal(m)
+
+    k = 4
+    L = rng.standard_normal((k, n))
+
+    x0 = np.zeros(n)
+    sol, _ = ADMM(A, b, [(ProximalL1, np.eye(n)), (lambda z, _ : ProjectNonnegative(z), L)],
+                  x0, 10, maxit = 100, adaptive = True).solve()
+
+    ref_sol = np.array([-3.99513417e-03, -1.32339656e-01, -4.52822633e-02, -7.44973888e-02, -3.35005208e-11])
+    # Compare
+    assert np.allclose(sol, ref_sol, atol=1e-4)
+
+
+def test_ADMM_function_form():
+    # Parameters
+    rng = np.random.default_rng(seed = 42)
+    m, n = 10, 5
+    A = rng.standard_normal((m, n))
+    def A_fun(x, flag):
+        if flag == 1:
+            return A@x
+        if flag == 2:
+            return A.T@x
+
+    b = rng.standard_normal(m)
+
+    k = 4
+    L = rng.standard_normal((k, n))
+
+    x0 = np.zeros(n)
+    sol, _ = ADMM(A_fun, b, [(ProximalL1, np.eye(n)), (lambda z, _ : ProjectNonnegative(z), L)],
+                  x0, 10, maxit = 100, adaptive = True).solve()
+
+    print(sol)
+    ref_sol = np.array([-3.99513417e-03, -1.32339656e-01, -4.52822633e-02, -7.44973888e-02, -3.35005208e-11])
+    # Compare
+    assert np.allclose(sol, ref_sol, atol=1e-4)