CUQIpy-1.0.0.post0.dev180-py3-none-any.whl → CUQIpy-1.0.0.post0.dev202-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of CUQIpy might be problematic.
- {CUQIpy-1.0.0.post0.dev180.dist-info → CUQIpy-1.0.0.post0.dev202.dist-info}/METADATA +1 -1
- {CUQIpy-1.0.0.post0.dev180.dist-info → CUQIpy-1.0.0.post0.dev202.dist-info}/RECORD +8 -7
- cuqi/_version.py +3 -3
- cuqi/experimental/mcmc/__init__.py +1 -0
- cuqi/experimental/mcmc/_laplace_approximation.py +159 -0
- {CUQIpy-1.0.0.post0.dev180.dist-info → CUQIpy-1.0.0.post0.dev202.dist-info}/LICENSE +0 -0
- {CUQIpy-1.0.0.post0.dev180.dist-info → CUQIpy-1.0.0.post0.dev202.dist-info}/WHEEL +0 -0
- {CUQIpy-1.0.0.post0.dev180.dist-info → CUQIpy-1.0.0.post0.dev202.dist-info}/top_level.txt +0 -0
{CUQIpy-1.0.0.post0.dev180.dist-info → CUQIpy-1.0.0.post0.dev202.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: CUQIpy
-Version: 1.0.0.post0.dev180
+Version: 1.0.0.post0.dev202
 Summary: Computational Uncertainty Quantification for Inverse problems in Python
 Maintainer-email: "Nicolai A. B. Riis" <nabr@dtu.dk>, "Jakob S. Jørgensen" <jakj@dtu.dk>, "Amal M. Alghamdi" <amaal@dtu.dk>, Chao Zhang <chaz@dtu.dk>
 License: Apache License
{CUQIpy-1.0.0.post0.dev180.dist-info → CUQIpy-1.0.0.post0.dev202.dist-info}/RECORD CHANGED
@@ -1,6 +1,6 @@
 cuqi/__init__.py,sha256=LsGilhl-hBLEn6Glt8S_l0OJzAA1sKit_rui8h-D-p0,488
 cuqi/_messages.py,sha256=fzEBrZT2kbmfecBBPm7spVu7yHdxGARQB4QzXhJbCJ0,415
-cuqi/_version.py,sha256=
+cuqi/_version.py,sha256=Eygdo6Qj24ocqoILprXlMc5JUVsIEnrGb4MKDLDpuDo,510
 cuqi/config.py,sha256=wcYvz19wkeKW2EKCGIKJiTpWt5kdaxyt4imyRkvtTRA,526
 cuqi/diagnostics.py,sha256=5OrbJeqpynqRXOe5MtOKKhe7EAVdOEpHIqHnlMW9G_c,3029
 cuqi/array/__init__.py,sha256=-EeiaiWGNsE3twRS4dD814BIlfxEsNkTCZUc5gjOXb0,30
@@ -32,10 +32,11 @@ cuqi/distribution/_normal.py,sha256=UeoTtGDT7YSf4ZNo2amlVF9K-YQpYbf8q76jcRJTVFw,
 cuqi/distribution/_posterior.py,sha256=zAfL0GECxekZ2lBt1W6_LN0U_xskMwK4VNce5xAF7ig,5018
 cuqi/distribution/_uniform.py,sha256=7xJmCZH_LPhuGkwEDGh-_CTtzcWKrXMOxtTJUFb7Ydo,1607
 cuqi/experimental/__init__.py,sha256=vhZvyMX6rl8Y0haqCzGLPz6PSUKyu75XMQbeDHqTTrw,83
-cuqi/experimental/mcmc/__init__.py,sha256=
+cuqi/experimental/mcmc/__init__.py,sha256=meBaf5xOELviF866nB6BnpfftYXhyx-w78ad0HshP1I,384
 cuqi/experimental/mcmc/_cwmh.py,sha256=yRlTk5a1QYfH3JyCecfOOTeDf-4-tmJ3Tl2Bc3pyp1Y,7336
 cuqi/experimental/mcmc/_hmc.py,sha256=qqAyoAajLE_JenYMgAbD3tknuEf75AJu-ufF69GKGk4,19384
 cuqi/experimental/mcmc/_langevin_algorithm.py,sha256=MX48u3GYgCckB6Q5h5kXr_qdIaLQH2toOG5u29OY7gk,8245
+cuqi/experimental/mcmc/_laplace_approximation.py,sha256=7reeOnDY77WnOwqYls5WStftHgylwCNVodudRroApF0,5812
 cuqi/experimental/mcmc/_mh.py,sha256=aIV1Ntq0EAq3QJ1_X-DbP7eDAL-d_Or7d3RUO-R48I4,3090
 cuqi/experimental/mcmc/_pcn.py,sha256=3M8zhQGQa53Gz04AkC8wJM61_5rIjGVnhPefi8m4dbY,3531
 cuqi/experimental/mcmc/_rto.py,sha256=jSPznr34XPfWM6LysWIiN4hE-vtyti3cHyvzy9ruykg,11349
@@ -76,8 +77,8 @@ cuqi/testproblem/_testproblem.py,sha256=x769LwwRdJdzIiZkcQUGb_5-vynNTNALXWKato7s
 cuqi/utilities/__init__.py,sha256=EfxHLdsyDNugbmbzs43nV_AeKcycM9sVBjG9WZydagA,351
 cuqi/utilities/_get_python_variable_name.py,sha256=QwlBVj2koJRA8s8pWd554p7-ElcI7HUwY32HknaR92E,1827
 cuqi/utilities/_utilities.py,sha256=At3DOXRdF3GwLkVcM2FXooGyjAGfPkIM0bRzhTfLmWk,8046
-CUQIpy-1.0.0.post0.
-CUQIpy-1.0.0.post0.
-CUQIpy-1.0.0.post0.
-CUQIpy-1.0.0.post0.
-CUQIpy-1.0.0.post0.
+CUQIpy-1.0.0.post0.dev202.dist-info/LICENSE,sha256=kJWRPrtRoQoZGXyyvu50Uc91X6_0XRaVfT0YZssicys,10799
+CUQIpy-1.0.0.post0.dev202.dist-info/METADATA,sha256=3HmIqBG9T7FXxvuQBu7LK7p1PLLJ2-e1BBi4ZKWK0vk,18393
+CUQIpy-1.0.0.post0.dev202.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+CUQIpy-1.0.0.post0.dev202.dist-info/top_level.txt,sha256=AgmgMc6TKfPPqbjV0kvAoCBN334i_Lwwojc7HE3ZwD0,5
+CUQIpy-1.0.0.post0.dev202.dist-info/RECORD,,
cuqi/_version.py CHANGED
@@ -8,11 +8,11 @@ import json

 version_json = '''
 {
- "date": "2024-04-
+ "date": "2024-04-30T14:38:06+0200",
 "dirty": false,
 "error": null,
- "full-revisionid": "
- "version": "1.0.0.post0.dev180"
+ "full-revisionid": "e5596bf72d78672549c26c4921e473c12fcf2553",
+ "version": "1.0.0.post0.dev202"
 }
 ''' # END VERSION_JSON

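The only change to cuqi/_version.py is the versioneer-generated metadata: build date, git revision id, and version string. A quick way to confirm which build is installed after upgrading is sketched below; this assumes the package re-exports the versioneer version as cuqi.__version__, which is not visible in this diff.

# Sketch: confirm the installed build (assumes cuqi exposes __version__
# from the versioneer-generated cuqi/_version.py shown above).
import cuqi

print(cuqi.__version__)  # expected to print '1.0.0.post0.dev202' for the new wheel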
cuqi/experimental/mcmc/_laplace_approximation.py ADDED
@@ -0,0 +1,159 @@
+import scipy as sp
+import numpy as np
+import cuqi
+from cuqi.solver import CGLS
+from cuqi.experimental.mcmc import SamplerNew
+
+class UGLANew(SamplerNew):
+    """ Unadjusted (Gaussian) Laplace Approximation sampler
+
+    Samples an approximate posterior where the prior is approximated
+    by a Gaussian distribution. The likelihood must be Gaussian.
+
+    Currently only works for LMRF priors.
+
+    The inner solver is Conjugate Gradient Least Squares (CGLS) solver.
+
+    For more details see: Uribe, Felipe, et al. A hybrid Gibbs sampler for edge-preserving
+    tomographic reconstruction with uncertain view angles. SIAM/ASA Journal on UQ,
+    https://doi.org/10.1137/21M1412268 (2022).
+
+    Parameters
+    ----------
+    target : `cuqi.distribution.Posterior`
+        The target posterior distribution to sample.
+
+    initial_point : ndarray, *Optional*
+        Initial parameters.
+        If not provided, it defaults to zeros.
+
+    maxit : int
+        Maximum number of inner iterations for solver when generating one sample.
+        If not provided, it defaults to 50.
+
+    tol : float
+        Tolerance for inner solver.
+        The inner solvers will stop before maxit if convergence check reaches tol.
+        If not provided, it defaults to 1e-4.
+
+    beta : float
+        Smoothing parameter for the Gaussian approximation of the Laplace distribution.
+        A small value in the range of 1e-7 to 1e-3 is recommended, though values out of this
+        range might give better results in some cases. Generally, a larger beta value makes
+        sampling easier but results in a worse approximation. See details in Section 3.3 of the paper.
+        If not provided, it defaults to 1e-5.
+
+    callback : callable, *Optional*
+        If set, this function will be called after every sample.
+        The signature of the callback function is `callback(sample, sample_index)`,
+        where `sample` is the current sample and `sample_index` is the index of the sample.
+        An example is shown in demos/demo31_callback.py.
+    """
+    def __init__(self, target, initial_point=None, maxit=50, tol=1e-4, beta=1e-5, **kwargs):
+
+        super().__init__(target=target, initial_point=initial_point, **kwargs)
+
+        if initial_point is None: #TODO: Replace later with a getter
+            self.initial_point = np.zeros(self.dim)
+        self._samples = [self.initial_point]
+
+        self.current_point = self.initial_point
+        self._acc = [1] # TODO. Check if we need this
+
+        # Parameters
+        self.maxit = maxit
+        self.tol = tol
+        self.beta = beta
+
+    @property
+    def prior(self):
+        return self.target.prior
+
+    @property
+    def likelihood(self):
+        return self.target.likelihood
+
+    @property
+    def model(self):
+        return self.target.model
+
+    @property
+    def data(self):
+        return self.target.data
+
+    def _pre_warmup(self):
+        super()._pre_warmup()
+        D = self.prior._diff_op
+        n = D.shape[0]
+
+        # Gaussian approximation of LMRF prior as function of x_k
+        def Lk_fun(x_k):
+            dd = 1/np.sqrt((D @ x_k)**2 + self.beta*np.ones(n))
+            W = sp.sparse.diags(dd)
+            return W.sqrt() @ D
+        self.Lk_fun = Lk_fun
+
+        self._m = len(self.data)
+        self._L1 = self.likelihood.distribution.sqrtprec
+
+        # If prior location is scalar, repeat it to match dimensions
+        if len(self.prior.location) == 1:
+            self._priorloc = np.repeat(self.prior.location, self.dim)
+        else:
+            self._priorloc = self.prior.location
+
+        # Initial Laplace approx
+        self._L2 = Lk_fun(self.initial_point)
+        self._L2mu = self._L2@self._priorloc
+        self._b_tild = np.hstack([self._L1@self.data, self._L2mu])
+
+        # Least squares form
+        def M(x, flag):
+            if flag == 1:
+                out1 = self._L1 @ self.model.forward(x)
+                out2 = np.sqrt(1/self.prior.scale)*(self._L2 @ x)
+                out = np.hstack([out1, out2])
+            elif flag == 2:
+                idx = int(self._m)
+                out1 = self.model.adjoint(self._L1.T@x[:idx])
+                out2 = np.sqrt(1/self.prior.scale)*(self._L2.T @ x[idx:])
+                out = out1 + out2
+            return out
+        self.M = M
+
+    def _pre_sample(self):
+        self._pre_warmup()
+
+    def step(self):
+        # Update Laplace approximation
+        self._L2 = self.Lk_fun(self.current_point)
+        self._L2mu = self._L2@self._priorloc
+        self._b_tild = np.hstack([self._L1@self.data, self._L2mu])
+
+        # Sample from approximate posterior
+        e = np.random.randn(len(self._b_tild))
+        y = self._b_tild + e # Perturb data
+        sim = CGLS(self.M, y, self.current_point, self.maxit, self.tol)
+        self.current_point, _ = sim.solve()
+        acc = 1
+        return acc
+
+    def tune(self, skip_len, update_count):
+        pass
+
+    def validate_target(self):
+        # Check target type
+        if not isinstance(self.target, cuqi.distribution.Posterior):
+            raise ValueError(f"To initialize an object of type {self.__class__}, 'target' need to be of type 'cuqi.distribution.Posterior'.")
+
+        # Check Linear model
+        if not isinstance(self.likelihood.model, cuqi.model.LinearModel):
+            raise TypeError("Model needs to be linear")
+
+        # Check Gaussian likelihood
+        if not hasattr(self.likelihood.distribution, "sqrtprec"):
+            raise TypeError("Distribution in Likelihood must contain a sqrtprec attribute")
+
+        # Check that prior is LMRF
+        if not isinstance(self.prior, cuqi.distribution.LMRF):
+            raise ValueError('Unadjusted Gaussian Laplace approximation (UGLA) requires LMRF prior')
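The new module contributes UGLANew, an unadjusted (Gaussian) Laplace approximation sampler for posteriors with a Gaussian likelihood, a linear forward model, and an LMRF prior. Inside _pre_warmup, Lk_fun builds the Gaussian approximation of the prior by replacing the non-smooth |Dx| term with a quadratic term weighted by 1/sqrt((D x_k)^2 + beta), and each call to step draws from that approximation by solving a randomly perturbed least-squares problem with CGLS, with no Metropolis correction. The snippet below is a usage sketch only and is not part of the diff; it assumes the documented CUQIpy API for test problems, the Gaussian and LMRF distributions, JointDistribution conditioning, and the experimental sampler interface (warmup, sample, get_samples), so exact names and defaults may differ.

# Hypothetical usage sketch for the new UGLANew sampler (not part of this diff).
# Assumed API: Deconvolution1D(...).get_components(), the LMRF and Gaussian
# constructors, JointDistribution reduction to a Posterior, and the experimental
# SamplerNew interface with warmup / sample / get_samples.
import numpy as np
import cuqi
from cuqi.experimental.mcmc import UGLANew

# Linear forward model A and noisy data y_data from a built-in test problem
A, y_data, info = cuqi.testproblem.Deconvolution1D(phantom="square").get_components()
n = A.domain_dim

# Edge-preserving LMRF prior and Gaussian likelihood, as required by validate_target
x = cuqi.distribution.LMRF(location=np.zeros(n), scale=0.01, geometry=n)
y = cuqi.distribution.Gaussian(A @ x, cov=0.01)

# Condition the joint distribution on the observed data to obtain the Posterior target
target = cuqi.distribution.JointDistribution(y, x)(y=y_data)

# beta smooths the Gaussian approximation of the LMRF prior; maxit and tol control
# the inner CGLS solve used to draw each (unadjusted) sample
sampler = UGLANew(target, maxit=50, tol=1e-4, beta=1e-5)
sampler.warmup(200)
sampler.sample(1000)
samples = sampler.get_samples()
samples.plot_ci(exact=info.exactSolution)

Because the sampler is unadjusted, the draws come from the Gaussian approximation of the posterior rather than the exact posterior, so beta trades off sampling ease against approximation quality, as noted in the docstring.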
{CUQIpy-1.0.0.post0.dev180.dist-info → CUQIpy-1.0.0.post0.dev202.dist-info}/LICENSE: file without changes
{CUQIpy-1.0.0.post0.dev180.dist-info → CUQIpy-1.0.0.post0.dev202.dist-info}/WHEEL: file without changes
{CUQIpy-1.0.0.post0.dev180.dist-info → CUQIpy-1.0.0.post0.dev202.dist-info}/top_level.txt: file without changes