CUQIpy 1.3.0__py3-none-any.whl → 1.4.0.post0.dev61__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. cuqi/__init__.py +1 -0
  2. cuqi/_version.py +3 -3
  3. cuqi/density/_density.py +9 -1
  4. cuqi/distribution/__init__.py +1 -1
  5. cuqi/distribution/_beta.py +1 -1
  6. cuqi/distribution/_cauchy.py +2 -2
  7. cuqi/distribution/_distribution.py +24 -15
  8. cuqi/distribution/_joint_distribution.py +97 -12
  9. cuqi/distribution/_posterior.py +9 -0
  10. cuqi/distribution/_truncated_normal.py +3 -3
  11. cuqi/distribution/_uniform.py +36 -2
  12. cuqi/experimental/__init__.py +1 -1
  13. cuqi/experimental/_recommender.py +216 -0
  14. cuqi/experimental/geometry/_productgeometry.py +3 -3
  15. cuqi/geometry/_geometry.py +12 -1
  16. cuqi/implicitprior/__init__.py +1 -1
  17. cuqi/implicitprior/_regularizedGaussian.py +40 -4
  18. cuqi/implicitprior/_restorator.py +35 -1
  19. cuqi/legacy/__init__.py +2 -0
  20. cuqi/legacy/sampler/__init__.py +11 -0
  21. cuqi/legacy/sampler/_conjugate.py +55 -0
  22. cuqi/legacy/sampler/_conjugate_approx.py +52 -0
  23. cuqi/legacy/sampler/_cwmh.py +196 -0
  24. cuqi/legacy/sampler/_gibbs.py +231 -0
  25. cuqi/legacy/sampler/_hmc.py +335 -0
  26. cuqi/legacy/sampler/_langevin_algorithm.py +198 -0
  27. cuqi/legacy/sampler/_laplace_approximation.py +184 -0
  28. cuqi/legacy/sampler/_mh.py +190 -0
  29. cuqi/legacy/sampler/_pcn.py +244 -0
  30. cuqi/{experimental/mcmc → legacy/sampler}/_rto.py +134 -152
  31. cuqi/legacy/sampler/_sampler.py +182 -0
  32. cuqi/likelihood/_likelihood.py +1 -1
  33. cuqi/model/_model.py +1248 -357
  34. cuqi/pde/__init__.py +4 -0
  35. cuqi/pde/_observation_map.py +36 -0
  36. cuqi/pde/_pde.py +133 -32
  37. cuqi/problem/_problem.py +88 -82
  38. cuqi/sampler/__init__.py +120 -8
  39. cuqi/sampler/_conjugate.py +376 -35
  40. cuqi/sampler/_conjugate_approx.py +40 -16
  41. cuqi/sampler/_cwmh.py +132 -138
  42. cuqi/{experimental/mcmc → sampler}/_direct.py +1 -1
  43. cuqi/sampler/_gibbs.py +269 -130
  44. cuqi/sampler/_hmc.py +328 -201
  45. cuqi/sampler/_langevin_algorithm.py +282 -98
  46. cuqi/sampler/_laplace_approximation.py +87 -117
  47. cuqi/sampler/_mh.py +47 -157
  48. cuqi/sampler/_pcn.py +56 -211
  49. cuqi/sampler/_rto.py +206 -140
  50. cuqi/sampler/_sampler.py +540 -135
  51. cuqi/solver/_solver.py +6 -2
  52. cuqi/testproblem/_testproblem.py +2 -3
  53. cuqi/utilities/__init__.py +3 -1
  54. cuqi/utilities/_utilities.py +94 -12
  55. {CUQIpy-1.3.0.dist-info → cuqipy-1.4.0.post0.dev61.dist-info}/METADATA +6 -4
  56. cuqipy-1.4.0.post0.dev61.dist-info/RECORD +102 -0
  57. {CUQIpy-1.3.0.dist-info → cuqipy-1.4.0.post0.dev61.dist-info}/WHEEL +1 -1
  58. CUQIpy-1.3.0.dist-info/RECORD +0 -100
  59. cuqi/experimental/mcmc/__init__.py +0 -123
  60. cuqi/experimental/mcmc/_conjugate.py +0 -345
  61. cuqi/experimental/mcmc/_conjugate_approx.py +0 -76
  62. cuqi/experimental/mcmc/_cwmh.py +0 -193
  63. cuqi/experimental/mcmc/_gibbs.py +0 -318
  64. cuqi/experimental/mcmc/_hmc.py +0 -464
  65. cuqi/experimental/mcmc/_langevin_algorithm.py +0 -392
  66. cuqi/experimental/mcmc/_laplace_approximation.py +0 -156
  67. cuqi/experimental/mcmc/_mh.py +0 -80
  68. cuqi/experimental/mcmc/_pcn.py +0 -89
  69. cuqi/experimental/mcmc/_sampler.py +0 -566
  70. cuqi/experimental/mcmc/_utilities.py +0 -17
  71. {CUQIpy-1.3.0.dist-info → cuqipy-1.4.0.post0.dev61.dist-info/licenses}/LICENSE +0 -0
  72. {CUQIpy-1.3.0.dist-info → cuqipy-1.4.0.post0.dev61.dist-info}/top_level.txt +0 -0
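The moves above amount to a namespace reorganization: the samplers formerly under cuqi/experimental/mcmc are promoted to cuqi/sampler, while the previous implementations are preserved under cuqi/legacy/sampler. The sketch below illustrates the resulting import paths; it is an informal illustration based only on the file moves listed above and the deprecation warning emitted by the legacy Gibbs sampler, and the class names shown are assumptions to verify against the released 1.4.0 API.

    # Illustrative sketch only -- class names are assumptions inferred from the
    # module files listed above, not verified against the released 1.4.0 API.

    # Before (1.3.0): new-style samplers lived in the experimental namespace
    # from cuqi.experimental.mcmc import MH, CWMH, LinearRTO

    # After (1.4.0): they are available from the main sampler module
    from cuqi.sampler import MH, CWMH, LinearRTO

    # The pre-1.4.0 samplers remain importable (with a deprecation warning)
    # from the new legacy namespace
    from cuqi.legacy.sampler import CWMH as LegacyCWMH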
cuqi/legacy/sampler/_cwmh.py
@@ -0,0 +1,196 @@
+ import numpy as np
+ import cuqi
+ from cuqi.legacy.sampler import ProposalBasedSampler
+
+
+ class CWMH(ProposalBasedSampler):
+     """Component-wise Metropolis Hastings sampler.
+
+     Allows sampling of a target distribution by a component-wise random-walk sampling of a proposal distribution along with an accept/reject step.
+
+     Parameters
+     ----------
+
+     target : `cuqi.distribution.Distribution` or lambda function
+         The target distribution to sample. Custom logpdfs are supported by using a :class:`cuqi.distribution.UserDefinedDistribution`.
+
+     proposal : `cuqi.distribution.Distribution` or callable method
+         The proposal to sample from. If a callable method it should provide a single independent sample from proposal distribution. Defaults to a Gaussian proposal. *Optional*.
+
+     scale : float
+         Scale parameter used to define correlation between previous and proposed sample in random-walk. *Optional*.
+
+     x0 : ndarray
+         Initial parameters. *Optional*
+
+     dim : int
+         Dimension of parameter space. Required if target and proposal are callable functions. *Optional*.
+
+     callback : callable, *Optional*
+         If set this function will be called after every sample.
+         The signature of the callback function is `callback(sample, sample_index)`,
+         where `sample` is the current sample and `sample_index` is the index of the sample.
+         An example is shown in demos/demo31_callback.py.
+
+     Example
+     -------
+     .. code-block:: python
+
+         # Parameters
+         dim = 5 # Dimension of distribution
+         mu = np.arange(dim) # Mean of Gaussian
+         std = 1 # standard deviation of Gaussian
+
+         # Logpdf function
+         logpdf_func = lambda x: -1/(std**2)*np.sum((x-mu)**2)
+
+         # Define distribution from logpdf as UserDefinedDistribution (sample and gradients also supported as inputs to UserDefinedDistribution)
+         target = cuqi.distribution.UserDefinedDistribution(dim=dim, logpdf_func=logpdf_func)
+
+         # Set up sampler
+         sampler = cuqi.legacy.sampler.CWMH(target, scale=1)
+
+         # Sample
+         samples = sampler.sample(2000)
+
+     """
+     def __init__(self, target, proposal=None, scale=1, x0=None, dim = None, **kwargs):
+         super().__init__(target, proposal=proposal, scale=scale, x0=x0, dim=dim, **kwargs)
+
+     @ProposalBasedSampler.proposal.setter
+     def proposal(self, value):
+         fail_msg = "Proposal should be either None, cuqi.distribution.Distribution conditioned only on 'location' and 'scale', lambda function, or cuqi.distribution.Normal conditioned only on 'mean' and 'std'"
+
+         if value is None:
+             self._proposal = cuqi.distribution.Normal(mean = lambda location:location,std = lambda scale:scale, geometry=self.dim)
+
+         elif isinstance(value, cuqi.distribution.Distribution) and sorted(value.get_conditioning_variables())==['location','scale']:
+             self._proposal = value
+
+         elif isinstance(value, cuqi.distribution.Normal) and sorted(value.get_conditioning_variables())==['mean','std']:
+             self._proposal = value(mean = lambda location:location, std = lambda scale:scale)
+
+         elif not isinstance(value, cuqi.distribution.Distribution) and callable(value):
+             self._proposal = value
+
+         else:
+             raise ValueError(fail_msg)
+
+
+     def _sample(self, N, Nb):
+         Ns = N+Nb # number of simulations
+
+         # allocation
+         samples = np.empty((self.dim, Ns))
+         target_eval = np.empty(Ns)
+         acc = np.zeros((self.dim, Ns), dtype=int)
+
+         # initial state
+         samples[:, 0] = self.x0
+         target_eval[0] = self.target.logd(self.x0)
+         acc[:, 0] = np.ones(self.dim)
+
+         # run MCMC
+         for s in range(Ns-1):
+             # run component by component
+             samples[:, s+1], target_eval[s+1], acc[:, s+1] = self.single_update(samples[:, s], target_eval[s])
+
+             self._print_progress(s+2,Ns) #s+2 is the sample number, s+1 is index assuming x0 is the first sample
+             self._call_callback(samples[:, s+1], s+1)
+
+         # remove burn-in
+         samples = samples[:, Nb:]
+         target_eval = target_eval[Nb:]
+         acccomp = acc[:, Nb:].mean(axis=1)
+         print('\nAverage acceptance rate all components:', acccomp.mean(), '\n')
+
+         return samples, target_eval, acccomp
+
+     def _sample_adapt(self, N, Nb):
+         # this follows the vanishing adaptation Algorithm 4 in:
+         # Andrieu and Thoms (2008) - A tutorial on adaptive MCMC
+         Ns = N+Nb # number of simulations
+
+         # allocation
+         samples = np.empty((self.dim, Ns))
+         target_eval = np.empty(Ns)
+         acc = np.zeros((self.dim, Ns), dtype=int)
+
+         # initial state
+         samples[:, 0] = self.x0
+         target_eval[0] = self.target.logd(self.x0)
+         acc[:, 0] = np.ones(self.dim)
+
+         # initial adaptation params
+         Na = int(0.1*N) # iterations to adapt
+         hat_acc = np.empty((self.dim, int(np.floor(Ns/Na)))) # average acceptance rate of the chains
+         lambd = np.empty((self.dim, int(np.floor(Ns/Na)+1))) # scaling parameter \in (0,1)
+         lambd[:, 0] = self.scale
+         star_acc = 0.21/self.dim + 0.23 # target acceptance rate RW
+         i, idx = 0, 0
+
+         # run MCMC
+         for s in range(Ns-1):
+             # run component by component
+             samples[:, s+1], target_eval[s+1], acc[:, s+1] = self.single_update(samples[:, s], target_eval[s])
+
+             # adapt prop spread of each component using acc of past samples
+             if ((s+1) % Na == 0):
+                 # evaluate average acceptance rate
+                 hat_acc[:, i] = np.mean(acc[:, idx:idx+Na], axis=1)
+
+                 # compute new scaling parameter
+                 zeta = 1/np.sqrt(i+1) # ensures that the variation of lambda(i) vanishes
+                 lambd[:, i+1] = np.exp(np.log(lambd[:, i]) + zeta*(hat_acc[:, i]-star_acc))
+
+                 # update parameters
+                 self.scale = np.minimum(lambd[:, i+1], np.ones(self.dim))
+
+                 # update counters
+                 i += 1
+                 idx += Na
+
+             # display iterations
+             self._print_progress(s+2,Ns) #s+2 is the sample number, s+1 is index assuming x0 is the first sample
+             self._call_callback(samples[:, s+1], s+1)
+
+         # remove burn-in
+         samples = samples[:, Nb:]
+         target_eval = target_eval[Nb:]
+         acccomp = acc[:, Nb:].mean(axis=1)
+         print('\nAverage acceptance rate all components:', acccomp.mean(), '\n')
+
+         return samples, target_eval, acccomp
+
+     def single_update(self, x_t, target_eval_t):
+         if isinstance(self.proposal,cuqi.distribution.Distribution):
+             x_i_star = self.proposal(location= x_t, scale = self.scale).sample()
+         else:
+             x_i_star = self.proposal(x_t, self.scale)
+         x_star = x_t.copy()
+         acc = np.zeros(self.dim)
+
+         for j in range(self.dim):
+             # propose state
+             x_star[j] = x_i_star[j]
+
+             # evaluate target
+             target_eval_star = self.target.logd(x_star)
+
+             # ratio and acceptance probability
+             ratio = target_eval_star - target_eval_t # proposal is symmetric
+             alpha = min(0, ratio)
+
+             # accept/reject
+             u_theta = np.log(np.random.rand())
+             if (u_theta <= alpha):
+                 x_t[j] = x_i_star[j]
+                 target_eval_t = target_eval_star
+                 acc[j] = 1
+             else:
+                 pass
+                 # x_t[j] = x_t[j]
+                 # target_eval_t = target_eval_t
+             x_star = x_t.copy()
+         #
+         return x_t, target_eval_t, acc
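For reference, the accept/reject test in single_update above runs in log space: the usual Metropolis acceptance probability min(1, pi(x*)/pi(x_t)) becomes alpha = min(0, logd(x*) - logd(x_t)), compared against the log of a uniform draw, one component at a time. Below is a minimal, self-contained sketch of that component-wise test written against a plain log-density function rather than the CUQIpy sampler API; the helper name cwmh_step and its arguments are illustrative only.

    import numpy as np

    def cwmh_step(logpdf, x_t, scale, rng=None):
        # One component-wise Metropolis sweep over all coordinates of x_t,
        # mirroring the accept/reject logic of CWMH.single_update above.
        rng = np.random.default_rng() if rng is None else rng
        x_t = np.asarray(x_t, dtype=float).copy()
        logp_t = logpdf(x_t)
        proposal = x_t + scale * rng.standard_normal(x_t.size)  # random-walk proposal
        accepted = np.zeros(x_t.size, dtype=int)
        for j in range(x_t.size):
            x_star = x_t.copy()
            x_star[j] = proposal[j]               # change one component at a time
            logp_star = logpdf(x_star)
            alpha = min(0.0, logp_star - logp_t)  # log acceptance probability (symmetric proposal)
            if np.log(rng.random()) <= alpha:     # accept the proposed component
                x_t[j] = proposal[j]
                logp_t = logp_star
                accepted[j] = 1
        return x_t, logp_t, accepted

    # Example: one sweep on a standard Gaussian target
    x, logp, acc = cwmh_step(lambda x: -0.5 * np.sum(x**2), np.zeros(5), scale=1.0)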
cuqi/legacy/sampler/_gibbs.py
@@ -0,0 +1,231 @@
+ from cuqi.distribution import JointDistribution
+ from cuqi.legacy.sampler import Sampler
+ from cuqi.samples import Samples
+ from typing import Dict, Union
+ import numpy as np
+ import sys
+ import warnings
+
+ class Gibbs:
+     """
+     Gibbs sampler for sampling a joint distribution.
+
+     Gibbs sampling samples the variables of the distribution sequentially,
+     one variable at a time. When a variable represents a random vector, the
+     whole vector is sampled simultaneously.
+
+     The sampling of each variable is done by sampling from the conditional
+     distribution of that variable given the values of the other variables.
+     This is often a very efficient way of sampling from a joint distribution
+     if the conditional distributions are easy to sample from.
+
+     Parameters
+     ----------
+     target : cuqi.distribution.JointDistribution
+         Target distribution to sample from.
+
+     sampling_strategy : dict
+         Dictionary of sampling strategies for each parameter.
+         Keys are parameter names.
+         Values are sampler objects.
+
+     Example
+     -------
+     .. code-block:: python
+
+         import cuqi
+         import numpy as np
+
+         # Model and data
+         A, y_obs, probinfo = cuqi.testproblem.Deconvolution1D(phantom='square').get_components()
+         n = A.domain_dim
+
+         # Define distributions
+         d = cuqi.distribution.Gamma(1, 1e-4)
+         l = cuqi.distribution.Gamma(1, 1e-4)
+         x = cuqi.distribution.GMRF(np.zeros(n), lambda d: d)
+         y = cuqi.distribution.Gaussian(A, lambda l: 1/l)
+
+         # Combine into a joint distribution and create posterior
+         joint = cuqi.distribution.JointDistribution(d, l, x, y)
+         posterior = joint(y=y_obs)
+
+         # Define sampling strategy
+         sampling_strategy = {
+             'x': cuqi.legacy.sampler.LinearRTO,
+             ('d', 'l'): cuqi.legacy.sampler.Conjugate,
+         }
+
+         # Define Gibbs sampler
+         sampler = cuqi.legacy.sampler.Gibbs(posterior, sampling_strategy)
+
+         # Run sampler
+         samples = sampler.sample(Ns=1000, Nb=200)
+
+         # Plot results
+         samples['x'].plot_ci(exact=probinfo.exactSolution)
+         samples['d'].plot_trace(figsize=(8,2))
+         samples['l'].plot_trace(figsize=(8,2))
+
+     """
+
+     def __init__(self, target: JointDistribution, sampling_strategy: Dict[Union[str,tuple], Sampler]):
+
+         warnings.warn(f"\nYou are using the legacy sampler '{self.__class__.__name__}'.\n"
+                       f"This will be removed in a future release of CUQIpy.\n"
+                       f"Please consider using the new samplers in the 'cuqi.sampler' module.\n", UserWarning, stacklevel=2)
+
+         # Store target and allow conditioning to reduce to a single density
+         self.target = target() # Create a copy of target distribution (to avoid modifying the original)
+
+         # Parse samplers and split any keys that are tuple into separate keys
+         self.samplers = {}
+         for par_name in sampling_strategy.keys():
+             if isinstance(par_name, tuple):
+                 for par_name_ in par_name:
+                     self.samplers[par_name_] = sampling_strategy[par_name]
+             else:
+                 self.samplers[par_name] = sampling_strategy[par_name]
+
+         # Store parameter names
+         self.par_names = self.target.get_parameter_names()
+
+     # ------------ Public methods ------------
+     def sample(self, Ns, Nb=0):
+         """ Sample from target distribution """
+
+         # Initial points
+         current_samples = self._get_initial_points()
+
+         # Compute how many samples were already taken previously
+         at_Nb = self._Nb
+         at_Ns = self._Ns
+
+         # Allocate memory for samples
+         self._allocate_samples_warmup(Nb)
+         self._allocate_samples(Ns)
+
+         # Sample tuning phase
+         for i in range(at_Nb, at_Nb+Nb):
+             current_samples = self.step_tune(current_samples)
+             self._store_samples(self.samples_warmup, current_samples, i)
+             self._print_progress(i+1+at_Nb, at_Nb+Nb, 'Warmup')
+
+         # Sample phase
+         for i in range(at_Ns, at_Ns+Ns):
+             current_samples = self.step(current_samples)
+             self._store_samples(self.samples, current_samples, i)
+             self._print_progress(i+1, at_Ns+Ns, 'Sample')
+
+         # Convert to samples objects and return
+         return self._convert_to_Samples(self.samples)
+
+     def step(self, current_samples):
+         """ Sequentially go through all parameters and sample them conditionally on each other """
+
+         # Extract par names
+         par_names = self.par_names
+
+         # Sample from each conditional distribution
+         for par_name in par_names:
+
+             # Dict of all other parameters to condition on
+             other_params = {par_name_: current_samples[par_name_] for par_name_ in par_names if par_name_ != par_name}
+
+             # Set up sampler for current conditional distribution
+             sampler = self.samplers[par_name](self.target(**other_params))
+
+             # Take a MCMC step
+             current_samples[par_name] = sampler.step(current_samples[par_name])
+
+             # Ensure even 1-dimensional samples are 1D arrays
+             current_samples[par_name] = current_samples[par_name].reshape(-1)
+
+         return current_samples
+
+     def step_tune(self, current_samples):
+         """ Perform a single MCMC step for each parameter and tune the sampler """
+         # Not implemented. No tuning happening here yet. Requires samplers to be able to be modified after initialization.
+         return self.step(current_samples)
+
+     # ------------ Private methods ------------
+     def _allocate_samples(self, Ns):
+         """ Allocate memory for samples """
+         # Allocate memory for samples
+         samples = {}
+         for par_name in self.par_names:
+             samples[par_name] = np.zeros((self.target.get_density(par_name).dim, Ns))
+
+         # Store samples in self
+         if hasattr(self, 'samples'):
+             # Append to existing samples (This makes a copy)
+             for par_name in self.par_names:
+                 samples[par_name] = np.hstack((self.samples[par_name], samples[par_name]))
+         self.samples = samples
+
+     def _allocate_samples_warmup(self, Nb):
+         """ Allocate memory for samples """
+
+         # If we already have warmup samples and more are requested raise error
+         if hasattr(self, 'samples_warmup') and Nb != 0:
+             raise ValueError('Sampler already has run warmup phase. Cannot run warmup phase again.')
+
+         # Allocate memory for samples
+         samples = {}
+         for par_name in self.par_names:
+             samples[par_name] = np.zeros((self.target.get_density(par_name).dim, Nb))
+         self.samples_warmup = samples
+
+     def _get_initial_points(self):
+         """ Get initial points for each parameter """
+         initial_points = {}
+         for par_name in self.par_names:
+             if hasattr(self, 'samples'):
+                 initial_points[par_name] = self.samples[par_name][:, -1]
+             elif hasattr(self, 'samples_warmup'):
+                 initial_points[par_name] = self.samples_warmup[par_name][:, -1]
+             elif hasattr(self.target.get_density(par_name), 'init_point'):
+                 initial_points[par_name] = self.target.get_density(par_name).init_point
+             else:
+                 initial_points[par_name] = np.ones(self.target.get_density(par_name).dim)
+         return initial_points
+
+     def _store_samples(self, samples, current_samples, i):
+         """ Store current samples at index i of samples dict """
+         for par_name in self.par_names:
+             samples[par_name][:, i] = current_samples[par_name]
+
+     def _convert_to_Samples(self, samples):
+         """ Convert each parameter in samples dict to cuqi.samples.Samples object with correct geometry """
+         samples_object = {}
+         for par_name in self.par_names:
+             samples_object[par_name] = Samples(samples[par_name], self.target.get_density(par_name).geometry)
+         return samples_object
+
+     def _print_progress(self, s, Ns, phase):
+         """Prints sampling progress"""
+         if Ns < 2: # Don't print progress if only one sample
+             return
+         if (s % (max(Ns//100,1))) == 0:
+             msg = f'{phase} {s} / {Ns}'
+             sys.stdout.write('\r'+msg)
+         if s==Ns:
+             msg = f'{phase} {s} / {Ns}'
+             sys.stdout.write('\r'+msg+'\n')
+
+     # ------------ Private properties ------------
+     @property
+     def _Ns(self):
+         """ Number of samples already taken """
+         if hasattr(self, 'samples'):
+             return self.samples[self.par_names[0]].shape[-1]
+         else:
+             return 0
+
+     @property
+     def _Nb(self):
+         """ Number of samples already taken in warmup phase """
+         if hasattr(self, 'samples_warmup'):
+             return self.samples_warmup[self.par_names[0]].shape[-1]
+         else:
+             return 0
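One detail worth noting from the Gibbs code above: _allocate_samples appends to any existing sample arrays and the _Ns/_Nb properties report how many draws are already stored, so a second call to sample continues the chains from their last state, whereas _allocate_samples_warmup raises if warmup is requested more than once. A hedged usage sketch of that resume behaviour, assuming the sampler object from the docstring example has already been constructed:

    samples = sampler.sample(Ns=500, Nb=200)   # warmup once, then 500 draws per parameter
    samples = sampler.sample(Ns=500)           # resumes from the last state; chains now hold 1000 draws
    # sampler.sample(Ns=100, Nb=50)            # would raise ValueError: warmup cannot be run again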