CUQIpy 1.3.0.post0.dev298__py3-none-any.whl → 1.4.0.post0.dev92__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. cuqi/__init__.py +2 -0
  2. cuqi/_version.py +3 -3
  3. cuqi/algebra/__init__.py +2 -0
  4. cuqi/{experimental/algebra/_randomvariable.py → algebra/_random_variable.py} +4 -4
  5. cuqi/density/_density.py +9 -1
  6. cuqi/distribution/_distribution.py +25 -16
  7. cuqi/distribution/_joint_distribution.py +99 -14
  8. cuqi/distribution/_posterior.py +9 -0
  9. cuqi/experimental/__init__.py +1 -4
  10. cuqi/experimental/_recommender.py +4 -4
  11. cuqi/geometry/__init__.py +2 -0
  12. cuqi/{experimental/geometry/_productgeometry.py → geometry/_product_geometry.py} +1 -1
  13. cuqi/implicitprior/__init__.py +1 -1
  14. cuqi/implicitprior/_restorator.py +35 -1
  15. cuqi/legacy/__init__.py +2 -0
  16. cuqi/legacy/sampler/__init__.py +11 -0
  17. cuqi/legacy/sampler/_conjugate.py +55 -0
  18. cuqi/legacy/sampler/_conjugate_approx.py +52 -0
  19. cuqi/legacy/sampler/_cwmh.py +196 -0
  20. cuqi/legacy/sampler/_gibbs.py +231 -0
  21. cuqi/legacy/sampler/_hmc.py +335 -0
  22. cuqi/legacy/sampler/_langevin_algorithm.py +198 -0
  23. cuqi/legacy/sampler/_laplace_approximation.py +184 -0
  24. cuqi/legacy/sampler/_mh.py +190 -0
  25. cuqi/legacy/sampler/_pcn.py +244 -0
  26. cuqi/legacy/sampler/_rto.py +284 -0
  27. cuqi/legacy/sampler/_sampler.py +182 -0
  28. cuqi/likelihood/_likelihood.py +1 -1
  29. cuqi/model/_model.py +225 -90
  30. cuqi/pde/__init__.py +4 -0
  31. cuqi/pde/_observation_map.py +36 -0
  32. cuqi/pde/_pde.py +52 -21
  33. cuqi/problem/_problem.py +87 -80
  34. cuqi/sampler/__init__.py +120 -8
  35. cuqi/sampler/_conjugate.py +376 -35
  36. cuqi/sampler/_conjugate_approx.py +40 -16
  37. cuqi/sampler/_cwmh.py +132 -138
  38. cuqi/{experimental/mcmc → sampler}/_direct.py +1 -1
  39. cuqi/sampler/_gibbs.py +276 -130
  40. cuqi/sampler/_hmc.py +328 -201
  41. cuqi/sampler/_langevin_algorithm.py +282 -98
  42. cuqi/sampler/_laplace_approximation.py +87 -117
  43. cuqi/sampler/_mh.py +47 -157
  44. cuqi/sampler/_pcn.py +65 -213
  45. cuqi/sampler/_rto.py +206 -140
  46. cuqi/sampler/_sampler.py +540 -135
  47. {cuqipy-1.3.0.post0.dev298.dist-info → cuqipy-1.4.0.post0.dev92.dist-info}/METADATA +1 -1
  48. cuqipy-1.4.0.post0.dev92.dist-info/RECORD +101 -0
  49. cuqi/experimental/algebra/__init__.py +0 -2
  50. cuqi/experimental/geometry/__init__.py +0 -1
  51. cuqi/experimental/mcmc/__init__.py +0 -122
  52. cuqi/experimental/mcmc/_conjugate.py +0 -396
  53. cuqi/experimental/mcmc/_conjugate_approx.py +0 -76
  54. cuqi/experimental/mcmc/_cwmh.py +0 -190
  55. cuqi/experimental/mcmc/_gibbs.py +0 -374
  56. cuqi/experimental/mcmc/_hmc.py +0 -460
  57. cuqi/experimental/mcmc/_langevin_algorithm.py +0 -382
  58. cuqi/experimental/mcmc/_laplace_approximation.py +0 -154
  59. cuqi/experimental/mcmc/_mh.py +0 -80
  60. cuqi/experimental/mcmc/_pcn.py +0 -89
  61. cuqi/experimental/mcmc/_rto.py +0 -306
  62. cuqi/experimental/mcmc/_sampler.py +0 -564
  63. cuqipy-1.3.0.post0.dev298.dist-info/RECORD +0 -100
  64. /cuqi/{experimental/algebra/_ast.py → algebra/_abstract_syntax_tree.py} +0 -0
  65. /cuqi/{experimental/algebra/_orderedset.py → algebra/_ordered_set.py} +0 -0
  66. {cuqipy-1.3.0.post0.dev298.dist-info → cuqipy-1.4.0.post0.dev92.dist-info}/WHEEL +0 -0
  67. {cuqipy-1.3.0.post0.dev298.dist-info → cuqipy-1.4.0.post0.dev92.dist-info}/licenses/LICENSE +0 -0
  68. {cuqipy-1.3.0.post0.dev298.dist-info → cuqipy-1.4.0.post0.dev92.dist-info}/top_level.txt +0 -0
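Taken together, these renames suggest that the former cuqi.experimental.mcmc and cuqi.experimental.algebra packages have been promoted to cuqi.sampler and cuqi.algebra, while the pre-1.4 sampler implementations are kept under cuqi.legacy.sampler. A minimal sketch of how downstream imports would change under that reading; the re-export from the package __init__ files and the legacy class name pCN are assumptions (the legacy name is inferred from the matching +244 line count of cuqi/legacy/sampler/_pcn.py), not something this diff shows directly:

# Hypothetical import migration for CUQIpy 1.4 (inferred from the file renames above)
# 1.3.x experimental location (removed in 1.4):
#     from cuqi.experimental.mcmc import PCN
# 1.4.x location; PCN is the class shown in the _pcn.py diff below,
# assumed to be re-exported by cuqi/sampler/__init__.py:
from cuqi.sampler import PCN
# The 1.3.x-style sampler appears to be preserved for backwards compatibility
# (class name assumed):
from cuqi.legacy.sampler import pCN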
cuqi/sampler/_pcn.py CHANGED
@@ -1,244 +1,96 @@
  import numpy as np
  import cuqi
  from cuqi.sampler import Sampler
+ from cuqi.array import CUQIarray
 
- class pCN(Sampler):
-     #Samples target*proposal
-     #TODO. Check proposal, needs to be Gaussian and zero mean.
-     """Preconditioned Crank-Nicolson sampler
-
-     Parameters
-     ----------
-     target : `cuqi.distribution.Posterior` or tuple of likelihood and prior objects
-         If target is of type cuqi.distribution.Posterior, it represents the posterior distribution.
-         If target is a tuple of (cuqi.likelihood.Likelihood, cuqi.distribution.Distribution) objects,
-         the first element is considered the likelihood and the second is considered the prior.
+ class PCN(Sampler): # Refactor to Proposal-based sampler?
 
-     scale : int
+     _STATE_KEYS = Sampler._STATE_KEYS.union({'scale', 'current_likelihood_logd', 'lambd'})
 
-     x0 : `np.ndarray`
-         Initial point for the sampler
+     def __init__(self, target=None, scale=1.0, **kwargs):
 
-     callback : callable, *Optional*
-         If set this function will be called after every sample.
-         The signature of the callback function is `callback(sample, sample_index)`,
-         where `sample` is the current sample and `sample_index` is the index of the sample.
-         An example is shown in demos/demo31_callback.py.
+         super().__init__(target, **kwargs)
+         self.initial_scale = scale
 
-     Example
-     -------
+     def _initialize(self):
+         self.scale = self.initial_scale
+         self.current_likelihood_logd = self._loglikelihood(self.current_point)
 
-     This uses a custom logpdf and sample function.
+         # parameters used in the Robbins-Monro recursion for tuning the scale parameter
+         # see details and reference in the tune method
+         self.lambd = self.scale
+         self.star_acc = 0.44 #TODO: 0.234 # target acceptance rate
 
-     .. code-block:: python
-
-         # Parameters
-         dim = 5 # Dimension of distribution
-         mu = np.arange(dim) # Mean of Gaussian
-         std = 1 # standard deviation of Gaussian
-
-         # Logpdf function of likelihood
-         logpdf_func = lambda x: -1/(std**2)*np.sum((x-mu)**2)
-
-         # sample function of prior N(0,I)
-         sample_func = lambda : 0 + 1*np.random.randn(dim,1)
-
-         # Define as UserDefinedDistributions
-         likelihood = cuqi.likelihood.UserDefinedLikelihood(dim=dim, logpdf_func=logpdf_func)
-         prior = cuqi.distribution.UserDefinedDistribution(dim=dim, sample_func=sample_func)
-
-         # Set up sampler
-         sampler = cuqi.sampler.pCN((likelihood,prior), scale = 0.1)
-
-         # Sample
-         samples = sampler.sample(5000)
-
-     Example
-     -------
-
-     This uses CUQIpy distributions.
-
-     .. code-block:: python
+     def validate_target(self):
+         if not isinstance(self.target, cuqi.distribution.Posterior):
+             raise ValueError(f"To initialize an object of type {self.__class__}, 'target' need to be of type 'cuqi.distribution.Posterior'.")
+         if not isinstance(
+             self.prior,
+             (
+                 cuqi.distribution.Gaussian,
+                 cuqi.distribution.Normal,
+                 cuqi.distribution.GMRF,
+             ),
+         ):
+             raise ValueError("The prior distribution of the target need to be Gaussian")
+
+     def step(self):
+         # propose state
+         xi = self.prior.sample(1).flatten()   # sample from the prior
+         x_star = np.sqrt(1-self.scale**2)*self.current_point + self.scale*xi   # PCN proposal
 
-         # Parameters
-         dim = 5 # Dimension of distribution
-         mu = np.arange(dim) # Mean of Gaussian
-         std = 1 # standard deviation of Gaussian
+         # evaluate target
+         loglike_eval_star = self._loglikelihood(x_star)
 
-         # Define as UserDefinedDistributions
-         model = cuqi.model.Model(lambda x: x, range_geometry=dim, domain_geometry=dim)
-         likelihood = cuqi.distribution.Gaussian(mean=model, cov=np.ones(dim)).to_likelihood(mu)
-         prior = cuqi.distribution.Gaussian(mean=np.zeros(dim), cov=1)
+         # ratio and acceptance probability
+         ratio = loglike_eval_star - self.current_likelihood_logd  # proposal is symmetric
+         alpha = min(0, ratio)
 
-         target = cuqi.distribution.Posterior(likelihood, prior)
+         # accept/reject
+         acc = 0
+         u_theta = np.log(np.random.rand())
+         if (u_theta <= alpha):
+             self.current_point = x_star
+             self.current_likelihood_logd = loglike_eval_star
+             acc = 1
 
-         # Set up sampler
-         sampler = cuqi.sampler.pCN(target, scale = 0.1)
+         return acc
 
-         # Sample
-         samples = sampler.sample(5000)
-
-     """
-     def __init__(self, target, scale=None, x0=None, **kwargs):
-         super().__init__(target, x0=x0, dim=None, **kwargs)
-         self.scale = scale
-
      @property
      def prior(self):
-         if isinstance(self.target, cuqi.distribution.Posterior):
-             return self.target.prior
-         elif isinstance(self.target,tuple) and len(self.target)==2:
-             return self.target[1]
+         return self.target.prior
 
      @property
      def likelihood(self):
-         if isinstance(self.target, cuqi.distribution.Posterior):
-             return self.target.likelihood
-         elif isinstance(self.target,tuple) and len(self.target)==2:
-             return self.target[0]
-
-
-     @Sampler.target.setter
-     def target(self, value):
-         if isinstance(value, cuqi.distribution.Posterior):
-             self._target = value
-             self._loglikelihood = lambda x : self.likelihood.logd(x)
-         elif isinstance(value,tuple) and len(value)==2 and \
-             (isinstance(value[0], cuqi.likelihood.Likelihood) or isinstance(value[0], cuqi.likelihood.UserDefinedLikelihood)) and \
-             isinstance(value[1], cuqi.distribution.Distribution):
-             self._target = value
-             self._loglikelihood = lambda x : self.likelihood.logd(x)
-         else:
-             raise ValueError(f"To initialize an object of type {self.__class__}, 'target' need to be of type 'cuqi.distribution.Posterior'.")
-
-         #TODO:
-         #if not isinstance(self.prior,(cuqi.distribution.Gaussian, cuqi.distribution.Normal)):
-         #    raise ValueError("The prior distribution of the target need to be Gaussian")
+         return self.target.likelihood
+
+     def _loglikelihood(self, x):
+         return self.likelihood.logd(x)
 
      @property
-     def dim(self):
+     def dim(self): # TODO. Check if we need this. Implemented in base class
          if hasattr(self,'target') and hasattr(self.target,'dim'):
              self._dim = self.target.dim
          elif hasattr(self,'target') and isinstance(self.target,tuple) and len(self.target)==2:
              self._dim = self.target[0].dim
          return self._dim
 
-     def _sample(self, N, Nb):
-         if self.scale is None:
-             raise ValueError("Scale must be set to sample without adaptation. Consider using sample_adapt instead.")
-
-         Ns = N+Nb # number of simulations
-
-         # allocation
-         samples = np.empty((self.dim, Ns))
-         loglike_eval = np.empty(Ns)
-         acc = np.zeros(Ns, dtype=int)
-
-         # initial state
-         samples[:, 0] = self.x0
-         loglike_eval[0] = self._loglikelihood(self.x0)
-         acc[0] = 1
-
-         # run MCMC
-         for s in range(Ns-1):
-             # run component by component
-             samples[:, s+1], loglike_eval[s+1], acc[s+1] = self.single_update(samples[:, s], loglike_eval[s])
-
-             self._print_progress(s+2,Ns) #s+2 is the sample number, s+1 is index assuming x0 is the first sample
-             self._call_callback(samples[:, s+1], s+1)
-
-         # remove burn-in
-         samples = samples[:, Nb:]
-         loglike_eval = loglike_eval[Nb:]
-         accave = acc[Nb:].mean()
-         print('\nAverage acceptance rate:', accave, '\n')
-         #
-         return samples, loglike_eval, accave
-
-     def _sample_adapt(self, N, Nb):
-         # Set intial scale if not set
-         if self.scale is None:
-             self.scale = 0.1
-
-         Ns = N+Nb # number of simulations
-
-         # allocation
-         samples = np.empty((self.dim, Ns))
-         loglike_eval = np.empty(Ns)
-         acc = np.zeros(Ns)
-
-         # initial state
-         samples[:, 0] = self.x0
-         loglike_eval[0] = self._loglikelihood(self.x0)
-         acc[0] = 1
-
-         # initial adaptation params
-         Na = int(0.1*N) # iterations to adapt
-         hat_acc = np.empty(int(np.floor(Ns/Na))) # average acceptance rate of the chains
-         lambd = self.scale
-         star_acc = 0.44 # target acceptance rate RW
-         i, idx = 0, 0
-
-         # run MCMC
-         for s in range(Ns-1):
-             # run component by component
-             samples[:, s+1], loglike_eval[s+1], acc[s+1] = self.single_update(samples[:, s], loglike_eval[s])
-
-             # adapt prop spread using acc of past samples
-             if ((s+1) % Na == 0):
-                 # evaluate average acceptance rate
-                 hat_acc[i] = np.mean(acc[idx:idx+Na])
-
-                 # d. compute new scaling parameter
-                 zeta = 1/np.sqrt(i+1) # ensures that the variation of lambda(i) vanishes
-                 lambd = np.exp(np.log(lambd) + zeta*(hat_acc[i]-star_acc))
-
-                 # update parameters
-                 self.scale = min(lambd, 1)
-
-                 # update counters
-                 i += 1
-                 idx += Na
-
-             # display iterations
-             if ((s+1) % (max(Ns//100,1))) == 0 or (s+1) == Ns-1:
-                 print("\r",'Sample', s+1, '/', Ns, end="")
-
-             self._call_callback(samples[:, s+1], s+1)
-
-         print("\r",'Sample', s+2, '/', Ns)
-
-         # remove burn-in
-         samples = samples[:, Nb:]
-         loglike_eval = loglike_eval[Nb:]
-         accave = acc[Nb:].mean()
-         print('\nAverage acceptance rate:', accave, 'MCMC scale:', self.scale, '\n')
-
-         return samples, loglike_eval, accave
-
-     def single_update(self, x_t, loglike_eval_t):
-         # propose state
-         xi = self.prior.sample(1).flatten()   # sample from the prior
-         x_star = np.sqrt(1-self.scale**2)*x_t + self.scale*xi   # pCN proposal
+     def tune(self, skip_len, update_count):
+         """
+         Tune the scale parameter of the PCN sampler.
+         The tuning is based on algorithm 4 in Andrieu, Christophe, and Johannes Thoms.
+         "A tutorial on adaptive MCMC." Statistics and computing 18 (2008): 343-373.
+         Note: the tuning algorithm here is the same as the one used in MH sampler.
+         """
 
-         # evaluate target
-         loglike_eval_star = self._loglikelihood(x_star)
+         # average acceptance rate in the past skip_len iterations
+         hat_acc = np.mean(self._acc[-skip_len:])
 
-         # ratio and acceptance probability
-         ratio = loglike_eval_star - loglike_eval_t  # proposal is symmetric
-         alpha = min(0, ratio)
+         # new scaling parameter zeta to be used in the Robbins-Monro recursion
+         zeta = 1/np.sqrt(update_count+1)
 
-         # accept/reject
-         u_theta = np.log(np.random.rand())
-         if (u_theta <= alpha):
-             x_next = x_star
-             loglike_eval_next = loglike_eval_star
-             acc = 1
-         else:
-             x_next = x_t
-             loglike_eval_next = loglike_eval_t
-             acc = 0
-
-         return x_next, loglike_eval_next, acc
-
-
+         # Robbins-Monro recursion to ensure that the variation of lambd vanishes
+         self.lambd = np.exp(np.log(self.lambd) + zeta*(hat_acc-self.star_acc))
+
+         # update scale parameter
+         self.scale = min(self.lambd, 1)
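
For orientation, a minimal usage sketch of the refactored PCN sampler. The target setup reuses the example removed from the old pCN docstring; the warmup/sample/get_samples calls are assumed from the new Sampler base class (cuqi/sampler/_sampler.py), which is outside this hunk, so treat them as an illustration rather than confirmed API:

import numpy as np
import cuqi

# Target posterior (taken from the example in the removed pCN docstring)
dim = 5
mu = np.arange(dim)
model = cuqi.model.Model(lambda x: x, range_geometry=dim, domain_geometry=dim)
likelihood = cuqi.distribution.Gaussian(mean=model, cov=np.ones(dim)).to_likelihood(mu)
prior = cuqi.distribution.Gaussian(mean=np.zeros(dim), cov=1)
target = cuqi.distribution.Posterior(likelihood, prior)

# New-style sampler: scale defaults to 1.0 and is adapted by tune()
# via the Robbins-Monro recursion shown in the diff above.
sampler = cuqi.sampler.PCN(target, scale=0.1)
sampler.warmup(500)    # adaptive phase (assumed base-class method that calls tune())
sampler.sample(5000)   # sampling phase (assumed base-class method that calls step())
samples = sampler.get_samples()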