CUQIpy 1.4.0.post0.dev13__py3-none-any.whl → 1.4.0.post0.dev41__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of CUQIpy might be problematic.

Files changed (48)
  1. cuqi/__init__.py +1 -0
  2. cuqi/_version.py +3 -3
  3. cuqi/experimental/__init__.py +1 -2
  4. cuqi/experimental/_recommender.py +4 -4
  5. cuqi/legacy/__init__.py +2 -0
  6. cuqi/legacy/sampler/__init__.py +11 -0
  7. cuqi/legacy/sampler/_conjugate.py +55 -0
  8. cuqi/legacy/sampler/_conjugate_approx.py +52 -0
  9. cuqi/legacy/sampler/_cwmh.py +196 -0
  10. cuqi/legacy/sampler/_gibbs.py +231 -0
  11. cuqi/legacy/sampler/_hmc.py +335 -0
  12. cuqi/legacy/sampler/_langevin_algorithm.py +198 -0
  13. cuqi/legacy/sampler/_laplace_approximation.py +184 -0
  14. cuqi/legacy/sampler/_mh.py +190 -0
  15. cuqi/legacy/sampler/_pcn.py +244 -0
  16. cuqi/legacy/sampler/_rto.py +284 -0
  17. cuqi/legacy/sampler/_sampler.py +182 -0
  18. cuqi/problem/_problem.py +87 -80
  19. cuqi/sampler/__init__.py +120 -8
  20. cuqi/sampler/_conjugate.py +376 -35
  21. cuqi/sampler/_conjugate_approx.py +40 -16
  22. cuqi/sampler/_cwmh.py +132 -138
  23. cuqi/{experimental/mcmc → sampler}/_direct.py +1 -1
  24. cuqi/sampler/_gibbs.py +269 -130
  25. cuqi/sampler/_hmc.py +328 -201
  26. cuqi/sampler/_langevin_algorithm.py +282 -98
  27. cuqi/sampler/_laplace_approximation.py +87 -117
  28. cuqi/sampler/_mh.py +47 -157
  29. cuqi/sampler/_pcn.py +56 -211
  30. cuqi/sampler/_rto.py +206 -140
  31. cuqi/sampler/_sampler.py +540 -135
  32. {cuqipy-1.4.0.post0.dev13.dist-info → cuqipy-1.4.0.post0.dev41.dist-info}/METADATA +1 -1
  33. {cuqipy-1.4.0.post0.dev13.dist-info → cuqipy-1.4.0.post0.dev41.dist-info}/RECORD +36 -35
  34. cuqi/experimental/mcmc/__init__.py +0 -122
  35. cuqi/experimental/mcmc/_conjugate.py +0 -396
  36. cuqi/experimental/mcmc/_conjugate_approx.py +0 -76
  37. cuqi/experimental/mcmc/_cwmh.py +0 -190
  38. cuqi/experimental/mcmc/_gibbs.py +0 -366
  39. cuqi/experimental/mcmc/_hmc.py +0 -462
  40. cuqi/experimental/mcmc/_langevin_algorithm.py +0 -382
  41. cuqi/experimental/mcmc/_laplace_approximation.py +0 -154
  42. cuqi/experimental/mcmc/_mh.py +0 -80
  43. cuqi/experimental/mcmc/_pcn.py +0 -89
  44. cuqi/experimental/mcmc/_rto.py +0 -350
  45. cuqi/experimental/mcmc/_sampler.py +0 -582
  46. {cuqipy-1.4.0.post0.dev13.dist-info → cuqipy-1.4.0.post0.dev41.dist-info}/WHEEL +0 -0
  47. {cuqipy-1.4.0.post0.dev13.dist-info → cuqipy-1.4.0.post0.dev41.dist-info}/licenses/LICENSE +0 -0
  48. {cuqipy-1.4.0.post0.dev13.dist-info → cuqipy-1.4.0.post0.dev41.dist-info}/top_level.txt +0 -0
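Taken together, the list shows a reorganization of the sampling module rather than new functionality: the samplers formerly under cuqi.experimental.mcmc are promoted to cuqi.sampler, while the previous cuqi.sampler implementations are preserved under a new cuqi.legacy.sampler package. A minimal sketch of what this likely means for user code is given below; the exact names re-exported by the new __init__.py files are an assumption, not something this diff confirms.

    # Hypothetical import paths implied by the file moves above; verify against
    # the actual cuqi/sampler/__init__.py and cuqi/legacy/sampler/__init__.py.
    from cuqi.sampler import PCN          # refactored sampler promoted from cuqi.experimental.mcmc
    from cuqi.legacy.sampler import pCN   # original implementation kept for backwards compatibility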
cuqi/sampler/_pcn.py CHANGED
@@ -1,244 +1,89 @@
  import numpy as np
  import cuqi
  from cuqi.sampler import Sampler
+ from cuqi.array import CUQIarray

- class pCN(Sampler):
-     #Samples target*proposal
-     #TODO. Check proposal, needs to be Gaussian and zero mean.
-     """Preconditioned Crank-Nicolson sampler
-
-     Parameters
-     ----------
-     target : `cuqi.distribution.Posterior` or tuple of likelihood and prior objects
-         If target is of type cuqi.distribution.Posterior, it represents the posterior distribution.
-         If target is a tuple of (cuqi.likelihood.Likelihood, cuqi.distribution.Distribution) objects,
-         the first element is considered the likelihood and the second is considered the prior.
+ class PCN(Sampler): # Refactor to Proposal-based sampler?

-     scale : int
+     _STATE_KEYS = Sampler._STATE_KEYS.union({'scale', 'current_likelihood_logd', 'lambd'})

-     x0 : `np.ndarray`
-         Initial point for the sampler
+     def __init__(self, target=None, scale=1.0, **kwargs):

-     callback : callable, *Optional*
-         If set this function will be called after every sample.
-         The signature of the callback function is `callback(sample, sample_index)`,
-         where `sample` is the current sample and `sample_index` is the index of the sample.
-         An example is shown in demos/demo31_callback.py.
+         super().__init__(target, **kwargs)
+         self.initial_scale = scale

-     Example
-     -------
+     def _initialize(self):
+         self.scale = self.initial_scale
+         self.current_likelihood_logd = self._loglikelihood(self.current_point)

-     This uses a custom logpdf and sample function.
+         # parameters used in the Robbins-Monro recursion for tuning the scale parameter
+         # see details and reference in the tune method
+         self.lambd = self.scale
+         self.star_acc = 0.44 #TODO: 0.234 # target acceptance rate

-     .. code-block:: python
-
-         # Parameters
-         dim = 5 # Dimension of distribution
-         mu = np.arange(dim) # Mean of Gaussian
-         std = 1 # standard deviation of Gaussian
-
-         # Logpdf function of likelihood
-         logpdf_func = lambda x: -1/(std**2)*np.sum((x-mu)**2)
-
-         # sample function of prior N(0,I)
-         sample_func = lambda : 0 + 1*np.random.randn(dim,1)
-
-         # Define as UserDefinedDistributions
-         likelihood = cuqi.likelihood.UserDefinedLikelihood(dim=dim, logpdf_func=logpdf_func)
-         prior = cuqi.distribution.UserDefinedDistribution(dim=dim, sample_func=sample_func)
-
-         # Set up sampler
-         sampler = cuqi.sampler.pCN((likelihood,prior), scale = 0.1)
-
-         # Sample
-         samples = sampler.sample(5000)
-
-     Example
-     -------
-
-     This uses CUQIpy distributions.
-
-     .. code-block:: python
-
-         # Parameters
-         dim = 5 # Dimension of distribution
-         mu = np.arange(dim) # Mean of Gaussian
-         std = 1 # standard deviation of Gaussian
+     def validate_target(self):
+         if not isinstance(self.target, cuqi.distribution.Posterior):
+             raise ValueError(f"To initialize an object of type {self.__class__}, 'target' need to be of type 'cuqi.distribution.Posterior'.")
+         if not isinstance(self.prior, (cuqi.distribution.Gaussian, cuqi.distribution.Normal)):
+             raise ValueError("The prior distribution of the target need to be Gaussian")

-         # Define as UserDefinedDistributions
-         model = cuqi.model.Model(lambda x: x, range_geometry=dim, domain_geometry=dim)
-         likelihood = cuqi.distribution.Gaussian(mean=model, cov=np.ones(dim)).to_likelihood(mu)
-         prior = cuqi.distribution.Gaussian(mean=np.zeros(dim), cov=1)
+     def step(self):
+         # propose state
+         xi = self.prior.sample(1).flatten()   # sample from the prior
+         x_star = np.sqrt(1-self.scale**2)*self.current_point + self.scale*xi   # PCN proposal

-         target = cuqi.distribution.Posterior(likelihood, prior)
+         # evaluate target
+         loglike_eval_star = self._loglikelihood(x_star)

-         # Set up sampler
-         sampler = cuqi.sampler.pCN(target, scale = 0.1)
+         # ratio and acceptance probability
+         ratio = loglike_eval_star - self.current_likelihood_logd  # proposal is symmetric
+         alpha = min(0, ratio)

-         # Sample
-         samples = sampler.sample(5000)
+         # accept/reject
+         acc = 0
+         u_theta = np.log(np.random.rand())
+         if (u_theta <= alpha):
+             self.current_point = x_star
+             self.current_likelihood_logd = loglike_eval_star
+             acc = 1

-     """
-     def __init__(self, target, scale=None, x0=None, **kwargs):
-         super().__init__(target, x0=x0, dim=None, **kwargs)
-         self.scale = scale
-
+         return acc
+
      @property
      def prior(self):
-         if isinstance(self.target, cuqi.distribution.Posterior):
-             return self.target.prior
-         elif isinstance(self.target,tuple) and len(self.target)==2:
-             return self.target[1]
+         return self.target.prior

      @property
      def likelihood(self):
-         if isinstance(self.target, cuqi.distribution.Posterior):
-             return self.target.likelihood
-         elif isinstance(self.target,tuple) and len(self.target)==2:
-             return self.target[0]
-
-
-     @Sampler.target.setter
-     def target(self, value):
-         if isinstance(value, cuqi.distribution.Posterior):
-             self._target = value
-             self._loglikelihood = lambda x : self.likelihood.logd(x)
-         elif isinstance(value,tuple) and len(value)==2 and \
-              (isinstance(value[0], cuqi.likelihood.Likelihood) or isinstance(value[0], cuqi.likelihood.UserDefinedLikelihood)) and \
-              isinstance(value[1], cuqi.distribution.Distribution):
-             self._target = value
-             self._loglikelihood = lambda x : self.likelihood.logd(x)
-         else:
-             raise ValueError(f"To initialize an object of type {self.__class__}, 'target' need to be of type 'cuqi.distribution.Posterior'.")
+         return self.target.likelihood

-         #TODO:
-         #if not isinstance(self.prior,(cuqi.distribution.Gaussian, cuqi.distribution.Normal)):
-         #    raise ValueError("The prior distribution of the target need to be Gaussian")
+     def _loglikelihood(self, x):
+         return self.likelihood.logd(x)

      @property
-     def dim(self):
+     def dim(self): # TODO. Check if we need this. Implemented in base class
          if hasattr(self,'target') and hasattr(self.target,'dim'):
              self._dim = self.target.dim
          elif hasattr(self,'target') and isinstance(self.target,tuple) and len(self.target)==2:
              self._dim = self.target[0].dim
          return self._dim

-     def _sample(self, N, Nb):
-         if self.scale is None:
-             raise ValueError("Scale must be set to sample without adaptation. Consider using sample_adapt instead.")
-
-         Ns = N+Nb   # number of simulations
-
-         # allocation
-         samples = np.empty((self.dim, Ns))
-         loglike_eval = np.empty(Ns)
-         acc = np.zeros(Ns, dtype=int)
-
-         # initial state
-         samples[:, 0] = self.x0
-         loglike_eval[0] = self._loglikelihood(self.x0)
-         acc[0] = 1
-
-         # run MCMC
-         for s in range(Ns-1):
-             # run component by component
-             samples[:, s+1], loglike_eval[s+1], acc[s+1] = self.single_update(samples[:, s], loglike_eval[s])
-
-             self._print_progress(s+2,Ns) #s+2 is the sample number, s+1 is index assuming x0 is the first sample
-             self._call_callback(samples[:, s+1], s+1)
-
-         # remove burn-in
-         samples = samples[:, Nb:]
-         loglike_eval = loglike_eval[Nb:]
-         accave = acc[Nb:].mean()
-         print('\nAverage acceptance rate:', accave, '\n')
-         #
-         return samples, loglike_eval, accave
-
-     def _sample_adapt(self, N, Nb):
-         # Set intial scale if not set
-         if self.scale is None:
-             self.scale = 0.1
-
-         Ns = N+Nb   # number of simulations
-
-         # allocation
-         samples = np.empty((self.dim, Ns))
-         loglike_eval = np.empty(Ns)
-         acc = np.zeros(Ns)
-
-         # initial state
-         samples[:, 0] = self.x0
-         loglike_eval[0] = self._loglikelihood(self.x0)
-         acc[0] = 1
-
-         # initial adaptation params
-         Na = int(0.1*N)                              # iterations to adapt
-         hat_acc = np.empty(int(np.floor(Ns/Na)))     # average acceptance rate of the chains
-         lambd = self.scale
-         star_acc = 0.44    # target acceptance rate RW
-         i, idx = 0, 0
-
-         # run MCMC
-         for s in range(Ns-1):
-             # run component by component
-             samples[:, s+1], loglike_eval[s+1], acc[s+1] = self.single_update(samples[:, s], loglike_eval[s])
-
-             # adapt prop spread using acc of past samples
-             if ((s+1) % Na == 0):
-                 # evaluate average acceptance rate
-                 hat_acc[i] = np.mean(acc[idx:idx+Na])
-
-                 # d. compute new scaling parameter
-                 zeta = 1/np.sqrt(i+1)   # ensures that the variation of lambda(i) vanishes
-                 lambd = np.exp(np.log(lambd) + zeta*(hat_acc[i]-star_acc))
-
-                 # update parameters
-                 self.scale = min(lambd, 1)
-
-                 # update counters
-                 i += 1
-                 idx += Na
-
-             # display iterations
-             if ((s+1) % (max(Ns//100,1))) == 0 or (s+1) == Ns-1:
-                 print("\r",'Sample', s+1, '/', Ns, end="")
-
-             self._call_callback(samples[:, s+1], s+1)
-
-         print("\r",'Sample', s+2, '/', Ns)
-
-         # remove burn-in
-         samples = samples[:, Nb:]
-         loglike_eval = loglike_eval[Nb:]
-         accave = acc[Nb:].mean()
-         print('\nAverage acceptance rate:', accave, 'MCMC scale:', self.scale, '\n')
-
-         return samples, loglike_eval, accave
+     def tune(self, skip_len, update_count):
+         """
+         Tune the scale parameter of the PCN sampler.
+         The tuning is based on algorithm 4 in Andrieu, Christophe, and Johannes Thoms.
+         "A tutorial on adaptive MCMC." Statistics and computing 18 (2008): 343-373.
+         Note: the tuning algorithm here is the same as the one used in MH sampler.
+         """

-     def single_update(self, x_t, loglike_eval_t):
-         # propose state
-         xi = self.prior.sample(1).flatten()   # sample from the prior
-         x_star = np.sqrt(1-self.scale**2)*x_t + self.scale*xi   # pCN proposal
+         # average acceptance rate in the past skip_len iterations
+         hat_acc = np.mean(self._acc[-skip_len:])

-         # evaluate target
-         loglike_eval_star = self._loglikelihood(x_star)
+         # new scaling parameter zeta to be used in the Robbins-Monro recursion
+         zeta = 1/np.sqrt(update_count+1)

-         # ratio and acceptance probability
-         ratio = loglike_eval_star - loglike_eval_t  # proposal is symmetric
-         alpha = min(0, ratio)
+         # Robbins-Monro recursion to ensure that the variation of lambd vanishes
+         self.lambd = np.exp(np.log(self.lambd) + zeta*(hat_acc-self.star_acc))

-         # accept/reject
-         u_theta = np.log(np.random.rand())
-         if (u_theta <= alpha):
-             x_next = x_star
-             loglike_eval_next = loglike_eval_star
-             acc = 1
-         else:
-             x_next = x_t
-             loglike_eval_next = loglike_eval_t
-             acc = 0
-
-         return x_next, loglike_eval_next, acc
-
-
+         # update scale parameter
+         self.scale = min(self.lambd, 1)
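
For context, here is a minimal usage sketch of the refactored PCN class. The problem setup is taken from the example in the removed pCN docstring; the warmup/sample/get_samples calls assume the refactored base Sampler keeps the interface of the former cuqi.experimental.mcmc samplers, which this file's diff does not show.

    import numpy as np
    import cuqi
    from cuqi.sampler import PCN

    # Identity forward model and data, as in the removed docstring example
    dim = 5
    model = cuqi.model.Model(lambda x: x, range_geometry=dim, domain_geometry=dim)
    data = np.arange(dim)

    # Gaussian likelihood and Gaussian prior (validate_target requires a Gaussian/Normal prior)
    likelihood = cuqi.distribution.Gaussian(mean=model, cov=np.ones(dim)).to_likelihood(data)
    prior = cuqi.distribution.Gaussian(mean=np.zeros(dim), cov=1)
    target = cuqi.distribution.Posterior(likelihood, prior)

    # Set up and run the sampler; during warmup, tune() adapts `scale`
    # via the Robbins-Monro recursion shown above (assumed interface)
    sampler = PCN(target, scale=0.1)
    sampler.warmup(200)
    sampler.sample(1000)
    samples = sampler.get_samples()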