CUQIpy 1.3.0.post0.dev401__py3-none-any.whl → 1.4.0.post0.dev41__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (50)
  1. cuqi/__init__.py +1 -0
  2. cuqi/_version.py +3 -3
  3. cuqi/density/_density.py +9 -1
  4. cuqi/distribution/_joint_distribution.py +96 -11
  5. cuqi/experimental/__init__.py +1 -2
  6. cuqi/experimental/_recommender.py +4 -4
  7. cuqi/legacy/__init__.py +2 -0
  8. cuqi/legacy/sampler/__init__.py +11 -0
  9. cuqi/legacy/sampler/_conjugate.py +55 -0
  10. cuqi/legacy/sampler/_conjugate_approx.py +52 -0
  11. cuqi/legacy/sampler/_cwmh.py +196 -0
  12. cuqi/legacy/sampler/_gibbs.py +231 -0
  13. cuqi/legacy/sampler/_hmc.py +335 -0
  14. cuqi/legacy/sampler/_langevin_algorithm.py +198 -0
  15. cuqi/legacy/sampler/_laplace_approximation.py +184 -0
  16. cuqi/legacy/sampler/_mh.py +190 -0
  17. cuqi/legacy/sampler/_pcn.py +244 -0
  18. cuqi/legacy/sampler/_rto.py +284 -0
  19. cuqi/legacy/sampler/_sampler.py +182 -0
  20. cuqi/problem/_problem.py +87 -80
  21. cuqi/sampler/__init__.py +120 -8
  22. cuqi/sampler/_conjugate.py +376 -35
  23. cuqi/sampler/_conjugate_approx.py +40 -16
  24. cuqi/sampler/_cwmh.py +132 -138
  25. cuqi/{experimental/mcmc → sampler}/_direct.py +1 -1
  26. cuqi/sampler/_gibbs.py +269 -130
  27. cuqi/sampler/_hmc.py +328 -201
  28. cuqi/sampler/_langevin_algorithm.py +282 -98
  29. cuqi/sampler/_laplace_approximation.py +87 -117
  30. cuqi/sampler/_mh.py +47 -157
  31. cuqi/sampler/_pcn.py +56 -211
  32. cuqi/sampler/_rto.py +206 -140
  33. cuqi/sampler/_sampler.py +540 -135
  34. {cuqipy-1.3.0.post0.dev401.dist-info → cuqipy-1.4.0.post0.dev41.dist-info}/METADATA +1 -1
  35. {cuqipy-1.3.0.post0.dev401.dist-info → cuqipy-1.4.0.post0.dev41.dist-info}/RECORD +38 -37
  36. cuqi/experimental/mcmc/__init__.py +0 -122
  37. cuqi/experimental/mcmc/_conjugate.py +0 -396
  38. cuqi/experimental/mcmc/_conjugate_approx.py +0 -76
  39. cuqi/experimental/mcmc/_cwmh.py +0 -190
  40. cuqi/experimental/mcmc/_gibbs.py +0 -366
  41. cuqi/experimental/mcmc/_hmc.py +0 -462
  42. cuqi/experimental/mcmc/_langevin_algorithm.py +0 -382
  43. cuqi/experimental/mcmc/_laplace_approximation.py +0 -154
  44. cuqi/experimental/mcmc/_mh.py +0 -80
  45. cuqi/experimental/mcmc/_pcn.py +0 -89
  46. cuqi/experimental/mcmc/_rto.py +0 -350
  47. cuqi/experimental/mcmc/_sampler.py +0 -582
  48. {cuqipy-1.3.0.post0.dev401.dist-info → cuqipy-1.4.0.post0.dev41.dist-info}/WHEEL +0 -0
  49. {cuqipy-1.3.0.post0.dev401.dist-info → cuqipy-1.4.0.post0.dev41.dist-info}/licenses/LICENSE +0 -0
  50. {cuqipy-1.3.0.post0.dev401.dist-info → cuqipy-1.4.0.post0.dev41.dist-info}/top_level.txt +0 -0
cuqi/__init__.py CHANGED
@@ -11,6 +11,7 @@ from . import operator
 from . import pde
 from . import problem
 from . import sampler
+from . import legacy
 from . import array
 from . import samples
 from . import solver
cuqi/_version.py CHANGED
@@ -8,11 +8,11 @@ import json

 version_json = '''
 {
- "date": "2025-09-21T15:00:42+0200",
+ "date": "2025-10-09T13:25:50+0200",
  "dirty": false,
  "error": null,
- "full-revisionid": "f08eb2ae4f6f5209d4a06a4e031bc61bb746ce01",
- "version": "1.3.0.post0.dev401"
+ "full-revisionid": "92bb2e16f3828d8074c008d4ed6a08edcae0889d",
+ "version": "1.4.0.post0.dev41"
 }
 ''' # END VERSION_JSON

cuqi/density/_density.py CHANGED
@@ -143,7 +143,15 @@ class Density(ABC):
     def enable_FD(self, epsilon=1e-8):
         """ Enable finite difference approximation for logd gradient. Note
         that if enabled, the FD approximation will be used even if the
-        _gradient method is implemented. """
+        _gradient method is implemented.
+
+        Parameters
+        ----------
+        epsilon : float
+            Spacing (step size) to use for the finite difference approximation
+            of the logd gradient for each variable. Default is 1e-8.
+        """
         self._FD_enabled = True
         self._FD_epsilon = epsilon

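The docstring addition documents existing behavior. A minimal usage sketch (the Gaussian target and step size here are illustrative, not taken from the diff):

    import numpy as np
    import cuqi

    # Force finite difference gradients on a density
    x = cuqi.distribution.Gaussian(np.zeros(5), 1.0)
    x.enable_FD(epsilon=1e-6)   # FD is used even though Gaussian implements _gradient
    g = x.gradient(np.ones(5))  # logd gradient evaluated by finite differences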
cuqi/distribution/_joint_distribution.py CHANGED
@@ -84,6 +84,8 @@ class JointDistribution:
         cond_vars = self._get_conditioning_variables()
         if len(cond_vars) > 0:
             raise ValueError(f"Every density parameter must have a distribution (prior). Missing prior for {cond_vars}.")
+        # Initialize finite difference gradient approximation settings
+        self.disable_FD()

     # --------- Public properties ---------
     @property
@@ -96,6 +98,38 @@ class JointDistribution:
         """ Returns the geometries of the joint distribution. """
         return [dist.geometry for dist in self._distributions]

+    @property
+    def FD_enabled(self):
+        """ Returns a dictionary of keys and booleans indicating for each
+        parameter name (key) if finite difference approximation of the logd
+        gradient is enabled. """
+        par_names = self.get_parameter_names()
+        FD_enabled = {
+            par_name: self.FD_epsilon[par_name] is not None for par_name in par_names
+        }
+        return FD_enabled
+
+    @property
+    def FD_epsilon(self):
+        """ Returns a dictionary indicating for each parameter name the
+        spacing for the finite difference approximation of the logd gradient. """
+        return self._FD_epsilon
+
+    @FD_epsilon.setter
+    def FD_epsilon(self, value):
+        """ Set the spacing for the finite difference approximation of the
+        logd gradient as a dictionary. The keys are the parameter names.
+        The value for each key is either None (no FD approximation) or a float
+        representing the FD step size.
+        """
+        par_names = self.get_parameter_names()
+        if value is None:
+            self._FD_epsilon = {par_name: None for par_name in par_names}
+        else:
+            if set(value.keys()) != set(par_names):
+                raise ValueError(f"Keys of FD_epsilon must match the parameter names of the distribution {par_names}")
+            self._FD_epsilon = value
+
     # --------- Public methods ---------
     def logd(self, *args, **kwargs):
         """ Evaluate the un-normalized log density function. """
@@ -136,6 +170,33 @@ class JointDistribution:
         # Can reduce to Posterior, Likelihood or Distribution.
         return new_joint._reduce_to_single_density()

+    def enable_FD(self, epsilon=None):
+        """ Enable finite difference approximation for logd gradient. Note
+        that if enabled, the FD approximation will be used even if the
+        _gradient method is implemented. By default, all parameters
+        will have FD enabled with a step size of 1e-8.
+
+        Parameters
+        ----------
+        epsilon : dict, *optional*
+            Dictionary indicating the spacing (step size) to use for the
+            finite difference approximation of the logd gradient for each
+            variable. Keys are variable names. Values are either a float to
+            enable FD with the given value as the FD step size, or None to
+            disable FD for that variable. Default is 1e-8 for all variables.
+        """
+        if epsilon is None:
+            epsilon = {par_name: 1e-8 for par_name in self.get_parameter_names()}
+        self.FD_epsilon = epsilon
+
+    def disable_FD(self):
+        """ Disable finite difference approximation for logd gradient. """
+        par_names = self.get_parameter_names()
+        self.FD_epsilon = {par_name: None for par_name in par_names}
+
     def get_parameter_names(self) -> List[str]:
         """ Returns the parameter names of the joint distribution. """
         return [dist.name for dist in self._distributions]
@@ -202,34 +263,58 @@ class JointDistribution:
         # Count number of distributions and likelihoods
         n_dist = len(self._distributions)
         n_likelihood = len(self._likelihoods)
+        reduced_FD_epsilon = {par_name: self.FD_epsilon[par_name] for par_name in self.get_parameter_names()}
+        self.enable_FD(epsilon=reduced_FD_epsilon)

         # Cant reduce if there are multiple distributions or likelihoods
         if n_dist > 1:
             return self

+        # If only evaluated densities left return joint to ensure logd method is available
+        if n_dist == 0 and n_likelihood == 0:
+            return self
+
+        # Extract the parameter name of the distribution
+        if n_dist == 1:
+            par_name = self._distributions[0].name
+        elif n_likelihood == 1:
+            par_name = self._likelihoods[0].name
+        else:
+            par_name = None
+
         # If exactly one distribution and multiple likelihoods reduce
         if n_dist == 1 and n_likelihood > 1:
-            return MultipleLikelihoodPosterior(*self._densities)
-
+            reduced_distribution = MultipleLikelihoodPosterior(*self._densities)
+            reduced_FD_epsilon = {par_name: self.FD_epsilon[par_name]}
+
         # If exactly one distribution and one likelihood its a Posterior
         if n_dist == 1 and n_likelihood == 1:
             # Ensure parameter names match, otherwise return the joint distribution
             if set(self._likelihoods[0].get_parameter_names()) != set(self._distributions[0].get_parameter_names()):
                 return self
-            return self._add_constants_to_density(Posterior(self._likelihoods[0], self._distributions[0]))
+            reduced_distribution = Posterior(self._likelihoods[0], self._distributions[0])
+            reduced_distribution = self._add_constants_to_density(reduced_distribution)
+            reduced_FD_epsilon = self.FD_epsilon[par_name]

         # If exactly one distribution and no likelihoods its a Distribution
         if n_dist == 1 and n_likelihood == 0:
-            return self._add_constants_to_density(self._distributions[0])
-
+            # Intentionally skip enabling FD here. If the user wants FD, they
+            # can enable it for this particular distribution before forming
+            # the joint distribution.
+            return self._add_constants_to_density(self._distributions[0])
+
         # If no distributions and exactly one likelihood its a Likelihood
         if n_likelihood == 1 and n_dist == 0:
-            return self._likelihoods[0]
+            # This case seems to not happen in practice, but we include it for
+            # completeness.
+            reduced_distribution = self._likelihoods[0]
+            reduced_FD_epsilon = self.FD_epsilon[par_name]
+
+        if self.FD_enabled[par_name]:
+            reduced_distribution.enable_FD(epsilon=reduced_FD_epsilon)
+
+        return reduced_distribution

-        # If only evaluated densities left return joint to ensure logd method is available
-        if n_dist == 0 and n_likelihood == 0:
-            return self
-
     def _add_constants_to_density(self, density: Density):
         """ Add the constants (evaluated densities) to a single density. Used when reducing to single density. """

@@ -274,7 +359,7 @@ class JointDistribution:
         if len(cond_vars) > 0:
             msg += f"|{cond_vars}"
         msg += ")"
-
+
         msg += "\n"
         msg += " Densities: \n"

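Taken together, the hunks above give JointDistribution per-parameter control over finite difference gradients. A minimal sketch of the new interface (the two-parameter hierarchical model is hypothetical, not from the diff):

    import numpy as np
    import cuqi

    # Hypothetical hierarchical model: precision d, unknown x
    d = cuqi.distribution.Gamma(1, 1e-4)
    x = cuqi.distribution.Gaussian(np.zeros(5), cov=lambda d: 1/d)
    joint = cuqi.distribution.JointDistribution(d, x)

    joint.enable_FD()                        # step size 1e-8 for every parameter
    joint.enable_FD({"d": 1e-6, "x": None})  # FD for d only; keys must match parameter names
    print(joint.FD_enabled)                  # {'d': True, 'x': False}
    joint.disable_FD()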
cuqi/experimental/__init__.py CHANGED
@@ -1,5 +1,4 @@
 """ Experimental module for testing new features and ideas. """
-from . import mcmc
 from . import algebra
 from . import geometry
-from ._recommender import SamplerRecommender
+from ._recommender import SamplerRecommender
cuqi/experimental/_recommender.py CHANGED
@@ -3,7 +3,7 @@ import inspect
 import numpy as np

 # This import makes suggest_sampler easier to read
-import cuqi.experimental.mcmc as samplers
+import cuqi.sampler as samplers


 class SamplerRecommender(object):
@@ -15,7 +15,7 @@ class SamplerRecommender(object):
     target: Density or JointDistribution
         Distribution to get sampler recommendations for.

-    exceptions: list[cuqi.experimental.mcmc.Sampler], *optional*
+    exceptions: list[cuqi.sampler.Sampler], *optional*
         Samplers not to be recommended.

     Example
@@ -104,7 +104,7 @@ class SamplerRecommender(object):

         """

-        all_samplers = [(name, cls) for name, cls in inspect.getmembers(cuqi.experimental.mcmc, inspect.isclass) if issubclass(cls, cuqi.experimental.mcmc.Sampler)]
+        all_samplers = [(name, cls) for name, cls in inspect.getmembers(cuqi.sampler, inspect.isclass) if issubclass(cls, cuqi.sampler.Sampler)]
         valid_samplers = []

         for name, sampler in all_samplers:
@@ -116,7 +116,7 @@ class SamplerRecommender(object):

         # Need a separate case for HybridGibbs
         if self.valid_HybridGibbs_sampling_strategy() is not None:
-            valid_samplers += [cuqi.experimental.mcmc.HybridGibbs.__name__ if as_string else cuqi.experimental.mcmc.HybridGibbs]
+            valid_samplers += [cuqi.sampler.HybridGibbs.__name__ if as_string else cuqi.sampler.HybridGibbs]

         return valid_samplers

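The recommender logic is unchanged; only the module it draws samplers from moves from cuqi.experimental.mcmc to cuqi.sampler. A minimal sketch (the deconvolution setup is illustrative, and assumes the valid_samplers method built in the hunk above is the public entry point):

    import numpy as np
    import cuqi

    # Small Bayesian inverse problem
    A, y_data, info = cuqi.testproblem.Deconvolution1D(dim=32).get_components()
    x = cuqi.distribution.Gaussian(np.zeros(A.domain_dim), 0.1)
    y = cuqi.distribution.Gaussian(A @ x, 0.05)
    posterior = cuqi.distribution.JointDistribution(x, y)(y=y_data)

    recommender = cuqi.experimental.SamplerRecommender(posterior)
    print(recommender.valid_samplers())  # names of cuqi.sampler classes for this posterior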
cuqi/legacy/__init__.py ADDED
@@ -0,0 +1,2 @@
+""" Legacy module for functionalities that are no longer supported or developed. """
+from . import sampler
cuqi/legacy/sampler/__init__.py ADDED
@@ -0,0 +1,11 @@
+from ._sampler import Sampler, ProposalBasedSampler
+from ._conjugate import Conjugate
+from ._conjugate_approx import ConjugateApprox
+from ._cwmh import CWMH
+from ._gibbs import Gibbs
+from ._hmc import NUTS
+from ._langevin_algorithm import ULA, MALA
+from ._laplace_approximation import UGLA
+from ._mh import MH
+from ._pcn import pCN
+from ._rto import LinearRTO, RegularizedLinearRTO
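For downstream code the net effect is a rename: the pre-1.4 sampler interface survives under cuqi.legacy.sampler, while cuqi.sampler now hosts the rewritten samplers. A minimal sketch of the relocated API (the Gaussian target is illustrative):

    import numpy as np
    import cuqi

    # Old-style sampling via the relocated legacy API
    target = cuqi.distribution.Gaussian(np.zeros(5), 1.0)
    sampler = cuqi.legacy.sampler.MH(target, scale=0.1)
    samples = sampler.sample_adapt(2000)  # legacy interface: sample/sample_adapt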
cuqi/legacy/sampler/_conjugate.py ADDED
@@ -0,0 +1,55 @@
+from cuqi.distribution import Posterior, Gaussian, Gamma, GMRF
+from cuqi.implicitprior import RegularizedGaussian, RegularizedGMRF
+import numpy as np
+
+class Conjugate: # TODO: Subclass from Sampler once updated
+    """ Conjugate sampler
+
+    Sampler for sampling a posterior distribution where the likelihood and prior are conjugate.
+
+    Currently supported conjugate pairs are:
+    - (Gaussian, Gamma)
+    - (GMRF, Gamma)
+    - (RegularizedGaussian, Gamma) with nonnegativity constraints only
+
+    For more information on conjugate pairs, see https://en.wikipedia.org/wiki/Conjugate_prior.
+
+    For implicit regularized Gaussians see:
+
+    [1] Everink, Jasper M., Yiqiu Dong, and Martin S. Andersen. "Bayesian inference with projected densities." SIAM/ASA Journal on Uncertainty Quantification 11.3 (2023): 1025-1043.
+
+    """
+
+    def __init__(self, target: Posterior):
+        if not isinstance(target.likelihood.distribution, (Gaussian, GMRF, RegularizedGaussian, RegularizedGMRF)):
+            raise ValueError("Conjugate sampler only works with a Gaussian-type likelihood function")
+        if not isinstance(target.prior, Gamma):
+            raise ValueError("Conjugate sampler only works with Gamma prior")
+        if not target.prior.dim == 1:
+            raise ValueError("Conjugate sampler only works with univariate Gamma prior")
+
+        if isinstance(target.likelihood.distribution, (RegularizedGaussian, RegularizedGMRF)) and (target.likelihood.distribution.preset["constraint"] not in ["nonnegativity"] or target.likelihood.distribution.preset["regularization"] is not None):
+            raise ValueError("Conjugate sampler only works with implicit regularized Gaussian likelihoods with nonnegativity constraints")
+
+        self.target = target
+
+    def step(self, x=None):
+        # Extract variables
+        b = self.target.likelihood.data                                  # observed data
+        m = self._calc_m_for_Gaussians(b)                                # effective number of observations
+        Ax = self.target.likelihood.distribution.mean                    # model output (likelihood mean)
+        L = self.target.likelihood.distribution(np.array([1])).sqrtprec  # sqrt precision at unit parameter
+        alpha = self.target.prior.shape                                  # prior shape
+        beta = self.target.prior.rate                                    # prior rate
+
+        # Create Gamma distribution and sample
+        dist = Gamma(shape=m/2+alpha, rate=.5*np.linalg.norm(L@(Ax-b))**2+beta)
+
+        return dist.sample()
+
+    def _calc_m_for_Gaussians(self, b):
+        """ Helper method to calculate m parameter for Gaussian-Gamma conjugate pair. """
+        if isinstance(self.target.likelihood.distribution, (Gaussian, GMRF)):
+            return len(b)
+        elif isinstance(self.target.likelihood.distribution, (RegularizedGaussian, RegularizedGMRF)):
+            return np.count_nonzero(b) # See [1]
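For reference, the Gamma draw in step implements the standard Gaussian-Gamma conjugate update on the scalar precision parameter, with m the effective number of observations from _calc_m_for_Gaussians:

    d \mid b \sim \mathrm{Gamma}\left( \tfrac{m}{2} + \alpha,\ \tfrac{1}{2}\lVert L(Ax - b)\rVert_2^2 + \beta \right)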
cuqi/legacy/sampler/_conjugate_approx.py ADDED
@@ -0,0 +1,52 @@
+from cuqi.distribution import Posterior, LMRF, Gamma
+import numpy as np
+import scipy as sp
+
+class ConjugateApprox: # TODO: Subclass from Sampler once updated
+    """ Approximate conjugate sampler
+
+    Sampler for sampling a posterior distribution where the likelihood and prior can be approximated
+    by a conjugate pair.
+
+    Currently supported pairs are:
+    - (LMRF, Gamma): Approximated by (Gaussian, Gamma)
+
+    For more information on conjugate pairs, see https://en.wikipedia.org/wiki/Conjugate_prior.
+
+    """
+
+    def __init__(self, target: Posterior):
+        if not isinstance(target.likelihood.distribution, LMRF):
+            raise ValueError("Conjugate sampler only works with LMRF (Laplace diff) likelihood function")
+        if not isinstance(target.prior, Gamma):
+            raise ValueError("Conjugate sampler only works with Gamma prior")
+        self.target = target
+
+    def step(self, x=None):
+        # Here we approximate the LMRF likelihood with a Gaussian
+
+        # Extract diff_op from target likelihood
+        D = self.target.likelihood.distribution._diff_op
+        n = D.shape[0]
+
+        # Gaussian approximation of LMRF prior as function of x_k
+        # See Uribe et al. (2022) for details
+        # Currently has a zero mean assumption on likelihood! TODO
+        beta = 1e-5
+        def Lk_fun(x_k):
+            dd = 1/np.sqrt((D @ x_k)**2 + beta*np.ones(n))
+            W = sp.sparse.diags(dd)
+            return W.sqrt() @ D
+
+        x = self.target.likelihood.data  # current state
+        d = len(x)                       # dimension
+        Lx = Lk_fun(x)@x                 # weighted differences
+        alpha = self.target.prior.shape  # prior shape
+        beta = self.target.prior.rate    # prior rate
+
+        # Create Gamma distribution and sample
+        dist = Gamma(shape=d+alpha, rate=np.linalg.norm(Lx)**2+beta)
+
+        return dist.sample()
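For reference, Lk_fun builds the Gaussian approximation of the LMRF prior at the current point x_k, and step then samples the resulting approximate Gamma conditional (writing n = len(x), called d in the code):

    L_k = W_k^{1/2} D, \qquad W_k = \operatorname{diag}\!\left( \left( (D x_k)^2 + \beta \mathbf{1} \right)^{-1/2} \right), \qquad d \mid x \sim \mathrm{Gamma}\left( n + \alpha,\ \lVert L_k x \rVert_2^2 + \beta \right)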
cuqi/legacy/sampler/_cwmh.py ADDED
@@ -0,0 +1,196 @@
+import numpy as np
+import cuqi
+from cuqi.legacy.sampler import ProposalBasedSampler
+
+
+class CWMH(ProposalBasedSampler):
+    """Component-wise Metropolis-Hastings sampler.
+
+    Allows sampling of a target distribution by a component-wise random-walk sampling of a proposal distribution along with an accept/reject step.
+
+    Parameters
+    ----------
+    target : `cuqi.distribution.Distribution` or lambda function
+        The target distribution to sample. Custom logpdfs are supported by using a :class:`cuqi.distribution.UserDefinedDistribution`.
+
+    proposal : `cuqi.distribution.Distribution` or callable method
+        The proposal to sample from. If a callable method it should provide a single independent sample from the proposal distribution. Defaults to a Gaussian proposal. *Optional*.
+
+    scale : float
+        Scale parameter used to define correlation between previous and proposed sample in random-walk. *Optional*.
+
+    x0 : ndarray
+        Initial parameters. *Optional*.
+
+    dim : int
+        Dimension of parameter space. Required if target and proposal are callable functions. *Optional*.
+
+    callback : callable, *Optional*
+        If set this function will be called after every sample.
+        The signature of the callback function is `callback(sample, sample_index)`,
+        where `sample` is the current sample and `sample_index` is the index of the sample.
+        An example is shown in demos/demo31_callback.py.
+
+    Example
+    -------
+    .. code-block:: python
+
+        # Parameters
+        dim = 5 # Dimension of distribution
+        mu = np.arange(dim) # Mean of Gaussian
+        std = 1 # standard deviation of Gaussian
+
+        # Logpdf function
+        logpdf_func = lambda x: -1/(std**2)*np.sum((x-mu)**2)
+
+        # Define distribution from logpdf as UserDefinedDistribution (sample and gradients also supported as inputs to UserDefinedDistribution)
+        target = cuqi.distribution.UserDefinedDistribution(dim=dim, logpdf_func=logpdf_func)
+
+        # Set up sampler
+        sampler = cuqi.legacy.sampler.CWMH(target, scale=1)
+
+        # Sample
+        samples = sampler.sample(2000)
+
+    """
+    def __init__(self, target, proposal=None, scale=1, x0=None, dim=None, **kwargs):
+        super().__init__(target, proposal=proposal, scale=scale, x0=x0, dim=dim, **kwargs)
+
+    @ProposalBasedSampler.proposal.setter
+    def proposal(self, value):
+        fail_msg = "Proposal should be either None, cuqi.distribution.Distribution conditioned only on 'location' and 'scale', lambda function, or cuqi.distribution.Normal conditioned only on 'mean' and 'std'"
+
+        if value is None:
+            self._proposal = cuqi.distribution.Normal(mean=lambda location: location, std=lambda scale: scale, geometry=self.dim)
+
+        elif isinstance(value, cuqi.distribution.Distribution) and sorted(value.get_conditioning_variables()) == ['location', 'scale']:
+            self._proposal = value
+
+        elif isinstance(value, cuqi.distribution.Normal) and sorted(value.get_conditioning_variables()) == ['mean', 'std']:
+            self._proposal = value(mean=lambda location: location, std=lambda scale: scale)
+
+        elif not isinstance(value, cuqi.distribution.Distribution) and callable(value):
+            self._proposal = value
+
+        else:
+            raise ValueError(fail_msg)
+
+    def _sample(self, N, Nb):
+        Ns = N+Nb # number of simulations
+
+        # allocation
+        samples = np.empty((self.dim, Ns))
+        target_eval = np.empty(Ns)
+        acc = np.zeros((self.dim, Ns), dtype=int)
+
+        # initial state
+        samples[:, 0] = self.x0
+        target_eval[0] = self.target.logd(self.x0)
+        acc[:, 0] = np.ones(self.dim)
+
+        # run MCMC
+        for s in range(Ns-1):
+            # run component by component
+            samples[:, s+1], target_eval[s+1], acc[:, s+1] = self.single_update(samples[:, s], target_eval[s])
+
+            self._print_progress(s+2, Ns) # s+2 is the sample number, s+1 is index assuming x0 is the first sample
+            self._call_callback(samples[:, s+1], s+1)
+
+        # remove burn-in
+        samples = samples[:, Nb:]
+        target_eval = target_eval[Nb:]
+        acccomp = acc[:, Nb:].mean(axis=1)
+        print('\nAverage acceptance rate all components:', acccomp.mean(), '\n')
+
+        return samples, target_eval, acccomp
+
+    def _sample_adapt(self, N, Nb):
+        # this follows the vanishing adaptation Algorithm 4 in:
+        # Andrieu and Thoms (2008) - A tutorial on adaptive MCMC
+        Ns = N+Nb # number of simulations
+
+        # allocation
+        samples = np.empty((self.dim, Ns))
+        target_eval = np.empty(Ns)
+        acc = np.zeros((self.dim, Ns), dtype=int)
+
+        # initial state
+        samples[:, 0] = self.x0
+        target_eval[0] = self.target.logd(self.x0)
+        acc[:, 0] = np.ones(self.dim)
+
+        # initial adaptation params
+        Na = int(0.1*N) # iterations to adapt
+        hat_acc = np.empty((self.dim, int(np.floor(Ns/Na)))) # average acceptance rate of the chains
+        lambd = np.empty((self.dim, int(np.floor(Ns/Na)+1))) # scaling parameter \in (0,1)
+        lambd[:, 0] = self.scale
+        star_acc = 0.21/self.dim + 0.23 # target acceptance rate RW
+        i, idx = 0, 0
+
+        # run MCMC
+        for s in range(Ns-1):
+            # run component by component
+            samples[:, s+1], target_eval[s+1], acc[:, s+1] = self.single_update(samples[:, s], target_eval[s])
+
+            # adapt prop spread of each component using acc of past samples
+            if ((s+1) % Na == 0):
+                # evaluate average acceptance rate
+                hat_acc[:, i] = np.mean(acc[:, idx:idx+Na], axis=1)
+
+                # compute new scaling parameter
+                zeta = 1/np.sqrt(i+1) # ensures that the variation of lambda(i) vanishes
+                lambd[:, i+1] = np.exp(np.log(lambd[:, i]) + zeta*(hat_acc[:, i]-star_acc))
+
+                # update parameters
+                self.scale = np.minimum(lambd[:, i+1], np.ones(self.dim))
+
+                # update counters
+                i += 1
+                idx += Na
+
+            # display iterations
+            self._print_progress(s+2, Ns) # s+2 is the sample number, s+1 is index assuming x0 is the first sample
+            self._call_callback(samples[:, s+1], s+1)
+
+        # remove burn-in
+        samples = samples[:, Nb:]
+        target_eval = target_eval[Nb:]
+        acccomp = acc[:, Nb:].mean(axis=1)
+        print('\nAverage acceptance rate all components:', acccomp.mean(), '\n')
+
+        return samples, target_eval, acccomp
+
+    def single_update(self, x_t, target_eval_t):
+        if isinstance(self.proposal, cuqi.distribution.Distribution):
+            x_i_star = self.proposal(location=x_t, scale=self.scale).sample()
+        else:
+            x_i_star = self.proposal(x_t, self.scale)
+        x_star = x_t.copy()
+        acc = np.zeros(self.dim)
+
+        for j in range(self.dim):
+            # propose state
+            x_star[j] = x_i_star[j]
+
+            # evaluate target
+            target_eval_star = self.target.logd(x_star)
+
+            # ratio and acceptance probability
+            ratio = target_eval_star - target_eval_t # proposal is symmetric
+            alpha = min(0, ratio)
+
+            # accept/reject
+            u_theta = np.log(np.random.rand())
+            if (u_theta <= alpha):
+                x_t[j] = x_i_star[j]
+                target_eval_t = target_eval_star
+                acc[j] = 1
+            # otherwise the component and target value stay unchanged
+            x_star = x_t.copy()
+
+        return x_t, target_eval_t, acc
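The vanishing adaptation in _sample_adapt follows Algorithm 4 of Andrieu and Thoms (2008): every Na iterations the per-component scale is updated from the averaged acceptance rate,

    \log \lambda_{i+1} = \log \lambda_i + \zeta_i \left( \hat{a}_i - a^{\ast} \right), \qquad \zeta_i = \frac{1}{\sqrt{i+1}}, \qquad a^{\ast} = \frac{0.21}{d} + 0.23,

after which the scale is capped at one, matching np.minimum(lambd[:, i+1], np.ones(self.dim)) in the code.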