CUQIpy 1.1.1.post0.dev36__py3-none-any.whl → 1.4.1.post0.dev124__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of CUQIpy might be problematic.

Files changed (92)
  1. cuqi/__init__.py +2 -0
  2. cuqi/_version.py +3 -3
  3. cuqi/algebra/__init__.py +2 -0
  4. cuqi/algebra/_abstract_syntax_tree.py +358 -0
  5. cuqi/algebra/_ordered_set.py +82 -0
  6. cuqi/algebra/_random_variable.py +457 -0
  7. cuqi/array/_array.py +4 -13
  8. cuqi/config.py +7 -0
  9. cuqi/density/_density.py +9 -1
  10. cuqi/distribution/__init__.py +3 -2
  11. cuqi/distribution/_beta.py +7 -11
  12. cuqi/distribution/_cauchy.py +2 -2
  13. cuqi/distribution/_custom.py +0 -6
  14. cuqi/distribution/_distribution.py +31 -45
  15. cuqi/distribution/_gamma.py +7 -3
  16. cuqi/distribution/_gaussian.py +2 -12
  17. cuqi/distribution/_inverse_gamma.py +4 -10
  18. cuqi/distribution/_joint_distribution.py +112 -15
  19. cuqi/distribution/_lognormal.py +0 -7
  20. cuqi/distribution/{_modifiedhalfnormal.py → _modified_half_normal.py} +23 -23
  21. cuqi/distribution/_normal.py +34 -7
  22. cuqi/distribution/_posterior.py +9 -0
  23. cuqi/distribution/_truncated_normal.py +129 -0
  24. cuqi/distribution/_uniform.py +47 -1
  25. cuqi/experimental/__init__.py +2 -2
  26. cuqi/experimental/_recommender.py +216 -0
  27. cuqi/geometry/__init__.py +2 -0
  28. cuqi/geometry/_geometry.py +15 -1
  29. cuqi/geometry/_product_geometry.py +181 -0
  30. cuqi/implicitprior/__init__.py +5 -3
  31. cuqi/implicitprior/_regularized_gaussian.py +483 -0
  32. cuqi/implicitprior/{_regularizedGMRF.py → _regularized_gmrf.py} +4 -2
  33. cuqi/implicitprior/{_regularizedUnboundedUniform.py → _regularized_unbounded_uniform.py} +3 -2
  34. cuqi/implicitprior/_restorator.py +269 -0
  35. cuqi/legacy/__init__.py +2 -0
  36. cuqi/{experimental/mcmc → legacy/sampler}/__init__.py +7 -11
  37. cuqi/legacy/sampler/_conjugate.py +55 -0
  38. cuqi/legacy/sampler/_conjugate_approx.py +52 -0
  39. cuqi/legacy/sampler/_cwmh.py +196 -0
  40. cuqi/legacy/sampler/_gibbs.py +231 -0
  41. cuqi/legacy/sampler/_hmc.py +335 -0
  42. cuqi/{experimental/mcmc → legacy/sampler}/_langevin_algorithm.py +82 -111
  43. cuqi/legacy/sampler/_laplace_approximation.py +184 -0
  44. cuqi/legacy/sampler/_mh.py +190 -0
  45. cuqi/legacy/sampler/_pcn.py +244 -0
  46. cuqi/{experimental/mcmc → legacy/sampler}/_rto.py +132 -90
  47. cuqi/legacy/sampler/_sampler.py +182 -0
  48. cuqi/likelihood/_likelihood.py +9 -1
  49. cuqi/model/__init__.py +1 -1
  50. cuqi/model/_model.py +1361 -359
  51. cuqi/pde/__init__.py +4 -0
  52. cuqi/pde/_observation_map.py +36 -0
  53. cuqi/pde/_pde.py +134 -33
  54. cuqi/problem/_problem.py +93 -87
  55. cuqi/sampler/__init__.py +120 -8
  56. cuqi/sampler/_conjugate.py +376 -35
  57. cuqi/sampler/_conjugate_approx.py +40 -16
  58. cuqi/sampler/_cwmh.py +132 -138
  59. cuqi/{experimental/mcmc → sampler}/_direct.py +1 -1
  60. cuqi/sampler/_gibbs.py +288 -130
  61. cuqi/sampler/_hmc.py +328 -201
  62. cuqi/sampler/_langevin_algorithm.py +284 -100
  63. cuqi/sampler/_laplace_approximation.py +87 -117
  64. cuqi/sampler/_mh.py +47 -157
  65. cuqi/sampler/_pcn.py +65 -213
  66. cuqi/sampler/_rto.py +211 -142
  67. cuqi/sampler/_sampler.py +553 -136
  68. cuqi/samples/__init__.py +1 -1
  69. cuqi/samples/_samples.py +24 -18
  70. cuqi/solver/__init__.py +6 -4
  71. cuqi/solver/_solver.py +230 -26
  72. cuqi/testproblem/_testproblem.py +2 -3
  73. cuqi/utilities/__init__.py +6 -1
  74. cuqi/utilities/_get_python_variable_name.py +2 -2
  75. cuqi/utilities/_utilities.py +182 -2
  76. {CUQIpy-1.1.1.post0.dev36.dist-info → cuqipy-1.4.1.post0.dev124.dist-info}/METADATA +10 -6
  77. cuqipy-1.4.1.post0.dev124.dist-info/RECORD +101 -0
  78. {CUQIpy-1.1.1.post0.dev36.dist-info → cuqipy-1.4.1.post0.dev124.dist-info}/WHEEL +1 -1
  79. CUQIpy-1.1.1.post0.dev36.dist-info/RECORD +0 -92
  80. cuqi/experimental/mcmc/_conjugate.py +0 -197
  81. cuqi/experimental/mcmc/_conjugate_approx.py +0 -81
  82. cuqi/experimental/mcmc/_cwmh.py +0 -191
  83. cuqi/experimental/mcmc/_gibbs.py +0 -268
  84. cuqi/experimental/mcmc/_hmc.py +0 -470
  85. cuqi/experimental/mcmc/_laplace_approximation.py +0 -156
  86. cuqi/experimental/mcmc/_mh.py +0 -78
  87. cuqi/experimental/mcmc/_pcn.py +0 -89
  88. cuqi/experimental/mcmc/_sampler.py +0 -561
  89. cuqi/experimental/mcmc/_utilities.py +0 -17
  90. cuqi/implicitprior/_regularizedGaussian.py +0 -323
  91. {CUQIpy-1.1.1.post0.dev36.dist-info → cuqipy-1.4.1.post0.dev124.dist-info/licenses}/LICENSE +0 -0
  92. {CUQIpy-1.1.1.post0.dev36.dist-info → cuqipy-1.4.1.post0.dev124.dist-info}/top_level.txt +0 -0
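
The headline change in this diff is a reorganization of the sampling modules: the new-style samplers under cuqi/experimental/mcmc replace the implementations in cuqi/sampler, while the previous cuqi/sampler code is preserved under the new cuqi/legacy/sampler package (the rename pairings above are the diff tool's similarity matches). The two deletion hunks below show the experimental NUTS and UGLA samplers being removed from the experimental namespace. A minimal migration sketch follows; it is inferred from the file moves above rather than from release notes, so the exact public names should be verified against the CUQIpy documentation for your version:

    # Before (1.1.1.post0.dev36): new-style samplers lived in the
    # experimental namespace:
    # from cuqi.experimental.mcmc import NUTS, UGLA

    # After (1.4.1.post0.dev124): the same samplers are assumed to be
    # exposed from the main sampler module ...
    from cuqi.sampler import NUTS, UGLA

    # ... while the pre-1.4 sampler API is assumed to remain importable
    # from the legacy package for existing code:
    # from cuqi.legacy.sampler import MH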
cuqi/experimental/mcmc/_hmc.py (deleted)
@@ -1,470 +0,0 @@
- import numpy as np
- from cuqi.experimental.mcmc import Sampler
- from cuqi.array import CUQIarray
- from numbers import Number
-
- class NUTS(Sampler):
-     """No-U-Turn Sampler (Hoffman and Gelman, 2014).
-
-     Samples a distribution given its logpdf and gradient using a Hamiltonian
-     Monte Carlo (HMC) algorithm with automatic parameter tuning.
-
-     For more details see: Hoffman, M. D., & Gelman, A. (2014). The no-U-turn
-     sampler: Adaptively setting path lengths in Hamiltonian Monte Carlo. Journal
-     of Machine Learning Research, 15, 1593-1623.
-
-     Parameters
-     ----------
-     target : `cuqi.distribution.Distribution`
-         The target distribution to sample. Must have logpdf and gradient method.
-         Custom logpdfs and gradients are supported by using a
-         :class:`cuqi.distribution.UserDefinedDistribution`.
-
-     initial_point : ndarray
-         Initial parameters. *Optional*. If not provided, the initial point is
-         an array of ones.
-
-     max_depth : int
-         Maximum depth of the tree. Must be >= 0; the default is 15.
-
-     step_size : None or float
-         If step_size is provided (as a positive float), it will be used as the
-         initial step size. If None, the step size will be estimated by the sampler.
-
-     opt_acc_rate : float
-         The optimal acceptance rate to reach if using adaptive step size.
-         Suggested values are 0.6 (default) or 0.8 (as in Stan). In principle,
-         opt_acc_rate should be in (0, 1); however, choosing a value very
-         close to 1 or 0 might lead to poor performance of the sampler.
-
-     callback : callable, *Optional*
-         If set, this function will be called after every sample.
-         The signature of the callback function is
-         `callback(sample, sample_index)`,
-         where `sample` is the current sample and `sample_index` is the index of
-         the sample.
-         An example is shown in demos/demo31_callback.py.
-
-     Example
-     -------
-     .. code-block:: python
-
-         # Import cuqi
-         import cuqi
-
-         # Define a target distribution
-         tp = cuqi.testproblem.WangCubic()
-         target = tp.posterior
-
-         # Set up sampler
-         sampler = cuqi.experimental.mcmc.NUTS(target)
-
-         # Sample
-         sampler.warmup(5000)
-         sampler.sample(10000)
-
-         # Get samples
-         samples = sampler.get_samples()
-
-         # Plot samples
-         samples.plot_pair()
-
-     After running the NUTS sampler, run diagnostics can be accessed via the
-     following attributes:
-
-     .. code-block:: python
-
-         # Number of tree nodes created each NUTS iteration
-         sampler.num_tree_node_list
-
-         # Step size used in each NUTS iteration
-         sampler.epsilon_list
-
-         # Suggested step size during adaptation (the value of this step size is
-         # only used after adaptation).
-         sampler.epsilon_bar_list
-
-     """
-
-     _STATE_KEYS = Sampler._STATE_KEYS.union({'_epsilon', '_epsilon_bar',
-                                              '_H_bar', '_mu',
-                                              '_alpha', '_n_alpha'})
-
-     _HISTORY_KEYS = Sampler._HISTORY_KEYS.union({'num_tree_node_list',
-                                                  'epsilon_list',
-                                                  'epsilon_bar_list'})
-
-     def __init__(self, target=None, initial_point=None, max_depth=15,
-                  step_size=None, opt_acc_rate=0.6, **kwargs):
-         super().__init__(target, initial_point=initial_point, **kwargs)
-
-         # Assign parameters as attributes
-         self.max_depth = max_depth
-         self.step_size = step_size
-         self.opt_acc_rate = opt_acc_rate
-
-         # Initialize epsilon and epsilon_bar.
-         # epsilon is the step size used in the current iteration;
-         # after warm-up and one sampling step, epsilon is updated
-         # to epsilon_bar for the remaining sampling steps.
-         self._epsilon = None
-         self._epsilon_bar = None
-         self._H_bar = None
-
-         # Extra parameters for tuning
-         self._n_alpha = None
-         self._alpha = None
-
-     def _initialize(self):
-
-         # Arrays to store acceptance rate
-         self._acc = [None]  # Overwrites acc from Sampler. TODO: check if this is necessary.
-
-         self._alpha = 0  # check if meaningful value
-         self._n_alpha = 0  # check if meaningful value
-
-         self.current_target_logd, self.current_target_grad = self._nuts_target(self.current_point)
-
-         # Parameters for dual averaging
-         if self.step_size is None:
-             self._epsilon = self._FindGoodEpsilon()
-         else:
-             self._epsilon = self.step_size
-         self._epsilon_bar = "unset"
-
-         # Parameter mu; does not change during the run
-         self._mu = np.log(10*self._epsilon)
-
-         self._H_bar = 0
-
-         # NUTS run diagnostic:
-         # number of tree nodes created in each NUTS iteration
-         self._num_tree_node = 0
-
-         # Create lists to store NUTS run diagnostics
-         self._create_run_diagnostic_attributes()
-
-     #=========================================================================
-     #============================== Properties ===============================
-     #=========================================================================
-     @property
-     def max_depth(self):
-         return self._max_depth
-
-     @max_depth.setter
-     def max_depth(self, value):
-         if not isinstance(value, int):
-             raise TypeError('max_depth must be an integer.')
-         if value < 0:
-             raise ValueError('max_depth must be >= 0.')
-         self._max_depth = value
-
-     @property
-     def step_size(self):
-         return self._step_size
-
-     @step_size.setter
-     def step_size(self, value):
-         if value is None:
-             pass  # NUTS will adapt the step size
-
-         # step_size must be a positive float; raise an error otherwise
-         elif isinstance(value, bool)\
-                 or not isinstance(value, Number)\
-                 or value <= 0:
-             raise TypeError('step_size must be a positive float or None.')
-         self._step_size = value
-
-     @property
-     def opt_acc_rate(self):
-         return self._opt_acc_rate
-
-     @opt_acc_rate.setter
-     def opt_acc_rate(self, value):
-         if not isinstance(value, Number) or value <= 0 or value >= 1:
-             raise ValueError('opt_acc_rate must be a float in (0, 1).')
-         self._opt_acc_rate = value
-
-     #=========================================================================
-     #================== Implement methods required by Sampler ================
-     #=========================================================================
-     def validate_target(self):
-         # Check that the target has logd and gradient methods
-         try:
-             current_target_logd, current_target_grad =\
-                 self._nuts_target(np.ones(self.dim))
-         except Exception:
-             raise ValueError('Target must have logd and gradient methods.')
-
-     def reinitialize(self):
-         # Call the parent reset method
-         super().reinitialize()
-         # Reset NUTS run diagnostic attributes
-         self._reset_run_diagnostic_attributes()
-
-     def step(self):
-         # Convert current_point, logd, and grad to numpy arrays
-         # if they are CUQIarray objects
-         if isinstance(self.current_point, CUQIarray):
-             self.current_point = self.current_point.to_numpy()
-         if isinstance(self.current_target_logd, CUQIarray):
-             self.current_target_logd = self.current_target_logd.to_numpy()
-         if isinstance(self.current_target_grad, CUQIarray):
-             self.current_target_grad = self.current_target_grad.to_numpy()
-
-         # Reset the number of tree nodes for each iteration
-         self._num_tree_node = 0
-
-         # Copy current point, logd, and grad into local variables
-         point_k = self.current_point.copy()  # initial position (parameters)
-         logd_k = self.current_target_logd
-         grad_k = self.current_target_grad.copy()  # initial gradient
-
-         # Compute r_k and the Hamiltonian
-         r_k = self._Kfun(1, 'sample')  # resample momentum vector
-         Ham = logd_k - self._Kfun(r_k, 'eval')  # Hamiltonian
-
-         # Slice variable
-         log_u = Ham - np.random.exponential(1, size=1)
-
-         # Initialization
-         j, s, n = 0, 1, 1
-         point_minus, point_plus = np.copy(point_k), np.copy(point_k)
-         grad_minus, grad_plus = np.copy(grad_k), np.copy(grad_k)
-         r_minus, r_plus = np.copy(r_k), np.copy(r_k)
-
-         # Run NUTS
-         while (s == 1) and (j <= self.max_depth):
-             # Sample a direction
-             v = int(2*(np.random.rand() < 0.5)-1)
-
-             # Build tree: doubling procedure
-             if (v == -1):
-                 point_minus, r_minus, grad_minus, _, _, _, \
-                     point_prime, logd_prime, grad_prime, \
-                     n_prime, s_prime, alpha, n_alpha = \
-                     self._BuildTree(point_minus, r_minus, grad_minus,
-                                     Ham, log_u, v, j, self._epsilon)
-             else:
-                 _, _, _, point_plus, r_plus, grad_plus, \
-                     point_prime, logd_prime, grad_prime, \
-                     n_prime, s_prime, alpha, n_alpha = \
-                     self._BuildTree(point_plus, r_plus, grad_plus,
-                                     Ham, log_u, v, j, self._epsilon)
-
-             # Metropolis step
-             alpha2 = min(1, (n_prime/n))  # min(0, np.log(n_p) - np.log(n))
-             if (s_prime == 1) and (np.random.rand() <= alpha2):
-                 self.current_point = point_prime
-                 self.current_target_logd = logd_prime
-                 self.current_target_grad = np.copy(grad_prime)
-                 self._acc.append(1)
-             else:
-                 self._acc.append(0)
-
-             # Update number of particles, tree level, and stopping criterion
-             n += n_prime
-             dpoints = point_plus - point_minus
-             s = s_prime *\
-                 int((dpoints @ r_minus.T) >= 0) * int((dpoints @ r_plus.T) >= 0)
-             j += 1
-             self._alpha = alpha
-             self._n_alpha = n_alpha
-
-         # Update run diagnostic attributes
-         self._update_run_diagnostic_attributes(
-             self._num_tree_node, self._epsilon, self._epsilon_bar)
-
-         self._epsilon = self._epsilon_bar
-         if np.isnan(self.current_target_logd):
-             raise NameError('NaN potential func')
-
-     def tune(self, skip_len, update_count):
-         """Adapt epsilon during burn-in using dual averaging."""
-         k = update_count+1
-
-         # Fixed parameters that do not change during the run
-         gamma, t_0, kappa = 0.05, 10, 0.75  # kappa in (0.5, 1]
-
-         eta1 = 1/(k + t_0)
-         self._H_bar = (1-eta1)*self._H_bar +\
-             eta1*(self.opt_acc_rate - (self._alpha/self._n_alpha))
-         self._epsilon = np.exp(self._mu - (np.sqrt(k)/gamma)*self._H_bar)
-         eta = k**(-kappa)
-         self._epsilon_bar =\
-             np.exp(eta*np.log(self._epsilon) + (1-eta)*np.log(self._epsilon_bar))
-
-     def _pre_warmup(self):
-
-         # Set up tuning parameters (only the first time tuning is called).
-         # Note:
-         # Parameters that change during the tuning run:
-         #     self._epsilon_bar
-         #     self._H_bar
-         #     self._epsilon
-         # Parameters that do not change during the run:
-         #     self._mu
-
-         if self._epsilon_bar == "unset":  # Initial value of epsilon_bar for tuning
-             self._epsilon_bar = 1
-
-     def _pre_sample(self):
-
-         if self._epsilon_bar == "unset":  # Initial value of epsilon_bar for sampling
-             self._epsilon_bar = self._epsilon
-
-     #=========================================================================
-     def _nuts_target(self, x):  # returns the log-posterior evaluation-gradient tuple
-         return self.target.logd(x), self.target.gradient(x)
-
-     #=========================================================================
-     # Auxiliary standard Gaussian PDF: kinetic energy function
-     # d_log_2pi = d*np.log(2*np.pi)
-     def _Kfun(self, r, flag):
-         if flag == 'eval':  # evaluate
-             return 0.5*(r.T @ r)  # + d_log_2pi
-         if flag == 'sample':  # sample
-             return np.random.standard_normal(size=self.dim)
-
-     #=========================================================================
-     def _FindGoodEpsilon(self, epsilon=1):
-         point_k = self.current_point
-         self.current_target_logd, self.current_target_grad = self._nuts_target(
-             point_k)
-         logd = self.current_target_logd
-         grad = self.current_target_grad
-
-         r = self._Kfun(1, 'sample')  # resample a momentum
-         Ham = logd - self._Kfun(r, 'eval')  # initial Hamiltonian
-         _, r_prime, logd_prime, grad_prime = self._Leapfrog(
-             point_k, r, grad, epsilon)
-
-         # Trick to make sure the step is not huge, which would lead to
-         # infinite values of the likelihood
-         k = 1
-         while np.isinf(logd_prime) or np.isinf(grad_prime).any():
-             k *= 0.5
-             _, r_prime, logd_prime, grad_prime = self._Leapfrog(
-                 point_k, r, grad, epsilon*k)
-         epsilon = 0.5*k*epsilon
-
-         # Double/halve the value of epsilon until the acceptance probability
-         # of the Langevin proposal crosses 0.5
-         Ham_prime = logd_prime - self._Kfun(r_prime, 'eval')
-         log_ratio = Ham_prime - Ham
-         a = 1 if log_ratio > np.log(0.5) else -1
-         while (a*log_ratio > -a*np.log(2)):
-             epsilon = (2**a)*epsilon
-             _, r_prime, logd_prime, _ = self._Leapfrog(
-                 point_k, r, grad, epsilon)
-             Ham_prime = logd_prime - self._Kfun(r_prime, 'eval')
-             log_ratio = Ham_prime - Ham
-         return epsilon
-
-     #=========================================================================
-     def _Leapfrog(self, point_old, r_old, grad_old, epsilon):
-         # Symplectic integrator: trajectories preserve phase-space volume
-         r_new = r_old + 0.5*epsilon*grad_old  # half-step
-         point_new = point_old + epsilon*r_new  # full-step
-         logd_new, grad_new = self._nuts_target(point_new)  # new gradient
-         r_new += 0.5*epsilon*grad_new  # half-step
-         return point_new, r_new, logd_new, grad_new
-
-     #=========================================================================
-     def _BuildTree(
-             self, point_k, r, grad, Ham, log_u, v, j, epsilon, Delta_max=1000):
-         # Increment the tree node counter
-         self._num_tree_node += 1
-
-         if (j == 0):  # base case
-             # Single leapfrog step in the direction v
-             point_prime, r_prime, logd_prime, grad_prime = self._Leapfrog(
-                 point_k, r, grad, v*epsilon)
-             Ham_prime = logd_prime - self._Kfun(r_prime, 'eval')  # Hamiltonian eval
-             n_prime = int(log_u <= Ham_prime)  # if particle is in the slice
-             s_prime = int(log_u < Delta_max + Ham_prime)  # check U-turn
-             diff_Ham = Ham_prime - Ham
-
-             # Compute the acceptance probability
-             #     alpha_prime = min(1, np.exp(diff_Ham))
-             # written in a stable way to avoid overflow when computing
-             # exp(diff_Ham) for large values of diff_Ham
-             alpha_prime = 1 if diff_Ham > 0 else np.exp(diff_Ham)
-             n_alpha_prime = 1
-
-             point_minus, point_plus = point_prime, point_prime
-             r_minus, r_plus = r_prime, r_prime
-             grad_minus, grad_plus = grad_prime, grad_prime
-         else:
-             # Recursion: build the left/right subtrees
-             point_minus, r_minus, grad_minus, point_plus, r_plus, grad_plus, \
-                 point_prime, logd_prime, grad_prime, \
-                 n_prime, s_prime, alpha_prime, n_alpha_prime = \
-                 self._BuildTree(point_k, r, grad,
-                                 Ham, log_u, v, j-1, epsilon)
-             if (s_prime == 1):  # continue only if the stopping criterion was
-                 # not met in the first subtree
-                 if (v == -1):
-                     point_minus, r_minus, grad_minus, _, _, _, \
-                         point_2prime, logd_2prime, grad_2prime, \
-                         n_2prime, s_2prime, alpha_2prime, n_alpha_2prime = \
-                         self._BuildTree(point_minus, r_minus, grad_minus,
-                                         Ham, log_u, v, j-1, epsilon)
-                 else:
-                     _, _, _, point_plus, r_plus, grad_plus, \
-                         point_2prime, logd_2prime, grad_2prime, \
-                         n_2prime, s_2prime, alpha_2prime, n_alpha_2prime = \
-                         self._BuildTree(point_plus, r_plus, grad_plus,
-                                         Ham, log_u, v, j-1, epsilon)
-
-                 # Metropolis step
-                 alpha2 = n_2prime / max(1, (n_prime + n_2prime))
-                 if (np.random.rand() <= alpha2):
-                     point_prime = np.copy(point_2prime)
-                     logd_prime = np.copy(logd_2prime)
-                     grad_prime = np.copy(grad_2prime)
-
-                 # Update the number of particles and the stopping criterion
-                 alpha_prime += alpha_2prime
-                 n_alpha_prime += n_alpha_2prime
-                 dpoints = point_plus - point_minus
-                 s_prime = s_2prime *\
-                     int((dpoints@r_minus.T) >= 0) * int((dpoints@r_plus.T) >= 0)
-                 n_prime += n_2prime
-
-         return point_minus, r_minus, grad_minus, point_plus, r_plus, grad_plus, \
-             point_prime, logd_prime, grad_prime, \
-             n_prime, s_prime, alpha_prime, n_alpha_prime
-
-     #=========================================================================
-     #======================== Diagnostic methods =============================
-     #=========================================================================
-
-     def _create_run_diagnostic_attributes(self):
-         """A method to create attributes that store NUTS run diagnostics."""
-         self._reset_run_diagnostic_attributes()
-
-     def _reset_run_diagnostic_attributes(self):
-         """A method to reset attributes that store NUTS run diagnostics."""
-         # List to store the number of tree nodes created in each NUTS iteration
-         self.num_tree_node_list = []
-         # List of step sizes used in each NUTS iteration
-         self.epsilon_list = []
-         # List of step size suggestions during burn-in adaptation;
-         # only used while adaptation runs and
-         # remains fixed after adaptation (after burn-in)
-         self.epsilon_bar_list = []
-
-     def _update_run_diagnostic_attributes(self, n_tree, eps, eps_bar):
-         """A method to update attributes that store NUTS run diagnostics."""
-         # Store the number of tree nodes created in iteration k
-         self.num_tree_node_list.append(n_tree)
-         # Store the step size used in iteration k
-         self.epsilon_list.append(eps)
-         # Store the step size suggestion during adaptation in iteration k
-         self.epsilon_bar_list.append(eps_bar)
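
The tune method in the hunk above is the dual-averaging step size adaptation of Hoffman and Gelman (2014, Algorithm 6). As a reading aid, here is a minimal standalone restatement of that update; the function name and signature are illustrative, not part of the CUQIpy API. Here accept_stat corresponds to self._alpha/self._n_alpha (the mean Metropolis acceptance statistic over the last trajectory) and mu is fixed at log(10 * epsilon_0), where epsilon_0 is the initial step size.

    import numpy as np

    # Illustrative restatement of NUTS.tune above; not a CUQIpy function.
    def dual_averaging_step(k, eps_bar, H_bar, accept_stat, mu,
                            opt_acc_rate=0.6, gamma=0.05, t_0=10, kappa=0.75):
        # Running average of the deviation from the target acceptance rate
        eta1 = 1.0 / (k + t_0)
        H_bar = (1 - eta1) * H_bar + eta1 * (opt_acc_rate - accept_stat)
        # Step size to use for the next warm-up iteration
        eps = np.exp(mu - (np.sqrt(k) / gamma) * H_bar)
        # Iterate-averaged step size, kept for use once adaptation ends
        eta = k ** (-kappa)
        eps_bar = np.exp(eta * np.log(eps) + (1 - eta) * np.log(eps_bar))
        return eps, eps_bar, H_bar

After warm-up the sampler abandons the per-iteration eps and continues with the averaged eps_bar, which is what the assignment self._epsilon = self._epsilon_bar at the end of step accomplishes.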
cuqi/experimental/mcmc/_laplace_approximation.py (deleted)
@@ -1,156 +0,0 @@
- import scipy as sp
- import numpy as np
- import cuqi
- from cuqi.solver import CGLS
- from cuqi.experimental.mcmc import Sampler
-
- class UGLA(Sampler):
-     """ Unadjusted (Gaussian) Laplace Approximation sampler
-
-     Samples an approximate posterior where the prior is approximated
-     by a Gaussian distribution. The likelihood must be Gaussian.
-
-     Currently only works for LMRF priors.
-
-     The inner solver is the Conjugate Gradient Least Squares (CGLS) solver.
-
-     For more details see: Uribe, Felipe, et al. A hybrid Gibbs sampler for edge-preserving
-     tomographic reconstruction with uncertain view angles. SIAM/ASA Journal on UQ,
-     https://doi.org/10.1137/21M1412268 (2022).
-
-     Parameters
-     ----------
-     target : `cuqi.distribution.Posterior`
-         The target posterior distribution to sample.
-
-     initial_point : ndarray, *Optional*
-         Initial parameters.
-         If not provided, it defaults to zeros.
-
-     maxit : int
-         Maximum number of inner iterations for the solver when generating one sample.
-         If not provided, it defaults to 50.
-
-     tol : float
-         Tolerance for the inner solver.
-         The inner solver will stop before maxit if the convergence check reaches tol.
-         If not provided, it defaults to 1e-4.
-
-     beta : float
-         Smoothing parameter for the Gaussian approximation of the Laplace distribution.
-         A small value in the range of 1e-7 to 1e-3 is recommended, though values outside this
-         range might give better results in some cases. Generally, a larger beta value makes
-         sampling easier but results in a worse approximation. See details in Section 3.3 of the paper.
-         If not provided, it defaults to 1e-5.
-
-     callback : callable, *Optional*
-         If set, this function will be called after every sample.
-         The signature of the callback function is `callback(sample, sample_index)`,
-         where `sample` is the current sample and `sample_index` is the index of the sample.
-         An example is shown in demos/demo31_callback.py.
-     """
-     def __init__(self, target=None, initial_point=None, maxit=50, tol=1e-4, beta=1e-5, **kwargs):
-
-         super().__init__(target=target, initial_point=initial_point, **kwargs)
-
-         # Parameters
-         self.maxit = maxit
-         self.tol = tol
-         self.beta = beta
-
-     def _initialize(self):
-         self._precompute()
-
-     @property
-     def prior(self):
-         return self.target.prior
-
-     @property
-     def likelihood(self):
-         return self.target.likelihood
-
-     @property
-     def model(self):
-         return self.target.model
-
-     @property
-     def data(self):
-         return self.target.data
-
-     def _precompute(self):
-
-         D = self.prior._diff_op
-         n = D.shape[0]
-
-         # Gaussian approximation of the LMRF prior as a function of x_k
-         def Lk_fun(x_k):
-             dd = 1/np.sqrt((D @ x_k)**2 + self.beta*np.ones(n))
-             W = sp.sparse.diags(dd)
-             return W.sqrt() @ D
-         self.Lk_fun = Lk_fun
-
-         self._m = len(self.data)
-         self._L1 = self.likelihood.distribution.sqrtprec
-
-         # If the prior location is scalar, repeat it to match dimensions
-         if len(self.prior.location) == 1:
-             self._priorloc = np.repeat(self.prior.location, self.dim)
-         else:
-             self._priorloc = self.prior.location
-
-         # Initial Laplace approximation
-         self._L2 = Lk_fun(self.initial_point)
-         self._L2mu = self._L2@self._priorloc
-         self._b_tild = np.hstack([self._L1@self.data, self._L2mu])
-
-         # Least-squares form
-         def M(x, flag):
-             if flag == 1:
-                 out1 = self._L1 @ self.model.forward(x)
-                 out2 = np.sqrt(1/self.prior.scale)*(self._L2 @ x)
-                 out = np.hstack([out1, out2])
-             elif flag == 2:
-                 idx = int(self._m)
-                 out1 = self.model.adjoint(self._L1.T@x[:idx])
-                 out2 = np.sqrt(1/self.prior.scale)*(self._L2.T @ x[idx:])
-                 out = out1 + out2
-             return out
-         self.M = M
-
-     def step(self):
-         # Update the Laplace approximation
-         self._L2 = self.Lk_fun(self.current_point)
-         self._L2mu = self._L2@self._priorloc
-         self._b_tild = np.hstack([self._L1@self.data, self._L2mu])
-
-         # Sample from the approximate posterior
-         e = np.random.randn(len(self._b_tild))
-         y = self._b_tild + e  # Perturb data
-         sim = CGLS(self.M, y, self.current_point, self.maxit, self.tol)
-         self.current_point, _ = sim.solve()
-         acc = 1
-         return acc
-
-     def tune(self, skip_len, update_count):
-         pass
-
-     def validate_target(self):
-         # Check the target type
-         if not isinstance(self.target, cuqi.distribution.Posterior):
-             raise ValueError(f"To initialize an object of type {self.__class__}, 'target' needs to be of type 'cuqi.distribution.Posterior'.")
-
-         # Check for a linear model
-         if not isinstance(self.likelihood.model, cuqi.model.LinearModel):
-             raise TypeError("Model needs to be linear")
-
-         # Check for a Gaussian likelihood
-         if not hasattr(self.likelihood.distribution, "sqrtprec"):
-             raise TypeError("Distribution in Likelihood must contain a sqrtprec attribute")
-
-         # Check that the prior is LMRF
-         if not isinstance(self.prior, cuqi.distribution.LMRF):
-             raise ValueError('Unadjusted Gaussian Laplace approximation (UGLA) requires LMRF prior')
-
-     def _get_default_initial_point(self, dim):
-         """ Get the default initial point for the sampler. Defaults to an array of zeros. """
-         return np.zeros(dim)
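
Like NUTS, UGLA leaves the experimental namespace rather than disappearing: the file list shows cuqi/sampler/_laplace_approximation.py gaining a comparable implementation. Note how Lk_fun above smooths the Laplace increments, replacing |(D x)_i| with sqrt((D x)_i**2 + beta), so that the LMRF prior admits a Gaussian approximation. A hedged usage sketch follows, assuming the class keeps the name and constructor shown in the removed docstring; the test problem and prior parameters are illustrative:

    import cuqi

    # Linear forward model with Gaussian noise (illustrative test problem)
    tp = cuqi.testproblem.Deconvolution1D()

    # Edge-preserving LMRF prior, as required by UGLA's validate_target
    x = cuqi.distribution.LMRF(0, 0.01, geometry=tp.model.domain_geometry)
    y = cuqi.distribution.Gaussian(tp.model(x), 0.001)

    # Condition the joint distribution on data to obtain a Posterior target
    posterior = cuqi.distribution.JointDistribution(x, y)(y=tp.data)

    # Assumed post-move import path (cuqi.sampler, not cuqi.experimental.mcmc)
    sampler = cuqi.sampler.UGLA(posterior)
    sampler.sample(1000)
    samples = sampler.get_samples()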