CUQIpy 1.1.1.post0.dev36-py3-none-any.whl → 1.4.1.post0.dev124-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of CUQIpy might be problematic.

Files changed (92)
  1. cuqi/__init__.py +2 -0
  2. cuqi/_version.py +3 -3
  3. cuqi/algebra/__init__.py +2 -0
  4. cuqi/algebra/_abstract_syntax_tree.py +358 -0
  5. cuqi/algebra/_ordered_set.py +82 -0
  6. cuqi/algebra/_random_variable.py +457 -0
  7. cuqi/array/_array.py +4 -13
  8. cuqi/config.py +7 -0
  9. cuqi/density/_density.py +9 -1
  10. cuqi/distribution/__init__.py +3 -2
  11. cuqi/distribution/_beta.py +7 -11
  12. cuqi/distribution/_cauchy.py +2 -2
  13. cuqi/distribution/_custom.py +0 -6
  14. cuqi/distribution/_distribution.py +31 -45
  15. cuqi/distribution/_gamma.py +7 -3
  16. cuqi/distribution/_gaussian.py +2 -12
  17. cuqi/distribution/_inverse_gamma.py +4 -10
  18. cuqi/distribution/_joint_distribution.py +112 -15
  19. cuqi/distribution/_lognormal.py +0 -7
  20. cuqi/distribution/{_modifiedhalfnormal.py → _modified_half_normal.py} +23 -23
  21. cuqi/distribution/_normal.py +34 -7
  22. cuqi/distribution/_posterior.py +9 -0
  23. cuqi/distribution/_truncated_normal.py +129 -0
  24. cuqi/distribution/_uniform.py +47 -1
  25. cuqi/experimental/__init__.py +2 -2
  26. cuqi/experimental/_recommender.py +216 -0
  27. cuqi/geometry/__init__.py +2 -0
  28. cuqi/geometry/_geometry.py +15 -1
  29. cuqi/geometry/_product_geometry.py +181 -0
  30. cuqi/implicitprior/__init__.py +5 -3
  31. cuqi/implicitprior/_regularized_gaussian.py +483 -0
  32. cuqi/implicitprior/{_regularizedGMRF.py → _regularized_gmrf.py} +4 -2
  33. cuqi/implicitprior/{_regularizedUnboundedUniform.py → _regularized_unbounded_uniform.py} +3 -2
  34. cuqi/implicitprior/_restorator.py +269 -0
  35. cuqi/legacy/__init__.py +2 -0
  36. cuqi/{experimental/mcmc → legacy/sampler}/__init__.py +7 -11
  37. cuqi/legacy/sampler/_conjugate.py +55 -0
  38. cuqi/legacy/sampler/_conjugate_approx.py +52 -0
  39. cuqi/legacy/sampler/_cwmh.py +196 -0
  40. cuqi/legacy/sampler/_gibbs.py +231 -0
  41. cuqi/legacy/sampler/_hmc.py +335 -0
  42. cuqi/{experimental/mcmc → legacy/sampler}/_langevin_algorithm.py +82 -111
  43. cuqi/legacy/sampler/_laplace_approximation.py +184 -0
  44. cuqi/legacy/sampler/_mh.py +190 -0
  45. cuqi/legacy/sampler/_pcn.py +244 -0
  46. cuqi/{experimental/mcmc → legacy/sampler}/_rto.py +132 -90
  47. cuqi/legacy/sampler/_sampler.py +182 -0
  48. cuqi/likelihood/_likelihood.py +9 -1
  49. cuqi/model/__init__.py +1 -1
  50. cuqi/model/_model.py +1361 -359
  51. cuqi/pde/__init__.py +4 -0
  52. cuqi/pde/_observation_map.py +36 -0
  53. cuqi/pde/_pde.py +134 -33
  54. cuqi/problem/_problem.py +93 -87
  55. cuqi/sampler/__init__.py +120 -8
  56. cuqi/sampler/_conjugate.py +376 -35
  57. cuqi/sampler/_conjugate_approx.py +40 -16
  58. cuqi/sampler/_cwmh.py +132 -138
  59. cuqi/{experimental/mcmc → sampler}/_direct.py +1 -1
  60. cuqi/sampler/_gibbs.py +288 -130
  61. cuqi/sampler/_hmc.py +328 -201
  62. cuqi/sampler/_langevin_algorithm.py +284 -100
  63. cuqi/sampler/_laplace_approximation.py +87 -117
  64. cuqi/sampler/_mh.py +47 -157
  65. cuqi/sampler/_pcn.py +65 -213
  66. cuqi/sampler/_rto.py +211 -142
  67. cuqi/sampler/_sampler.py +553 -136
  68. cuqi/samples/__init__.py +1 -1
  69. cuqi/samples/_samples.py +24 -18
  70. cuqi/solver/__init__.py +6 -4
  71. cuqi/solver/_solver.py +230 -26
  72. cuqi/testproblem/_testproblem.py +2 -3
  73. cuqi/utilities/__init__.py +6 -1
  74. cuqi/utilities/_get_python_variable_name.py +2 -2
  75. cuqi/utilities/_utilities.py +182 -2
  76. {CUQIpy-1.1.1.post0.dev36.dist-info → cuqipy-1.4.1.post0.dev124.dist-info}/METADATA +10 -6
  77. cuqipy-1.4.1.post0.dev124.dist-info/RECORD +101 -0
  78. {CUQIpy-1.1.1.post0.dev36.dist-info → cuqipy-1.4.1.post0.dev124.dist-info}/WHEEL +1 -1
  79. CUQIpy-1.1.1.post0.dev36.dist-info/RECORD +0 -92
  80. cuqi/experimental/mcmc/_conjugate.py +0 -197
  81. cuqi/experimental/mcmc/_conjugate_approx.py +0 -81
  82. cuqi/experimental/mcmc/_cwmh.py +0 -191
  83. cuqi/experimental/mcmc/_gibbs.py +0 -268
  84. cuqi/experimental/mcmc/_hmc.py +0 -470
  85. cuqi/experimental/mcmc/_laplace_approximation.py +0 -156
  86. cuqi/experimental/mcmc/_mh.py +0 -78
  87. cuqi/experimental/mcmc/_pcn.py +0 -89
  88. cuqi/experimental/mcmc/_sampler.py +0 -561
  89. cuqi/experimental/mcmc/_utilities.py +0 -17
  90. cuqi/implicitprior/_regularizedGaussian.py +0 -323
  91. {CUQIpy-1.1.1.post0.dev36.dist-info → cuqipy-1.4.1.post0.dev124.dist-info/licenses}/LICENSE +0 -0
  92. {CUQIpy-1.1.1.post0.dev36.dist-info → cuqipy-1.4.1.post0.dev124.dist-info}/top_level.txt +0 -0
@@ -1,13 +1,18 @@
 import numpy as np
 import cuqi
 from cuqi.sampler import Sampler
+from cuqi.implicitprior import RestorationPrior, MoreauYoshidaPrior
+from cuqi.array import CUQIarray
+from copy import copy
 
-class ULA(Sampler):
+class ULA(Sampler): # Refactor to Proposal-based sampler?
     """Unadjusted Langevin algorithm (ULA) (Roberts and Tweedie, 1996)
 
-    Samples a distribution given its logpdf and gradient (up to a constant) based on
-    Langevin diffusion dL_t = dW_t + 1/2*Nabla target.logd(L_t)dt, where L_t is
-    the Langevin diffusion and W_t is the `dim`-dimensional standard Brownian motion.
+    It approximately samples a distribution given its logpdf gradient based on
+    the Langevin diffusion dL_t = dW_t + 1/2*Nabla target.logd(L_t)dt, where
+    W_t is the `dim`-dimensional standard Brownian motion.
+    ULA results from the Euler-Maruyama discretization of this Langevin stochastic
+    differential equation (SDE).
 
     For more details see: Roberts, G. O., & Tweedie, R. L. (1996). Exponential convergence
     of Langevin distributions and their discrete approximations. Bernoulli, 341-363.
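
The Euler-Maruyama discretization named in the new docstring is the update x_{k+1} = x_k + (scale/2)*grad log pi(x_k) + sqrt(scale)*xi_k, with xi_k ~ N(0, I). A minimal standalone NumPy sketch of one such step (`grad_logpi` is a placeholder for the target's gradient method, not CUQIpy API):

import numpy as np

rng = np.random.default_rng()

def ula_step(x, grad_logpi, scale):
    # One Euler-Maruyama step of the Langevin SDE; no accept/reject.
    xi = np.sqrt(scale) * rng.standard_normal(x.shape)
    return x + 0.5 * scale * grad_logpi(x) + xi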
@@ -19,22 +24,17 @@ class ULA(Sampler):
         The target distribution to sample. Must have logd and gradient method. Custom logpdfs
         and gradients are supported by using a :class:`cuqi.distribution.UserDefinedDistribution`.
 
-    x0 : ndarray
+    initial_point : ndarray
         Initial parameters. *Optional*
 
-    scale : int
-        The Langevin diffusion discretization time step (In practice, a scale of 1/dim**2 is
-        recommended but not guaranteed to be the optimal choice).
+    scale : float
+        The Langevin diffusion discretization time step (in practice, scale must
+        be smaller than 1/L, where L is the Lipschitz constant of the gradient of
+        the log target density, logd).
 
-    dim : int
-        Dimension of parameter space. Required if target logpdf and gradient are callable
-        functions. *Optional*.
-
-    callback : callable, *Optional*
-        If set this function will be called after every sample.
-        The signature of the callback function is `callback(sample, sample_index)`,
-        where `sample` is the current sample and `sample_index` is the index of the sample.
-        An example is shown in demos/demo31_callback.py.
+    callback : callable, optional
+        A function that will be called after each sampling step. It can be useful for monitoring the sampler during sampling.
+        The function should take three arguments: the sampler object, the index of the current sampling step, and the total number of requested samples; the last two arguments are integers. An example of the callback function signature is: `callback(sampler, sample_index, num_of_samples)`.
 
 
     Example
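
A minimal example of the three-argument callback described above (a hypothetical monitoring function; `target` and `dim` as in the docstring example):

def progress_callback(sampler, sample_index, num_of_samples):
    # Hypothetical monitoring hook: print progress every 100 steps.
    if sample_index % 100 == 0:
        print(f"step {sample_index}/{num_of_samples}")

sampler = cuqi.sampler.ULA(target, scale=1/dim**2, callback=progress_callback)
sampler.sample(2000)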
@@ -58,64 +58,91 @@ class ULA(Sampler):
         sampler = cuqi.sampler.ULA(target, scale=1/dim**2)
 
         # Sample
-        samples = sampler.sample(2000)
+        sampler.sample(2000)
 
-    A Deblur example can be found in demos/demo27_ULA.py
+    A Deblur example can be found in demos/demo27_ula.py
+    # TODO: update demo once sampler merged
     """
-    def __init__(self, target, scale, x0=None, dim=None, rng=None, **kwargs):
-        super().__init__(target, x0=x0, dim=dim, **kwargs)
-        self.scale = scale
-        self.rng = rng
-
-    def _sample_adapt(self, N, Nb):
-        return self._sample(N, Nb)
-
-    def _sample(self, N, Nb):
-        # allocation
-        Ns = Nb+N
-        samples = np.empty((self.dim, Ns))
-        target_eval = np.empty(Ns)
-        g_target_eval = np.empty((self.dim, Ns))
-        acc = np.zeros(Ns)
-
-        # initial state
-        samples[:, 0] = self.x0
-        target_eval[0], g_target_eval[:,0] = self.target.logd(self.x0), self.target.gradient(self.x0)
-        acc[0] = 1
-
-        # ULA
-        for s in range(Ns-1):
-            samples[:, s+1], target_eval[s+1], g_target_eval[:,s+1], acc[s+1] = \
-                self.single_update(samples[:, s], target_eval[s], g_target_eval[:,s])
-            self._print_progress(s+2, Ns) # s+2 is the sample number, s+1 is the index assuming x0 is the first sample
-            self._call_callback(samples[:, s+1], s+1)
-
-        # apply burn-in
-        samples = samples[:, Nb:]
-        target_eval = target_eval[Nb:]
-        acc = acc[Nb:]
-        return samples, target_eval, np.mean(acc)
 
-    def single_update(self, x_t, target_eval_t, g_target_eval_t):
-        # approximate Langevin diffusion
-        xi = cuqi.distribution.Normal(mean=np.zeros(self.dim), std=np.sqrt(self.scale)).sample(rng=self.rng)
-        x_star = x_t + 0.5*self.scale*g_target_eval_t + xi
-        logpi_eval_star, g_logpi_star = self.target.logd(x_star), self.target.gradient(x_star)
+    _STATE_KEYS = Sampler._STATE_KEYS.union({'scale', 'current_target_grad'})
+
+    def __init__(self, target=None, scale=1.0, **kwargs):
+        super().__init__(target, **kwargs)
+        self.initial_scale = scale
+
+    def _initialize(self):
+        self.scale = self.initial_scale
+        self.current_target_grad = self._eval_target_grad(self.current_point)
+
+    def validate_target(self):
+        try:
+            self._eval_target_grad(np.ones(self.dim))
+        except (NotImplementedError, AttributeError):
+            raise ValueError("The target needs to have a gradient method")
+
+    def _eval_target_logd(self, x):
+        return None
+
+    def _eval_target_grad(self, x):
+        return self.target.gradient(x)
+
+    def _accept_or_reject(self, x_star, target_eval_star, target_grad_star):
+        """
+        Accepts the proposed state and updates the sampler's state accordingly, i.e.,
+        current_point, current_target_eval, and current_target_grad_eval.
+
+        Parameters
+        ----------
+        x_star :
+            The proposed state
+
+        target_eval_star :
+            The log target density evaluated at x_star
+
+        target_grad_star :
+            The gradient of the log target density evaluated at x_star
+
+        Returns
+        -------
+        scalar
+            1 (ULA always accepts)
+        """
+        self.current_point = x_star
+        self.current_target_grad = target_grad_star
+        acc = 1
 
-        # msg
-        if np.isnan(logpi_eval_star):
-            raise NameError('NaN potential func. Consider using smaller scale parameter')
+        return acc
 
-        return x_star, logpi_eval_star, g_logpi_star, 1 # sample always accepted without Metropolis correction
+    def step(self):
+        # propose state
+        xi = cuqi.distribution.Normal(mean=np.zeros(self.dim), std=np.sqrt(self.scale)).sample()
+        x_star = self.current_point + 0.5*self.scale*self.current_target_grad + xi
 
+        # evaluate target
+        target_eval_star = self._eval_target_logd(x_star)
+        target_grad_star = self._eval_target_grad(x_star)
 
-class MALA(ULA):
+        # accept or reject proposal
+        acc = self._accept_or_reject(x_star, target_eval_star, target_grad_star)
+
+        return acc
+
+    def tune(self, skip_len, update_count):
+        pass
+
+
+class MALA(ULA): # Refactor to Proposal-based sampler?
     """ Metropolis-adjusted Langevin algorithm (MALA) (Roberts and Tweedie, 1996)
 
     Samples a distribution given its logd and gradient (up to a constant) based on
-    Langevin diffusion dL_t = dW_t + 1/2*Nabla target.logd(L_t)dt, where L_t is
-    the Langevin diffusion and W_t is the `dim`-dimensional standard Brownian motion.
-    The sample is then accepted or rejected according to Metropolis–Hastings algorithm.
+    the Langevin diffusion dL_t = dW_t + 1/2*Nabla target.logd(L_t)dt, where
+    W_t is the `dim`-dimensional standard Brownian motion.
+    A sample is first proposed by ULA and then accepted or rejected according
+    to a Metropolis-Hastings step.
+    This accept-reject step removes the asymptotic bias of ULA.
 
     For more details see: Roberts, G. O., & Tweedie, R. L. (1996). Exponential convergence
     of Langevin distributions and their discrete approximations. Bernoulli, 341-363.
@@ -127,21 +154,17 @@ class MALA(ULA):
         The target distribution to sample. Must have logpdf and gradient method. Custom logpdfs
         and gradients are supported by using a :class:`cuqi.distribution.UserDefinedDistribution`.
 
-    x0 : ndarray
+    initial_point : ndarray
         Initial parameters. *Optional*
 
-    scale : int
-        The Langevin diffusion discretization time step.
+    scale : float
+        The Langevin diffusion discretization time step (in practice, scale must
+        be smaller than 1/L, where L is the Lipschitz constant of the gradient of
+        the log target density, logd).
 
-    dim : int
-        Dimension of parameter space. Required if target logpdf and gradient are callable
-        functions. *Optional*.
-
-    callback : callable, *Optional*
-        If set this function will be called after every sample.
-        The signature of the callback function is `callback(sample, sample_index)`,
-        where `sample` is the current sample and `sample_index` is the index of the sample.
-        An example is shown in demos/demo31_callback.py.
+    callback : callable, optional
+        A function that will be called after each sampling step. It can be useful for monitoring the sampler during sampling.
+        The function should take three arguments: the sampler object, the index of the current sampling step, and the total number of requested samples; the last two arguments are integers. An example of the callback function signature is: `callback(sampler, sample_index, num_of_samples)`.
 
 
     Example
@@ -165,34 +188,195 @@ class MALA(ULA):
         sampler = cuqi.sampler.MALA(target, scale=1/5**2)
 
         # Sample
-        samples = sampler.sample(2000)
+        sampler.sample(2000)
 
-    A Deblur example can be found in demos/demo28_MALA.py
+    A Deblur example can be found in demos/demo28_mala.py
+    # TODO: update demo once sampler merged
     """
-    def __init__(self, target, scale, x0=None, dim=None, rng=None, **kwargs):
-        super().__init__(target, scale, x0=x0, dim=dim, rng=rng, **kwargs)
-
-    def single_update(self, x_t, target_eval_t, g_target_eval_t):
-        # approximate Langevin diffusion
-        xi = cuqi.distribution.Normal(mean=np.zeros(self.dim), std=np.sqrt(self.scale)).sample(rng=self.rng)
-        x_star = x_t + (self.scale/2)*g_target_eval_t + xi
-        logpi_eval_star, g_logpi_star = self.target.logd(x_star), self.target.gradient(x_star)
-
-        # Metropolis step
-        log_target_ratio = logpi_eval_star - target_eval_t
-        log_prop_ratio = self.log_proposal(x_t, x_star, g_logpi_star) \
-            - self.log_proposal(x_star, x_t, g_target_eval_t)
-        log_alpha = min(0, log_target_ratio + log_prop_ratio)
 
-        # accept/reject
-        log_u = np.log(cuqi.distribution.Uniform(low=0, high=1).sample(rng=self.rng))
-        if (log_u <= log_alpha) and (np.isnan(logpi_eval_star) == False):
-            return x_star, logpi_eval_star, g_logpi_star, 1
-        else:
-            return x_t.copy(), target_eval_t, g_target_eval_t.copy(), 0
+    _STATE_KEYS = ULA._STATE_KEYS.union({'current_target_logd'})
+
+    def _initialize(self):
+        super()._initialize()
+        self.current_target_logd = self.target.logd(self.current_point)
+
+    def _eval_target_logd(self, x):
+        return self.target.logd(x)
+
+    def _accept_or_reject(self, x_star, target_eval_star, target_grad_star):
+        """
+        Accepts the proposed state according to a Metropolis step and updates
+        the sampler's state accordingly, i.e., current_point, current_target_eval,
+        and current_target_grad_eval.
+
+        Parameters
+        ----------
+        x_star :
+            The proposed state
+
+        target_eval_star :
+            The log target density evaluated at x_star
+
+        target_grad_star :
+            The gradient of the log target density evaluated at x_star
+
+        Returns
+        -------
+        scalar
+            1 if accepted, 0 otherwise
+        """
+        log_target_ratio = target_eval_star - self.current_target_logd
+        log_prop_ratio = self._log_proposal(self.current_point, x_star, target_grad_star) \
+            - self._log_proposal(x_star, self.current_point, self.current_target_grad)
+        log_alpha = min(0, log_target_ratio + log_prop_ratio)
 
-    def log_proposal(self, theta_star, theta_k, g_logpi_k):
+        # accept/reject with Metropolis
+        acc = 0
+        log_u = np.log(np.random.rand())
+        if (log_u <= log_alpha) and \
+           (not np.isnan(target_eval_star)) and \
+           (not np.isinf(target_eval_star)):
+            self.current_point = x_star
+            self.current_target_logd = target_eval_star
+            self.current_target_grad = target_grad_star
+            acc = 1
+        return acc
+
+    def tune(self, skip_len, update_count):
+        pass
+
+    def _log_proposal(self, theta_star, theta_k, g_logpi_k):
         mu = theta_k + ((self.scale)/2)*g_logpi_k
         misfit = theta_star - mu
        return -0.5*((1/(self.scale))*(misfit.T @ misfit))
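
Here `_log_proposal` is the log-density (up to a constant) of the Gaussian ULA proposal q(x'|x) = N(x'; x + (scale/2)*grad log pi(x), scale*I), which gives the Metropolis-Hastings acceptance used above. A standalone NumPy sketch of the same computation (`logpi`/`grad_logpi` are placeholders, not CUQIpy API):

import numpy as np

def log_q(x_to, x_from, grad_from, scale):
    # log N(x_to; x_from + (scale/2)*grad_from, scale*I), up to an additive constant
    misfit = x_to - (x_from + 0.5 * scale * grad_from)
    return -0.5 / scale * (misfit @ misfit)

def mala_log_alpha(x, x_star, logpi, grad_logpi, scale):
    log_target_ratio = logpi(x_star) - logpi(x)
    log_prop_ratio = log_q(x, x_star, grad_logpi(x_star), scale) \
                   - log_q(x_star, x, grad_logpi(x), scale)
    return min(0.0, log_target_ratio + log_prop_ratio)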
 
+
+class MYULA(ULA):
+    """Moreau-Yoshida Unadjusted Langevin algorithm (MYULA) (Durmus et al., 2018)
+
+    Samples a smoothed target distribution given its smoothed logpdf gradient.
+    It is based on the Langevin diffusion dL_t = dW_t + 1/2*Nabla target.logd(L_t)dt,
+    where W_t is a `dim`-dimensional standard Brownian motion.
+    It targets a differentiable density (partially) smoothed by the Moreau-Yoshida
+    envelope. The smoothed target density can be made arbitrarily close to the
+    true unsmoothed target density.
+
+    For more details see: Durmus, Alain, Eric Moulines, and Marcelo Pereyra.
+    "Efficient Bayesian computation by proximal Markov chain Monte Carlo:
+    when Langevin meets Moreau." SIAM Journal on Imaging Sciences 11.1 (2018): 473-506.
+
+    Parameters
+    ----------
+
+    target : `cuqi.distribution.Distribution`
+        The target distribution to sample from. The target distribution results from
+        a differentiable likelihood and a prior of type RestorationPrior.
+
+    initial_point : ndarray
+        Initial parameters. *Optional*
+
+    scale : float
+        The Langevin diffusion discretization time step (in practice, scale must
+        be smaller than 1/L, where L is the Lipschitz constant of the gradient of
+        the log target density, logd).
+
+    smoothing_strength : float
+        This parameter controls the smoothing strength of MYULA.
+
+    callback : callable, optional
+        A function that will be called after each sampling step. It can be useful for monitoring the sampler during sampling.
+        The function should take three arguments: the sampler object, the index of the current sampling step, and the total number of requested samples; the last two arguments are integers. An example of the callback function signature is: `callback(sampler, sample_index, num_of_samples)`.
+
+    A Deblur example can be found in demos/howtos/myula.py
+    # TODO: update demo once sampler merged
+    """
+    def __init__(self, target=None, scale=1.0, smoothing_strength=0.1, **kwargs):
+        self.smoothing_strength = smoothing_strength
+        super().__init__(target=target, scale=scale, **kwargs)
+
+    @Sampler.target.setter
+    def target(self, value):
+        """ Set the target density. Runs validation of the target. """
+        self._target = value
+
+        if self._target is not None:
+            # Create a smoothed target
+            self._smoothed_target = self._create_smoothed_target(value)
+
+            # Validate the target
+            self.validate_target()
+
+    def _create_smoothed_target(self, value):
+        """ Create a smoothed target using a Moreau-Yoshida envelope. """
+        copied_value = copy(value)
+        if isinstance(copied_value.prior, RestorationPrior):
+            # Access the prior name
+            name = value.prior.name
+            copied_value.prior = MoreauYoshidaPrior(
+                copied_value.prior,
+                self.smoothing_strength,
+                name=name)
+        return copied_value
+
+    def validate_target(self):
+        # Call ULA target validation
+        super().validate_target()
+
+        # Additional validation for MYULA target
+        if isinstance(self.target.prior, MoreauYoshidaPrior):
+            raise ValueError(("The prior is already smoothed, apply"
+                              " ULA when using a MoreauYoshidaPrior."))
+        if not hasattr(self.target.prior, "restore"):
+            raise NotImplementedError(
+                ("Using MYULA with a prior that does not have a restore method"
+                 " is not supported.")
+            )
+
+    def _eval_target_grad(self, x):
+        return self._smoothed_target.gradient(x)
+
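
In the construction above, the prior's restore method plays the role of the proximal operator of the negative log-prior, and the gradient of the Moreau-Yoshida smoothed log-prior then has the standard closed form from Durmus et al. (2018). A minimal sketch of that formula (standalone; `restore` is a stand-in callable, not the package's exact interface):

import numpy as np

def my_smoothed_grad_logprior(x, restore, smoothing_strength):
    # Standard Moreau-Yoshida identity (Durmus et al., 2018):
    # grad log p_lambda(x) = (prox_{lambda*g}(x) - x) / lambda, with g = -log p
    # and prox supplied here by the prior's restore operator.
    return (restore(x) - x) / smoothing_strength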
+class PnPULA(MYULA):
+    """Plug-and-Play Unadjusted Langevin algorithm (PnP-ULA)
+    (Laumont et al., 2022)
+
+    Samples a smoothed target distribution given its smoothed logpdf gradient based on
+    the Langevin diffusion dL_t = dW_t + 1/2*Nabla target.logd(L_t)dt, where W_t is
+    a `dim`-dimensional standard Brownian motion.
+    It targets a differentiable density (partially) smoothed by a convolution
+    with a Gaussian kernel with zero mean and smoothing_strength variance. The
+    smoothed target density can be made arbitrarily close to the
+    true unsmoothed target density.
+
+    For more details see: Laumont, R., Bortoli, V. D., Almansa, A., Delon, J.,
+    Durmus, A., & Pereyra, M. (2022). Bayesian imaging using plug & play priors:
+    when Langevin meets Tweedie. SIAM Journal on Imaging Sciences, 15(2), 701-737.
+
+    Parameters
+    ----------
+
+    target : `cuqi.distribution.Distribution`
+        The target distribution to sample. The target distribution results from
+        a differentiable likelihood and a prior of type RestorationPrior.
+
+    initial_point : ndarray
+        Initial parameters. *Optional*
+
+    scale : float
+        The Langevin diffusion discretization time step (in practice, a scale of
+        1/L, where L is the Lipschitz constant of the gradient of the log target
+        density, is recommended but not guaranteed to be the optimal choice).
+
+    smoothing_strength : float
+        This parameter controls the smoothing strength of PnP-ULA.
+
+    callback : callable, optional
+        A function that will be called after each sampling step. It can be useful for monitoring the sampler during sampling.
+        The function should take three arguments: the sampler object, the index of the current sampling step, and the total number of requested samples; the last two arguments are integers. An example of the callback function signature is: `callback(sampler, sample_index, num_of_samples)`.
+
+    # TODO: update demo once sampler merged
+    """
+    def __init__(self, target=None, scale=1.0, smoothing_strength=0.1, **kwargs):
+        super().__init__(target=target, scale=scale,
+                         smoothing_strength=smoothing_strength, **kwargs)
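
Taken together, the refactored samplers are used as in the docstring examples; a minimal end-to-end sketch (the Gaussian target construction and the `get_samples` accessor are assumed from the broader CUQIpy API, not shown in this diff):

import numpy as np
import cuqi

# Assumed standard CUQIpy Gaussian target, following the docstring example pattern.
dim = 5
target = cuqi.distribution.Gaussian(mean=np.zeros(dim), cov=np.eye(dim))

sampler = cuqi.sampler.MALA(target, scale=1/dim**2)
sampler.sample(2000)               # runs the chain; samples are stored on the sampler
samples = sampler.get_samples()    # assumed accessor in the new sampler API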