CUQIpy 1.3.0.post0.dev298__py3-none-any.whl → 1.4.0.post0.dev92__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. cuqi/__init__.py +2 -0
  2. cuqi/_version.py +3 -3
  3. cuqi/algebra/__init__.py +2 -0
  4. cuqi/{experimental/algebra/_randomvariable.py → algebra/_random_variable.py} +4 -4
  5. cuqi/density/_density.py +9 -1
  6. cuqi/distribution/_distribution.py +25 -16
  7. cuqi/distribution/_joint_distribution.py +99 -14
  8. cuqi/distribution/_posterior.py +9 -0
  9. cuqi/experimental/__init__.py +1 -4
  10. cuqi/experimental/_recommender.py +4 -4
  11. cuqi/geometry/__init__.py +2 -0
  12. cuqi/{experimental/geometry/_productgeometry.py → geometry/_product_geometry.py} +1 -1
  13. cuqi/implicitprior/__init__.py +1 -1
  14. cuqi/implicitprior/_restorator.py +35 -1
  15. cuqi/legacy/__init__.py +2 -0
  16. cuqi/legacy/sampler/__init__.py +11 -0
  17. cuqi/legacy/sampler/_conjugate.py +55 -0
  18. cuqi/legacy/sampler/_conjugate_approx.py +52 -0
  19. cuqi/legacy/sampler/_cwmh.py +196 -0
  20. cuqi/legacy/sampler/_gibbs.py +231 -0
  21. cuqi/legacy/sampler/_hmc.py +335 -0
  22. cuqi/legacy/sampler/_langevin_algorithm.py +198 -0
  23. cuqi/legacy/sampler/_laplace_approximation.py +184 -0
  24. cuqi/legacy/sampler/_mh.py +190 -0
  25. cuqi/legacy/sampler/_pcn.py +244 -0
  26. cuqi/legacy/sampler/_rto.py +284 -0
  27. cuqi/legacy/sampler/_sampler.py +182 -0
  28. cuqi/likelihood/_likelihood.py +1 -1
  29. cuqi/model/_model.py +225 -90
  30. cuqi/pde/__init__.py +4 -0
  31. cuqi/pde/_observation_map.py +36 -0
  32. cuqi/pde/_pde.py +52 -21
  33. cuqi/problem/_problem.py +87 -80
  34. cuqi/sampler/__init__.py +120 -8
  35. cuqi/sampler/_conjugate.py +376 -35
  36. cuqi/sampler/_conjugate_approx.py +40 -16
  37. cuqi/sampler/_cwmh.py +132 -138
  38. cuqi/{experimental/mcmc → sampler}/_direct.py +1 -1
  39. cuqi/sampler/_gibbs.py +276 -130
  40. cuqi/sampler/_hmc.py +328 -201
  41. cuqi/sampler/_langevin_algorithm.py +282 -98
  42. cuqi/sampler/_laplace_approximation.py +87 -117
  43. cuqi/sampler/_mh.py +47 -157
  44. cuqi/sampler/_pcn.py +65 -213
  45. cuqi/sampler/_rto.py +206 -140
  46. cuqi/sampler/_sampler.py +540 -135
  47. {cuqipy-1.3.0.post0.dev298.dist-info → cuqipy-1.4.0.post0.dev92.dist-info}/METADATA +1 -1
  48. cuqipy-1.4.0.post0.dev92.dist-info/RECORD +101 -0
  49. cuqi/experimental/algebra/__init__.py +0 -2
  50. cuqi/experimental/geometry/__init__.py +0 -1
  51. cuqi/experimental/mcmc/__init__.py +0 -122
  52. cuqi/experimental/mcmc/_conjugate.py +0 -396
  53. cuqi/experimental/mcmc/_conjugate_approx.py +0 -76
  54. cuqi/experimental/mcmc/_cwmh.py +0 -190
  55. cuqi/experimental/mcmc/_gibbs.py +0 -374
  56. cuqi/experimental/mcmc/_hmc.py +0 -460
  57. cuqi/experimental/mcmc/_langevin_algorithm.py +0 -382
  58. cuqi/experimental/mcmc/_laplace_approximation.py +0 -154
  59. cuqi/experimental/mcmc/_mh.py +0 -80
  60. cuqi/experimental/mcmc/_pcn.py +0 -89
  61. cuqi/experimental/mcmc/_rto.py +0 -306
  62. cuqi/experimental/mcmc/_sampler.py +0 -564
  63. cuqipy-1.3.0.post0.dev298.dist-info/RECORD +0 -100
  64. /cuqi/{experimental/algebra/_ast.py → algebra/_abstract_syntax_tree.py} +0 -0
  65. /cuqi/{experimental/algebra/_orderedset.py → algebra/_ordered_set.py} +0 -0
  66. {cuqipy-1.3.0.post0.dev298.dist-info → cuqipy-1.4.0.post0.dev92.dist-info}/WHEEL +0 -0
  67. {cuqipy-1.3.0.post0.dev298.dist-info → cuqipy-1.4.0.post0.dev92.dist-info}/licenses/LICENSE +0 -0
  68. {cuqipy-1.3.0.post0.dev298.dist-info → cuqipy-1.4.0.post0.dev92.dist-info}/top_level.txt +0 -0
cuqi/__init__.py CHANGED
@@ -1,6 +1,7 @@
 from . import data
 from . import density
 from . import diagnostics
+from . import algebra
 from . import distribution
 from . import experimental
 from . import geometry
@@ -11,6 +12,7 @@ from . import operator
 from . import pde
 from . import problem
 from . import sampler
+from . import legacy
 from . import array
 from . import samples
 from . import solver
cuqi/_version.py CHANGED
@@ -8,11 +8,11 @@ import json
 
 version_json = '''
 {
- "date": "2025-09-04T09:48:37+0300",
+ "date": "2025-11-24T16:04:21+0100",
  "dirty": false,
  "error": null,
- "full-revisionid": "a0d227d0981362a09d24dc976ad65459ba8bf798",
- "version": "1.3.0.post0.dev298"
+ "full-revisionid": "d6622bf24d4986794de75feebd950e695d1da212",
+ "version": "1.4.0.post0.dev92"
 }
 ''' # END VERSION_JSON
 
cuqi/algebra/__init__.py ADDED
@@ -0,0 +1,2 @@
+from ._abstract_syntax_tree import VariableNode, Node
+from ._random_variable import RandomVariable
cuqi/{experimental/algebra/_randomvariable.py → algebra/_random_variable.py} RENAMED
@@ -1,7 +1,7 @@
 from __future__ import annotations
 from typing import List, Any, Union
-from ._ast import VariableNode, Node
-from ._orderedset import _OrderedSet
+from ._abstract_syntax_tree import VariableNode, Node
+from ._ordered_set import _OrderedSet
 import operator
 import cuqi
 from cuqi.distribution import Distribution
@@ -58,7 +58,7 @@ class RandomVariable:
 
         from cuqi.testproblem import Deconvolution1D
         from cuqi.distribution import Gaussian, Gamma, GMRF
-        from cuqi.experimental.algebra import RandomVariable
+        from cuqi.algebra import RandomVariable
         from cuqi.problem import BayesianProblem
 
         import numpy as np
@@ -79,7 +79,7 @@ class RandomVariable:
     .. code-block:: python
 
         from cuqi.distribution import Gaussian, Gamma
-        from cuqi.experimental.algebra import RandomVariable, VariableNode
+        from cuqi.algebra import RandomVariable, VariableNode
 
         # Define the variables
        x = VariableNode('x')
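
With this rename, random-variable algebra moves out of the experimental namespace. A minimal sketch of the new import path (the arithmetic overloads are assumed to behave as in the docstring example above):

    from cuqi.distribution import Gaussian
    from cuqi.algebra import RandomVariable

    x = RandomVariable(Gaussian(0, 1))
    y = 2 * x + 1            # algebraic operations build a transformed random variable
    print(y.is_transformed)  # assumed to report True for algebraic expressions
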
cuqi/density/_density.py CHANGED
@@ -143,7 +143,15 @@ class Density(ABC):
     def enable_FD(self, epsilon=1e-8):
         """ Enable finite difference approximation for logd gradient. Note
         that if enabled, the FD approximation will be used even if the
-        _gradient method is implemented. """
+        _gradient method is implemented.
+
+        Parameters
+        ----------
+        epsilon : float
+
+            Spacing (step size) to use for finite difference approximation for logd
+            gradient for each variable. Default is 1e-8.
+        """
         self._FD_enabled = True
         self._FD_epsilon = epsilon
 
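
A minimal usage sketch of the documented parameter (the distribution setup is illustrative; gradient is the standard Density interface):

    import numpy as np
    from cuqi.distribution import Gaussian

    x = Gaussian(np.zeros(3), 1)
    x.enable_FD(epsilon=1e-6)   # FD now overrides any analytic _gradient
    g = x.gradient(np.ones(3))  # finite-difference approximation of the logd gradient
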
cuqi/distribution/_distribution.py CHANGED
@@ -105,7 +105,7 @@ class Distribution(Density, ABC):
                 f"Inconsistent distribution geometry attribute {self._geometry} and inferred "
                 f"dimension from distribution variables {inferred_dim}."
             )
-
+
         # If Geometry dimension is None, update it with the inferred dimension
         if inferred_dim and self._geometry.par_dim is None:
             self.geometry = inferred_dim
@@ -117,7 +117,7 @@ class Distribution(Density, ABC):
         # We do not use self.name to potentially infer it from python stack.
         if self._name:
             self._geometry._variable_name = self._name
-
+
         return self._geometry
 
     @geometry.setter
@@ -160,7 +160,7 @@ class Distribution(Density, ABC):
                 f"{self.logd.__qualname__}: To evaluate the log density all conditioning variables and main"
                 f" parameter must be specified. Conditioning variables are: {cond_vars}"
             )
-
+
         # Check if all conditioning variables are specified
         all_cond_vars_specified = all([key in kwargs for key in cond_vars])
         if not all_cond_vars_specified:
@@ -168,7 +168,7 @@ class Distribution(Density, ABC):
                 f"{self.logd.__qualname__}: To evaluate the log density all conditioning variables must be"
                 f" specified. Conditioning variables are: {cond_vars}"
             )
-
+
         # Extract exactly the conditioning variables from kwargs
         cond_kwargs = {key: kwargs[key] for key in cond_vars}
 
@@ -186,7 +186,7 @@ class Distribution(Density, ABC):
         # Not conditional distribution, simply evaluate log density directly
         else:
             return super().logd(*args, **kwargs)
-
+
     def _logd(self, *args):
         return self.logpdf(*args) # Currently all distributions implement logpdf so we simply call this method.
 
@@ -216,7 +216,7 @@ class Distribution(Density, ABC):
         # Get samples from the distribution sample method
         s = self._sample(N,*args,**kwargs)
 
-        #Store samples in cuqi samples object if more than 1 sample
+        # Store samples in cuqi samples object if more than 1 sample
         if N==1:
             if len(s) == 1 and isinstance(s,np.ndarray): #Extract single value from numpy array
                 s = s.ravel()[0]
@@ -264,7 +264,7 @@ class Distribution(Density, ABC):
         # Go through every mutable variable and assign value from kwargs if present
         for var_key in mutable_vars:
 
-            #If keyword directly specifies new value of variable we simply reassign
+            # If keyword directly specifies new value of variable we simply reassign
             if var_key in kwargs:
                 setattr(new_dist, var_key, kwargs.get(var_key))
                 processed_kwargs.add(var_key)
@@ -291,9 +291,18 @@ class Distribution(Density, ABC):
 
             elif len(var_args)>0: #Some keywords found
                 # Define new partial function with partially defined args
-                func = partial(var_val, **var_args)
+                if (
+                    hasattr(var_val, "_supports_partial_eval")
+                    and var_val._supports_partial_eval
+                ):
+                    func = var_val(**var_args)
+                else:
+                    # If the callable does not support partial evaluation,
+                    # we use the partial function to set the variable
+                    func = partial(var_val, **var_args)
+
                 setattr(new_dist, var_key, func)
-
+
         # Store processed keywords
         processed_kwargs.update(var_args.keys())
 
@@ -329,7 +338,7 @@ class Distribution(Density, ABC):
 
     def get_conditioning_variables(self):
         """Return the conditioning variables of this distribution (if any)."""
-
+
         # Get all mutable variables
         mutable_vars = self.get_mutable_variables()
 
@@ -338,7 +347,7 @@ class Distribution(Density, ABC):
 
         # Add any variables defined through callable functions
         cond_vars += get_indirect_variables(self)
-
+
         return cond_vars
 
     def get_mutable_variables(self):
@@ -347,10 +356,10 @@ class Distribution(Density, ABC):
         # If mutable variables are already cached, return them
         if hasattr(self, '_mutable_vars'):
             return self._mutable_vars
-
+
         # Define list of ignored attributes and properties
         ignore_vars = ['name', 'is_symmetric', 'geometry', 'dim']
-
+
         # Get public attributes
         attributes = get_writeable_attributes(self)
 
@@ -396,7 +405,7 @@ class Distribution(Density, ABC):
                 raise ValueError(f"{self._condition.__qualname__}: {ordered_keys[index]} passed as both argument and keyword argument.\nArguments follow the listed conditioning variable order: {self.get_conditioning_variables()}")
             kwargs[ordered_keys[index]] = arg
         return kwargs
-
+
     def _check_geometry_consistency(self):
         """ Checks that the geometry of the distribution is consistent by calling the geometry property. Should be called at the end of __init__ of subclasses. """
         self.geometry
@@ -410,5 +419,5 @@ class Distribution(Density, ABC):
     @property
     def rv(self):
         """ Return a random variable object representing the distribution. """
-        from cuqi.experimental.algebra import RandomVariable
-        return RandomVariable(self)
+        from cuqi.algebra import RandomVariable
+        return RandomVariable(self)
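
The `_supports_partial_eval` branch above controls how a callable mutable variable is conditioned on a subset of its inputs. A small standalone sketch of the fallback path via functools.partial (the names here are illustrative, not CUQIpy API):

    from functools import partial

    def cov_fun(s, d):  # callable variable depending on two conditioning inputs
        return d / s

    # Conditioning on s alone cannot evaluate cov_fun yet, so the distribution
    # keeps a partially applied callable that still exposes d as a parameter.
    func = partial(cov_fun, s=2.0)
    print(func(d=4.0))  # 2.0
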
cuqi/distribution/_joint_distribution.py CHANGED
@@ -62,16 +62,16 @@ class JointDistribution:
         posterior = joint(y=y_obs)
 
     """
-    def __init__(self, *densities: [Density, cuqi.experimental.algebra.RandomVariable]):
+    def __init__(self, *densities: [Density, cuqi.algebra.RandomVariable]):
         """ Create a joint distribution from the given densities. """
 
         # Check if all RandomVariables are simple (not-transformed)
         for density in densities:
-            if isinstance(density, cuqi.experimental.algebra.RandomVariable) and density.is_transformed:
+            if isinstance(density, cuqi.algebra.RandomVariable) and density.is_transformed:
                 raise ValueError(f"To be used in {self.__class__.__name__}, all RandomVariables must be untransformed.")
 
         # Convert potential random variables to their underlying distribution
-        densities = [density.distribution if isinstance(density, cuqi.experimental.algebra.RandomVariable) else density for density in densities]
+        densities = [density.distribution if isinstance(density, cuqi.algebra.RandomVariable) else density for density in densities]
 
         # Ensure all densities have unique names
         names = [density.name for density in densities]
@@ -84,6 +84,8 @@ class JointDistribution:
         cond_vars = self._get_conditioning_variables()
         if len(cond_vars) > 0:
             raise ValueError(f"Every density parameter must have a distribution (prior). Missing prior for {cond_vars}.")
+        # Initialize finite difference gradient approximation settings
+        self.disable_FD()
 
     # --------- Public properties ---------
     @property
@@ -96,6 +98,38 @@ class JointDistribution:
         """ Returns the geometries of the joint distribution. """
         return [dist.geometry for dist in self._distributions]
 
+    @property
+    def FD_enabled(self):
+        """ Returns a dictionary of keys and booleans indicating for each
+        parameter name (key) if finite difference approximation of the logd
+        gradient is enabled. """
+        par_names = self.get_parameter_names()
+        FD_enabled = {
+            par_name: self.FD_epsilon[par_name] is not None for par_name in par_names
+        }
+        return FD_enabled
+
+    @property
+    def FD_epsilon(self):
+        """ Returns a dictionary indicating for each parameter name the
+        spacing for the finite difference approximation of the logd gradient."""
+        return self._FD_epsilon
+
+    @FD_epsilon.setter
+    def FD_epsilon(self, value):
+        """ Set the spacing for the finite difference approximation of the
+        logd gradient as a dictionary. The keys are the parameter names.
+        The value for each key is either None (no FD approximation) or a float
+        representing the FD step size.
+        """
+        par_names = self.get_parameter_names()
+        if value is None:
+            self._FD_epsilon = {par_name: None for par_name in par_names}
+        else:
+            if set(value.keys()) != set(par_names):
+                raise ValueError("Keys of FD_epsilon must match the parameter names of the distribution "+f" {par_names}")
+            self._FD_epsilon = value
+
     # --------- Public methods ---------
     def logd(self, *args, **kwargs):
         """ Evaluate the un-normalized log density function. """
@@ -136,6 +170,33 @@ class JointDistribution:
         # Can reduce to Posterior, Likelihood or Distribution.
         return new_joint._reduce_to_single_density()
 
+    def enable_FD(self, epsilon=None):
+        """ Enable finite difference approximation for logd gradient. Note
+        that if enabled, the FD approximation will be used even if the
+        _gradient method is implemented. By default, all parameters
+        will have FD enabled with a step size of 1e-8.
+
+        Parameters
+        ----------
+        epsilon : dict, *optional*
+
+            Dictionary indicating the spacing (step size) to use for finite
+            difference approximation for logd gradient for each variable.
+
+            Keys are variable names.
+            Values are either a float to enable FD with the given value as the FD
+            step size, or None to disable FD for that variable. Default is 1e-8 for
+            all variables.
+        """
+        if epsilon is None:
+            epsilon = {par_name: 1e-8 for par_name in self.get_parameter_names()}
+        self.FD_epsilon = epsilon
+
+    def disable_FD(self):
+        """ Disable finite difference approximation for logd gradient. """
+        par_names = self.get_parameter_names()
+        self.FD_epsilon = {par_name: None for par_name in par_names}
+
     def get_parameter_names(self) -> List[str]:
         """ Returns the parameter names of the joint distribution. """
         return [dist.name for dist in self._distributions]
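
A minimal sketch of the per-parameter FD control added above (the hierarchical model is illustrative; parameter names are inferred from the variable names):

    import numpy as np
    from cuqi.distribution import Gaussian, Gamma, JointDistribution

    s = Gamma(1, 1e-4)
    x = Gaussian(np.zeros(3), cov=lambda s: 1/s)
    joint = JointDistribution(x, s)

    joint.enable_FD()                                # step size 1e-8 for every parameter
    joint.enable_FD(epsilon={'x': 1e-6, 's': None})  # per-parameter control
    print(joint.FD_enabled)                          # {'x': True, 's': False}
    joint.disable_FD()
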
@@ -202,34 +263,58 @@ class JointDistribution:
         # Count number of distributions and likelihoods
         n_dist = len(self._distributions)
         n_likelihood = len(self._likelihoods)
+        reduced_FD_epsilon = {par_name:self.FD_epsilon[par_name] for par_name in self.get_parameter_names()}
+        self.enable_FD(epsilon=reduced_FD_epsilon)
 
         # Cant reduce if there are multiple distributions or likelihoods
         if n_dist > 1:
             return self
 
+        # If only evaluated densities left return joint to ensure logd method is available
+        if n_dist == 0 and n_likelihood == 0:
+            return self
+
+        # Extract the parameter name of the distribution
+        if n_dist == 1:
+            par_name = self._distributions[0].name
+        elif n_likelihood == 1:
+            par_name = self._likelihoods[0].name
+        else:
+            par_name = None
+
         # If exactly one distribution and multiple likelihoods reduce
         if n_dist == 1 and n_likelihood > 1:
-            return MultipleLikelihoodPosterior(*self._densities)
+            reduced_distribution = MultipleLikelihoodPosterior(*self._densities)
+            reduced_FD_epsilon = {par_name:self.FD_epsilon[par_name]}
-
+
         # If exactly one distribution and one likelihood its a Posterior
         if n_dist == 1 and n_likelihood == 1:
             # Ensure parameter names match, otherwise return the joint distribution
             if set(self._likelihoods[0].get_parameter_names()) != set(self._distributions[0].get_parameter_names()):
                 return self
-            return self._add_constants_to_density(Posterior(self._likelihoods[0], self._distributions[0]))
+            reduced_distribution = Posterior(self._likelihoods[0], self._distributions[0])
+            reduced_distribution = self._add_constants_to_density(reduced_distribution)
+            reduced_FD_epsilon = self.FD_epsilon[par_name]
 
         # If exactly one distribution and no likelihoods its a Distribution
         if n_dist == 1 and n_likelihood == 0:
-            return self._add_constants_to_density(self._distributions[0])
+            # Intentionally skip enabling FD here. If the user wants FD, they
+            # can enable it for this particular distribution before forming
+            # the joint distribution.
+            return self._add_constants_to_density(self._distributions[0])
-
+
         # If no distributions and exactly one likelihood its a Likelihood
         if n_likelihood == 1 and n_dist == 0:
-            return self._likelihoods[0]
+            # This case seems to not happen in practice, but we include it for
+            # completeness.
+            reduced_distribution = self._likelihoods[0]
+            reduced_FD_epsilon = self.FD_epsilon[par_name]
+
+        if self.FD_enabled[par_name]:
+            reduced_distribution.enable_FD(epsilon=reduced_FD_epsilon)
+
+        return reduced_distribution
 
-        # If only evaluated densities left return joint to ensure logd method is available
-        if n_dist == 0 and n_likelihood == 0:
-            return self
-
     def _add_constants_to_density(self, density: Density):
         """ Add the constants (evaluated densities) to a single density. Used when reducing to single density. """
 
@@ -274,7 +359,7 @@ class JointDistribution:
         if len(cond_vars) > 0:
             msg += f"|{cond_vars}"
         msg += ")"
-
+
         msg += "\n"
         msg += " Densities: \n"
 
cuqi/distribution/_posterior.py CHANGED
@@ -1,5 +1,6 @@
 from cuqi.geometry import _DefaultGeometry, _get_identity_geometries
 from cuqi.distribution import Distribution
+from cuqi.density import Density
 
 # ========================================================================
 class Posterior(Distribution):
@@ -25,6 +26,14 @@ class Posterior(Distribution):
         self.prior = prior
         super().__init__(**kwargs)
 
+    def get_density(self, name) -> Density:
+        """ Return a density with the given name. """
+        if name == self.likelihood.name:
+            return self.likelihood
+        if name == self.prior.name:
+            return self.prior
+        raise ValueError(f"No density with name {name}.")
+
     @property
     def data(self):
         return self.likelihood.data
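
The new get_density accessor resolves the two component densities of a Posterior by parameter name. A short usage sketch (the model is illustrative):

    import numpy as np
    from cuqi.distribution import Gaussian, JointDistribution

    x = Gaussian(np.zeros(3), 1)
    y = Gaussian(lambda x: x, 0.1)
    posterior = JointDistribution(x, y)(y=np.ones(3))  # reduces to a Posterior

    prior = posterior.get_density("x")       # returns the prior
    likelihood = posterior.get_density("y")  # returns the likelihood
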
cuqi/experimental/__init__.py CHANGED
@@ -1,5 +1,2 @@
 """ Experimental module for testing new features and ideas. """
-from . import mcmc
-from . import algebra
-from . import geometry
-from ._recommender import SamplerRecommender
+from ._recommender import SamplerRecommender
cuqi/experimental/_recommender.py CHANGED
@@ -3,7 +3,7 @@ import inspect
 import numpy as np
 
 # This import makes suggest_sampler easier to read
-import cuqi.experimental.mcmc as samplers
+import cuqi.sampler as samplers
 
 
 class SamplerRecommender(object):
@@ -15,7 +15,7 @@ class SamplerRecommender(object):
     target: Density or JointDistribution
         Distribution to get sampler recommendations for.
 
-    exceptions: list[cuqi.experimental.mcmc.Sampler], *optional*
+    exceptions: list[cuqi.sampler.Sampler], *optional*
         Samplers not to be recommended.
 
     Example
@@ -104,7 +104,7 @@ class SamplerRecommender(object):
 
         """
 
-        all_samplers = [(name, cls) for name, cls in inspect.getmembers(cuqi.experimental.mcmc, inspect.isclass) if issubclass(cls, cuqi.experimental.mcmc.Sampler)]
+        all_samplers = [(name, cls) for name, cls in inspect.getmembers(cuqi.sampler, inspect.isclass) if issubclass(cls, cuqi.sampler.Sampler)]
         valid_samplers = []
 
         for name, sampler in all_samplers:
@@ -116,7 +116,7 @@ class SamplerRecommender(object):
 
         # Need a separate case for HybridGibbs
         if self.valid_HybridGibbs_sampling_strategy() is not None:
-            valid_samplers += [cuqi.experimental.mcmc.HybridGibbs.__name__ if as_string else cuqi.experimental.mcmc.HybridGibbs]
+            valid_samplers += [cuqi.sampler.HybridGibbs.__name__ if as_string else cuqi.sampler.HybridGibbs]
 
         return valid_samplers
 
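
A minimal construction sketch; the constructor follows the Parameters section above, while the query method name is an assumption inferred from the bodies visible in this diff:

    from cuqi.experimental import SamplerRecommender

    recommender = SamplerRecommender(posterior)  # posterior: any cuqi Density/JointDistribution
    # Assumed method name, suggested by the `as_string` flag and `valid_samplers` list above:
    names = recommender.valid_samplers(as_string=True)
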
cuqi/geometry/__init__.py CHANGED
@@ -16,6 +16,8 @@ from ._geometry import (
     StepExpansion
 )
 
+from ._product_geometry import _ProductGeometry
+
 
 # TODO: We will remove the use of identity geometries in the future
 _identity_geometries = [_DefaultGeometry1D, _DefaultGeometry2D, Continuous1D, Continuous2D, Discrete, Image2D]
cuqi/{experimental/geometry/_productgeometry.py → geometry/_product_geometry.py} RENAMED
@@ -17,7 +17,7 @@ class _ProductGeometry(Geometry):
     .. code-block:: python
         import numpy as np
         from cuqi.geometry import Continuous1D, Discrete
-        from cuqi.experimental.geometry import _ProductGeometry
+        from cuqi.geometry import _ProductGeometry
         geometry1 = Continuous1D(np.linspace(0, 1, 100))
         geometry2 = Discrete(["sound_speed"])
         product_geometry = _ProductGeometry(geometry1, geometry2)
cuqi/implicitprior/__init__.py CHANGED
@@ -1,5 +1,5 @@
 from ._regularizedGaussian import RegularizedGaussian, ConstrainedGaussian, NonnegativeGaussian
 from ._regularizedGMRF import RegularizedGMRF, ConstrainedGMRF, NonnegativeGMRF
 from ._regularizedUnboundedUniform import RegularizedUnboundedUniform
-from ._restorator import RestorationPrior, MoreauYoshidaPrior
+from ._restorator import RestorationPrior, MoreauYoshidaPrior, TweediePrior
 
cuqi/implicitprior/_restorator.py CHANGED
@@ -232,4 +232,38 @@ class MoreauYoshidaPrior(Distribution):
         """ Returns the conditioning variables of the distribution. """
         # Currently conditioning variables are not supported for user-defined
         # distributions.
-        return []
+        return []
+
+class TweediePrior(MoreauYoshidaPrior):
+    """
+    Alias for MoreauYoshidaPrior following Tweedie's formula framework. TweediePrior
+    defines priors where gradients are computed based on Tweedie's identity that links
+    MMSE (Minimum Mean Square Error) denoisers with the underlying smoothed prior, see:
+    - Laumont et al. https://arxiv.org/abs/2103.04715 or https://doi.org/10.1137/21M1406349
+
+    Tweedie's Formula
+    -----------------
+    In the context of denoising, Tweedie's identity states that for a signal x
+    corrupted by Gaussian noise:
+
+        ∇_x log p_e(x) = (D_e(x) - x) / e
+
+    where D_e(x) is the MMSE denoiser output and e is the noise variance.
+    This enables us to perform gradient-based sampling with algorithms like ULA.
+
+    At implementation level, TweediePrior shares identical functionality with MoreauYoshidaPrior.
+    Thus, it is implemented as an alias of MoreauYoshidaPrior, meaning all methods,
+    properties, and behavior are identical. The separate name provides clarity when
+    working specifically with Tweedie's formula-based approaches.
+
+    Parameters
+    ----------
+    prior : RestorationPrior
+        Prior of the RestorationPrior type containing a denoiser/restorator.
+
+    smoothing_strength : float, default=0.1
+        Corresponds to the noise variance e in Tweedie's formula context.
+
+    See MoreauYoshidaPrior for the underlying implementation with complete documentation.
+    """
+    pass
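
As a sanity check of the identity quoted in the docstring: for a Gaussian prior N(0, σ²), the smoothed density p_e is N(0, σ² + e) and the MMSE denoiser is linear shrinkage, so Tweedie's formula can be verified in closed form. A small numpy sketch (standalone, not CUQIpy API):

    import numpy as np

    sigma2, e = 2.0, 0.1   # prior variance and noise variance
    x = np.linspace(-3, 3, 7)

    D_e = sigma2 / (sigma2 + e) * x  # MMSE denoiser for the N(0, sigma2) prior
    tweedie_grad = (D_e - x) / e     # (D_e(x) - x) / e
    exact_grad = -x / (sigma2 + e)   # gradient of log N(0, sigma2 + e)

    assert np.allclose(tweedie_grad, exact_grad)
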
cuqi/legacy/__init__.py ADDED
@@ -0,0 +1,2 @@
+""" Legacy module for functionalities that are no longer supported or developed. """
+from . import sampler
cuqi/legacy/sampler/__init__.py ADDED
@@ -0,0 +1,11 @@
+from ._sampler import Sampler, ProposalBasedSampler
+from ._conjugate import Conjugate
+from ._conjugate_approx import ConjugateApprox
+from ._cwmh import CWMH
+from ._gibbs import Gibbs
+from ._hmc import NUTS
+from ._langevin_algorithm import ULA, MALA
+from ._laplace_approximation import UGLA
+from ._mh import MH
+from ._pcn import pCN
+from ._rto import LinearRTO, RegularizedLinearRTO
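
These re-exports keep the pre-1.4 sampler classes importable under the legacy namespace, e.g.:

    # Old-style samplers now live under cuqi.legacy.sampler
    from cuqi.legacy.sampler import MH, pCN, LinearRTO
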
cuqi/legacy/sampler/_conjugate.py ADDED
@@ -0,0 +1,55 @@
+from cuqi.distribution import Posterior, Gaussian, Gamma, GMRF
+from cuqi.implicitprior import RegularizedGaussian, RegularizedGMRF
+import numpy as np
+
+class Conjugate: # TODO: Subclass from Sampler once updated
+    """ Conjugate sampler
+
+    Sampler for sampling a posterior distribution where the likelihood and prior are conjugate.
+
+    Currently supported conjugate pairs are:
+    - (Gaussian, Gamma)
+    - (GMRF, Gamma)
+    - (RegularizedGaussian, Gamma) with nonnegativity constraints only
+
+    For more information on conjugate pairs, see https://en.wikipedia.org/wiki/Conjugate_prior.
+
+    For implicit regularized Gaussians see:
+
+    [1] Everink, Jasper M., Yiqiu Dong, and Martin S. Andersen. "Bayesian inference with projected densities." SIAM/ASA Journal on Uncertainty Quantification 11.3 (2023): 1025-1043.
+
+    """
+
+    def __init__(self, target: Posterior):
+        if not isinstance(target.likelihood.distribution, (Gaussian, GMRF, RegularizedGaussian, RegularizedGMRF)):
+            raise ValueError("Conjugate sampler only works with a Gaussian-type likelihood function")
+        if not isinstance(target.prior, Gamma):
+            raise ValueError("Conjugate sampler only works with Gamma prior")
+        if not target.prior.dim == 1:
+            raise ValueError("Conjugate sampler only works with univariate Gamma prior")
+
+        if isinstance(target.likelihood.distribution, (RegularizedGaussian, RegularizedGMRF)) and (target.likelihood.distribution.preset["constraint"] not in ["nonnegativity"] or target.likelihood.distribution.preset["regularization"] is not None):
+            raise ValueError("Conjugate sampler only works implicit regularized Gaussian likelihood with nonnegativity constraints")
+
+        self.target = target
+
+    def step(self, x=None):
+        # Extract variables
+        b = self.target.likelihood.data #mu
+        m = self._calc_m_for_Gaussians(b) #n
+        Ax = self.target.likelihood.distribution.mean #x_i
+        L = self.target.likelihood.distribution(np.array([1])).sqrtprec #L
+        alpha = self.target.prior.shape #alpha
+        beta = self.target.prior.rate #beta
+
+        # Create Gamma distribution and sample
+        dist = Gamma(shape=m/2+alpha,rate=.5*np.linalg.norm(L@(Ax-b))**2+beta)
+
+        return dist.sample()
+
+    def _calc_m_for_Gaussians(self, b):
+        """ Helper method to calculate m parameter for Gaussian-Gamma conjugate pair. """
+        if isinstance(self.target.likelihood.distribution, (Gaussian, GMRF)):
+            return len(b)
+        elif isinstance(self.target.likelihood.distribution, (RegularizedGaussian, RegularizedGMRF)):
+            return np.count_nonzero(b) # See
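
The step method above is the standard Gaussian-Gamma precision update: with m residual terms and a Gamma(alpha, beta) prior on the noise precision, the conditional is Gamma(m/2 + alpha, ||L(Ax - b)||^2 / 2 + beta). A small standalone numpy sketch of that update (not the sampler class itself):

    import numpy as np

    rng = np.random.default_rng(0)
    b = rng.standard_normal(100)  # data
    Ax = np.zeros(100)            # model prediction (mean of the likelihood)
    L = np.eye(100)               # square root of the (unit) precision factor
    alpha, beta = 1.0, 1e-4       # Gamma prior on the noise precision

    m = len(b)
    shape = m / 2 + alpha
    rate = 0.5 * np.linalg.norm(L @ (Ax - b)) ** 2 + beta
    precision_sample = rng.gamma(shape, 1 / rate)  # numpy parameterizes by scale = 1/rate
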
cuqi/legacy/sampler/_conjugate_approx.py ADDED
@@ -0,0 +1,52 @@
+from cuqi.distribution import Posterior, LMRF, Gamma
+import numpy as np
+import scipy as sp
+
+class ConjugateApprox: # TODO: Subclass from Sampler once updated
+    """ Approximate Conjugate sampler
+
+    Sampler for sampling a posterior distribution where the likelihood and prior can be approximated
+    by a conjugate pair.
+
+    Currently supported pairs are:
+    - (LMRF, Gamma): Approximated by (Gaussian, Gamma)
+
+    For more information on conjugate pairs, see https://en.wikipedia.org/wiki/Conjugate_prior.
+
+    """
+
+    def __init__(self, target: Posterior):
+        if not isinstance(target.likelihood.distribution, LMRF):
+            raise ValueError("Conjugate sampler only works with Laplace diff likelihood function")
+        if not isinstance(target.prior, Gamma):
+            raise ValueError("Conjugate sampler only works with Gamma prior")
+        self.target = target
+
+    def step(self, x=None):
+        # Extract variables
+        # Here we approximate the Laplace diff with a Gaussian
+
+        # Extract diff_op from target likelihood
+        D = self.target.likelihood.distribution._diff_op
+        n = D.shape[0]
+
+        # Gaussian approximation of LMRF prior as function of x_k
+        # See Uribe et al. (2022) for details
+        # Current has a zero mean assumption on likelihood! TODO
+        beta = 1e-5
+        def Lk_fun(x_k):
+            dd = 1/np.sqrt((D @ x_k)**2 + beta*np.ones(n))
+            W = sp.sparse.diags(dd)
+            return W.sqrt() @ D
+
+        x = self.target.likelihood.data #x
+        d = len(x) #d
+        Lx = Lk_fun(x)@x #Lx
+        alpha = self.target.prior.shape #alpha
+        beta = self.target.prior.rate #beta
+
+        # Create Gamma distribution and sample
+        dist = Gamma(shape=d+alpha, rate=np.linalg.norm(Lx)**2+beta)
+
+        return dist.sample()
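
Lk_fun above builds the Gaussian (IRLS-style) approximation of the LMRF prior: each difference (D x_k)_i receives weight 1/sqrt((D x_k)_i^2 + beta), giving a weighted difference operator L_k whose normal matrix approximates the Laplacian-difference energy. A small standalone numpy/scipy sketch of that weighting:

    import numpy as np
    import scipy.sparse as sparse

    n = 5
    D = sparse.diags([-np.ones(n), np.ones(n)], [0, 1], shape=(n, n + 1))  # forward differences
    x_k = np.linspace(0, 1, n + 1)
    beta = 1e-5

    dd = 1 / np.sqrt((D @ x_k) ** 2 + beta)  # per-difference weights
    L_k = sparse.diags(np.sqrt(dd)) @ D      # weighted difference operator
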