CUQIpy 1.1.1.post0.dev36__py3-none-any.whl → 1.4.1.post0.dev124__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (92)
  1. cuqi/__init__.py +2 -0
  2. cuqi/_version.py +3 -3
  3. cuqi/algebra/__init__.py +2 -0
  4. cuqi/algebra/_abstract_syntax_tree.py +358 -0
  5. cuqi/algebra/_ordered_set.py +82 -0
  6. cuqi/algebra/_random_variable.py +457 -0
  7. cuqi/array/_array.py +4 -13
  8. cuqi/config.py +7 -0
  9. cuqi/density/_density.py +9 -1
  10. cuqi/distribution/__init__.py +3 -2
  11. cuqi/distribution/_beta.py +7 -11
  12. cuqi/distribution/_cauchy.py +2 -2
  13. cuqi/distribution/_custom.py +0 -6
  14. cuqi/distribution/_distribution.py +31 -45
  15. cuqi/distribution/_gamma.py +7 -3
  16. cuqi/distribution/_gaussian.py +2 -12
  17. cuqi/distribution/_inverse_gamma.py +4 -10
  18. cuqi/distribution/_joint_distribution.py +112 -15
  19. cuqi/distribution/_lognormal.py +0 -7
  20. cuqi/distribution/{_modifiedhalfnormal.py → _modified_half_normal.py} +23 -23
  21. cuqi/distribution/_normal.py +34 -7
  22. cuqi/distribution/_posterior.py +9 -0
  23. cuqi/distribution/_truncated_normal.py +129 -0
  24. cuqi/distribution/_uniform.py +47 -1
  25. cuqi/experimental/__init__.py +2 -2
  26. cuqi/experimental/_recommender.py +216 -0
  27. cuqi/geometry/__init__.py +2 -0
  28. cuqi/geometry/_geometry.py +15 -1
  29. cuqi/geometry/_product_geometry.py +181 -0
  30. cuqi/implicitprior/__init__.py +5 -3
  31. cuqi/implicitprior/_regularized_gaussian.py +483 -0
  32. cuqi/implicitprior/{_regularizedGMRF.py → _regularized_gmrf.py} +4 -2
  33. cuqi/implicitprior/{_regularizedUnboundedUniform.py → _regularized_unbounded_uniform.py} +3 -2
  34. cuqi/implicitprior/_restorator.py +269 -0
  35. cuqi/legacy/__init__.py +2 -0
  36. cuqi/{experimental/mcmc → legacy/sampler}/__init__.py +7 -11
  37. cuqi/legacy/sampler/_conjugate.py +55 -0
  38. cuqi/legacy/sampler/_conjugate_approx.py +52 -0
  39. cuqi/legacy/sampler/_cwmh.py +196 -0
  40. cuqi/legacy/sampler/_gibbs.py +231 -0
  41. cuqi/legacy/sampler/_hmc.py +335 -0
  42. cuqi/{experimental/mcmc → legacy/sampler}/_langevin_algorithm.py +82 -111
  43. cuqi/legacy/sampler/_laplace_approximation.py +184 -0
  44. cuqi/legacy/sampler/_mh.py +190 -0
  45. cuqi/legacy/sampler/_pcn.py +244 -0
  46. cuqi/{experimental/mcmc → legacy/sampler}/_rto.py +132 -90
  47. cuqi/legacy/sampler/_sampler.py +182 -0
  48. cuqi/likelihood/_likelihood.py +9 -1
  49. cuqi/model/__init__.py +1 -1
  50. cuqi/model/_model.py +1361 -359
  51. cuqi/pde/__init__.py +4 -0
  52. cuqi/pde/_observation_map.py +36 -0
  53. cuqi/pde/_pde.py +134 -33
  54. cuqi/problem/_problem.py +93 -87
  55. cuqi/sampler/__init__.py +120 -8
  56. cuqi/sampler/_conjugate.py +376 -35
  57. cuqi/sampler/_conjugate_approx.py +40 -16
  58. cuqi/sampler/_cwmh.py +132 -138
  59. cuqi/{experimental/mcmc → sampler}/_direct.py +1 -1
  60. cuqi/sampler/_gibbs.py +288 -130
  61. cuqi/sampler/_hmc.py +328 -201
  62. cuqi/sampler/_langevin_algorithm.py +284 -100
  63. cuqi/sampler/_laplace_approximation.py +87 -117
  64. cuqi/sampler/_mh.py +47 -157
  65. cuqi/sampler/_pcn.py +65 -213
  66. cuqi/sampler/_rto.py +211 -142
  67. cuqi/sampler/_sampler.py +553 -136
  68. cuqi/samples/__init__.py +1 -1
  69. cuqi/samples/_samples.py +24 -18
  70. cuqi/solver/__init__.py +6 -4
  71. cuqi/solver/_solver.py +230 -26
  72. cuqi/testproblem/_testproblem.py +2 -3
  73. cuqi/utilities/__init__.py +6 -1
  74. cuqi/utilities/_get_python_variable_name.py +2 -2
  75. cuqi/utilities/_utilities.py +182 -2
  76. {CUQIpy-1.1.1.post0.dev36.dist-info → cuqipy-1.4.1.post0.dev124.dist-info}/METADATA +10 -6
  77. cuqipy-1.4.1.post0.dev124.dist-info/RECORD +101 -0
  78. {CUQIpy-1.1.1.post0.dev36.dist-info → cuqipy-1.4.1.post0.dev124.dist-info}/WHEEL +1 -1
  79. CUQIpy-1.1.1.post0.dev36.dist-info/RECORD +0 -92
  80. cuqi/experimental/mcmc/_conjugate.py +0 -197
  81. cuqi/experimental/mcmc/_conjugate_approx.py +0 -81
  82. cuqi/experimental/mcmc/_cwmh.py +0 -191
  83. cuqi/experimental/mcmc/_gibbs.py +0 -268
  84. cuqi/experimental/mcmc/_hmc.py +0 -470
  85. cuqi/experimental/mcmc/_laplace_approximation.py +0 -156
  86. cuqi/experimental/mcmc/_mh.py +0 -78
  87. cuqi/experimental/mcmc/_pcn.py +0 -89
  88. cuqi/experimental/mcmc/_sampler.py +0 -561
  89. cuqi/experimental/mcmc/_utilities.py +0 -17
  90. cuqi/implicitprior/_regularizedGaussian.py +0 -323
  91. {CUQIpy-1.1.1.post0.dev36.dist-info → cuqipy-1.4.1.post0.dev124.dist-info/licenses}/LICENSE +0 -0
  92. {CUQIpy-1.1.1.post0.dev36.dist-info → cuqipy-1.4.1.post0.dev124.dist-info}/top_level.txt +0 -0
@@ -28,37 +28,6 @@ class Distribution(Density, ABC):
     is_symmetric : bool, default None
         Indicator if distribution is symmetric.
 
-    Attributes
-    ----------
-    dim : int or None
-        Dimension of distribution.
-
-    name : str or None
-        Name of distribution.
-
-    geometry : Geometry or None
-        Geometry of distribution.
-
-    is_cond : bool
-        Indicator if distribution is conditional.
-
-    Methods
-    -------
-    pdf():
-        Evaluate the probability density function.
-
-    logpdf():
-        Evaluate the log probability density function.
-
-    sample():
-        Generate one or more random samples.
-
-    get_conditioning_variables():
-        Return the conditioning variables of distribution.
-
-    get_mutable_variables():
-        Return the mutable variables (attributes and properties) of distribution.
-
     Notes
     -----
     A distribution can be conditional if one or more mutable variables are unspecified.
@@ -136,7 +105,7 @@ class Distribution(Density, ABC):
                 f"Inconsistent distribution geometry attribute {self._geometry} and inferred "
                 f"dimension from distribution variables {inferred_dim}."
             )
-        
+
         # If Geometry dimension is None, update it with the inferred dimension
         if inferred_dim and self._geometry.par_dim is None:
             self.geometry = inferred_dim
@@ -148,7 +117,7 @@ class Distribution(Density, ABC):
         # We do not use self.name to potentially infer it from python stack.
         if self._name:
             self._geometry._variable_name = self._name
-        
+
         return self._geometry
 
     @geometry.setter
@@ -191,7 +160,7 @@ class Distribution(Density, ABC):
                     f"{self.logd.__qualname__}: To evaluate the log density all conditioning variables and main"
                     f" parameter must be specified. Conditioning variables are: {cond_vars}"
                 )
-            
+
             # Check if all conditioning variables are specified
             all_cond_vars_specified = all([key in kwargs for key in cond_vars])
             if not all_cond_vars_specified:
@@ -199,7 +168,7 @@ class Distribution(Density, ABC):
                     f"{self.logd.__qualname__}: To evaluate the log density all conditioning variables must be"
                     f" specified. Conditioning variables are: {cond_vars}"
                 )
-            
+
             # Extract exactly the conditioning variables from kwargs
             cond_kwargs = {key: kwargs[key] for key in cond_vars}
 
@@ -217,7 +186,7 @@ class Distribution(Density, ABC):
         # Not conditional distribution, simply evaluate log density directly
         else:
             return super().logd(*args, **kwargs)
-        
+
     def _logd(self, *args):
         return self.logpdf(*args) # Currently all distributions implement logpdf so we simply call this method.
 
@@ -239,6 +208,7 @@ class Distribution(Density, ABC):
                                       "enable_FD().")
 
     def sample(self,N=1,*args,**kwargs):
+        """ Sample from the distribution. """
 
         if self.is_cond:
            raise ValueError(f"Cannot sample from conditional distribution. Missing conditioning variables: {self.get_conditioning_variables()}")
@@ -246,7 +216,7 @@ class Distribution(Density, ABC):
         # Get samples from the distribution sample method
         s = self._sample(N,*args,**kwargs)
 
-        #Store samples in cuqi samples object if more than 1 sample
+        # Store samples in cuqi samples object if more than 1 sample
         if N==1:
             if len(s) == 1 and isinstance(s,np.ndarray): #Extract single value from numpy array
                 s = s.ravel()[0]
@@ -263,6 +233,7 @@ class Distribution(Density, ABC):
         pass
 
     def pdf(self,x):
+        """ Evaluate the log probability density function of the distribution. """
         return np.exp(self.logpdf(x))
 
     def _condition(self, *args, **kwargs):
@@ -293,7 +264,7 @@ class Distribution(Density, ABC):
         # Go through every mutable variable and assign value from kwargs if present
         for var_key in mutable_vars:
 
-            #If keyword directly specifies new value of variable we simply reassign
+            # If keyword directly specifies new value of variable we simply reassign
             if var_key in kwargs:
                 setattr(new_dist, var_key, kwargs.get(var_key))
                 processed_kwargs.add(var_key)
@@ -320,9 +291,18 @@ class Distribution(Density, ABC):
 
                 elif len(var_args)>0: #Some keywords found
                     # Define new partial function with partially defined args
-                    func = partial(var_val, **var_args)
+                    if (
+                        hasattr(var_val, "_supports_partial_eval")
+                        and var_val._supports_partial_eval
+                    ):
+                        func = var_val(**var_args)
+                    else:
+                        # If the callable does not support partial evaluation,
+                        # we use the partial function to set the variable
+                        func = partial(var_val, **var_args)
+
                     setattr(new_dist, var_key, func)
-        
+
         # Store processed keywords
         processed_kwargs.update(var_args.keys())
 
@@ -358,7 +338,7 @@ class Distribution(Density, ABC):
 
     def get_conditioning_variables(self):
         """Return the conditioning variables of this distribution (if any)."""
-        
+
         # Get all mutable variables
         mutable_vars = self.get_mutable_variables()
 
@@ -367,7 +347,7 @@ class Distribution(Density, ABC):
 
         # Add any variables defined through callable functions
        cond_vars += get_indirect_variables(self)
-        
+
         return cond_vars
 
     def get_mutable_variables(self):
@@ -376,10 +356,10 @@ class Distribution(Density, ABC):
         # If mutable variables are already cached, return them
         if hasattr(self, '_mutable_vars'):
             return self._mutable_vars
-        
+
         # Define list of ignored attributes and properties
         ignore_vars = ['name', 'is_symmetric', 'geometry', 'dim']
-        
+
         # Get public attributes
         attributes = get_writeable_attributes(self)
 
@@ -425,7 +405,7 @@ class Distribution(Density, ABC):
                     raise ValueError(f"{self._condition.__qualname__}: {ordered_keys[index]} passed as both argument and keyword argument.\nArguments follow the listed conditioning variable order: {self.get_conditioning_variables()}")
                 kwargs[ordered_keys[index]] = arg
         return kwargs
-    
+
     def _check_geometry_consistency(self):
         """ Checks that the geometry of the distribution is consistent by calling the geometry property. Should be called at the end of __init__ of subclasses. """
         self.geometry
@@ -435,3 +415,9 @@ class Distribution(Density, ABC):
             return "CUQI {}. Conditioning variables {}.".format(self.__class__.__name__,self.get_conditioning_variables())
         else:
             return "CUQI {}.".format(self.__class__.__name__)
+
+    @property
+    def rv(self):
+        """ Return a random variable object representing the distribution. """
+        from cuqi.algebra import RandomVariable
+        return RandomVariable(self)
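The new `rv` property above wraps a distribution as a `cuqi.algebra.RandomVariable`. A minimal usage sketch (the Gaussian and its parameters are illustrative, not taken from this diff):

    import numpy as np
    import cuqi

    # A simple Gaussian distribution with illustrative parameters
    x = cuqi.distribution.Gaussian(np.zeros(3), 1.0)

    # Wrap the distribution as a RandomVariable via the new property
    x_rv = x.rv  # equivalent to cuqi.algebra.RandomVariable(x)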
@@ -6,14 +6,18 @@ from cuqi.utilities import force_ndarray
 class Gamma(Distribution):
     """
     Represents a multivariate Gamma distribution characterized by shape and rate parameters of independent random variables x_i. Each is distributed according to the PDF function
+
+    .. math::
 
-    f(x_i; shape, rate) = rate^shape * x_i^(shape-1) * exp(-rate * x_i) / Gamma(shape)
+        f(x_i; \\alpha, \\beta) = \\beta^\\alpha x_i^{\\alpha-1} \\exp(-\\beta x_i) / \Gamma(\\alpha)
 
-    where `shape` and `rate` are the parameters of the distribution, and Gamma is the Gamma function.
+    where shape :math:`\\alpha` and rate :math:`\\beta` are the parameters of the distribution, and :math:`\Gamma` is the Gamma function.
 
     In case shape and/or rate are arrays, the pdf looks like
 
-    f(x_i; shape_i, rate_i) = rate_i^shape_i * x_i^(shape_i-1) * exp(-rate_i * x_i) / Gamma(shape_i)
+    .. math::
+
+        f(x_i; \\alpha_i, \\beta_i) = \\beta_i^{\\alpha_i} x_i^{\\alpha_i-1} \\exp(-\\beta_i x_i) / \Gamma(\\alpha_i)
 
     Parameters
     ----------
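The reformatted Gamma docstring states the density f(x_i; alpha, beta) = beta^alpha x_i^(alpha-1) exp(-beta x_i) / Gamma(alpha). A quick sanity-check sketch against scipy, assuming the `shape`/`rate` keyword names used in the docstring (scipy parametrizes by shape and scale = 1/rate):

    import numpy as np
    from scipy.stats import gamma as sp_gamma
    import cuqi

    shape, rate = 2.0, 3.0
    x = np.array([0.5])

    # CUQIpy Gamma uses shape (alpha) and rate (beta); scipy uses scale = 1/rate
    cuqi_logpdf = cuqi.distribution.Gamma(shape=shape, rate=rate).logpdf(x)
    scipy_logpdf = sp_gamma.logpdf(x, a=shape, scale=1/rate).sum()
    assert np.isclose(cuqi_logpdf, scipy_logpdf)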
@@ -730,18 +730,6 @@ class JointGaussianSqrtPrec(Distribution):
     ------------
     means: List of means for each Gaussian distribution.
     sqrtprecs: List of sqrt precision matricies for each Gaussian distribution.
-
-    Attributes
-    ------------
-    sqrtprec: Returns the sqrt precision matrix of the joined gaussian in stacked form.
-    sqrtprecTimesMean: Returns the sqrt precision matrix times the mean of the distribution.
-
-    Methods
-    -----------
-    sample: generate one or more random samples (NotImplemented)
-    pdf: evaluate probability density function (NotImplemented)
-    logpdf: evaluate log probability density function (NotImplemented)
-    cdf: evaluate cumulative probability function (NotImplemented)
     """
     def __init__(self,means=None,sqrtprecs=None,is_symmetric=True,**kwargs):
 
@@ -783,6 +771,7 @@ class JointGaussianSqrtPrec(Distribution):
 
     @property
     def sqrtprec(self):
+        """ Returns the sqrt precision matrix of the joined gaussian in stacked form. """
         if spa.issparse(self._sqrtprecs[0]):
             return spa.vstack((self._sqrtprecs))
         else:
@@ -790,6 +779,7 @@ class JointGaussianSqrtPrec(Distribution):
 
     @property
     def sqrtprecTimesMean(self):
+        """ Returns the sqrt precision matrix times the mean of the distribution."""
         result = []
         for i in range(len(self._means)):
             result.append((self._sqrtprecs[i]@self._means[i]).flatten())
@@ -8,9 +8,11 @@ class InverseGamma(Distribution):
     """
     Multivariate inverse gamma distribution of independent random variables x_i. Each is distributed according to the PDF function
 
-    f(x) = (x-location)^(-shape-1) * exp(-scale/(x-location)) / (scale^(-shape)*Gamma(shape))
+    .. math::
 
-    where shape, location and scale are the shape, location and scale of x_i, respectively. And Gamma is the Gamma function.
+        f(x) = (x-\\beta)^{(-\\alpha-1)} * \exp(-\\gamma/(x-\\beta)) / (\\gamma^{(-\\alpha)}*\Gamma(\\alpha))
+
+    where shape :math:`\\alpha`, location :math:`\\beta` and scale :math:`\\gamma` are the shape, location and scale of x_i, respectively. And :math:`\Gamma` is the Gamma function.
 
     Parameters
     ------------
@@ -23,14 +25,6 @@ class InverseGamma(Distribution):
     scale: float or array_like
         The scale of the inverse gamma distribution (non-negative)
 
-
-    Methods
-    -----------
-    sample: generate one or more random samples
-    pdf: evaluate probability density function
-    logpdf: evaluate log probability density function
-    cdf: evaluate cumulative probability function
-
     Example
     -------
     .. code-block:: python
@@ -5,6 +5,7 @@ from cuqi.density import Density, EvaluatedDensity
 from cuqi.distribution import Distribution, Posterior
 from cuqi.likelihood import Likelihood
 from cuqi.geometry import Geometry, _DefaultGeometry1D
+import cuqi
 import numpy as np # for splitting array. Can avoid.
 
 class JointDistribution:
@@ -13,9 +14,11 @@ class JointDistribution:
 
     Parameters
     ----------
-    densities : Density
+    densities : RandomVariable or Density
         The densities to include in the joint distribution.
-        Each density is passed as comma-separated arguments.
+        Each density is passed as comma-separated arguments,
+        and can be either a :class:'Density' such as :class:'Distribution'
+        or :class:`RandomVariable`.
 
     Notes
     -----
@@ -59,7 +62,16 @@ class JointDistribution:
         posterior = joint(y=y_obs)
 
     """
-    def __init__(self, *densities: Density):
+    def __init__(self, *densities: [Density, cuqi.algebra.RandomVariable]):
+        """ Create a joint distribution from the given densities. """
+
+        # Check if all RandomVariables are simple (not-transformed)
+        for density in densities:
+            if isinstance(density, cuqi.algebra.RandomVariable) and density.is_transformed:
+                raise ValueError(f"To be used in {self.__class__.__name__}, all RandomVariables must be untransformed.")
+
+        # Convert potential random variables to their underlying distribution
+        densities = [density.distribution if isinstance(density, cuqi.algebra.RandomVariable) else density for density in densities]
 
         # Ensure all densities have unique names
         names = [density.name for density in densities]
@@ -72,6 +84,8 @@ class JointDistribution:
         cond_vars = self._get_conditioning_variables()
         if len(cond_vars) > 0:
             raise ValueError(f"Every density parameter must have a distribution (prior). Missing prior for {cond_vars}.")
+        # Initialize finite difference gradient approximation settings
+        self.disable_FD()
 
     # --------- Public properties ---------
     @property
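With the `__init__` change above, `JointDistribution` also accepts untransformed RandomVariables and unwraps them to their underlying distributions. A rough sketch (distribution names, dimensions, and data values below are illustrative assumptions):

    import numpy as np
    import cuqi

    # Prior on x and a data distribution y whose mean depends on x
    x = cuqi.distribution.Gaussian(np.zeros(3), 1.0)
    y = cuqi.distribution.Gaussian(lambda x: x, 0.1, geometry=3)

    # RandomVariables (e.g. from the new rv property) or plain densities both work
    joint = cuqi.distribution.JointDistribution(x.rv, y.rv)

    # Conditioning on observed data reduces the joint to a posterior over x
    posterior = joint(y=np.array([0.1, -0.2, 0.3]))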
@@ -84,6 +98,38 @@ class JointDistribution:
         """ Returns the geometries of the joint distribution. """
         return [dist.geometry for dist in self._distributions]
 
+    @property
+    def FD_enabled(self):
+        """ Returns a dictionary of keys and booleans indicating for each
+        parameter name (key) if finite difference approximation of the logd
+        gradient is enabled. """
+        par_names = self.get_parameter_names()
+        FD_enabled = {
+            par_name: self.FD_epsilon[par_name] is not None for par_name in par_names
+        }
+        return FD_enabled
+
+    @property
+    def FD_epsilon(self):
+        """ Returns a dictionary indicating for each parameter name the
+        spacing for the finite difference approximation of the logd gradient."""
+        return self._FD_epsilon
+
+    @FD_epsilon.setter
+    def FD_epsilon(self, value):
+        """ Set the spacing for the finite difference approximation of the
+        logd gradient as a dictionary. The keys are the parameter names.
+        The value for each key is either None (no FD approximation) or a float
+        representing the FD step size.
+        """
+        par_names = self.get_parameter_names()
+        if value is None:
+            self._FD_epsilon = {par_name: None for par_name in par_names}
+        else:
+            if set(value.keys()) != set(par_names):
+                raise ValueError("Keys of FD_epsilon must match the parameter names of the distribution "+f" {par_names}")
+            self._FD_epsilon = value
+
     # --------- Public methods ---------
     def logd(self, *args, **kwargs):
         """ Evaluate the un-normalized log density function. """
@@ -124,6 +170,33 @@ class JointDistribution:
         # Can reduce to Posterior, Likelihood or Distribution.
         return new_joint._reduce_to_single_density()
 
+    def enable_FD(self, epsilon=None):
+        """ Enable finite difference approximation for logd gradient. Note
+        that if enabled, the FD approximation will be used even if the
+        _gradient method is implemented. By default, all parameters
+        will have FD enabled with a step size of 1e-8.
+
+        Parameters
+        ----------
+        epsilon : dict, *optional*
+
+            Dictionary indicating the spacing (step size) to use for finite
+            difference approximation for logd gradient for each variable.
+
+            Keys are variable names.
+            Values are either a float to enable FD with the given value as the FD
+            step size, or None to disable FD for that variable. Default is 1e-8 for
+            all variables.
+        """
+        if epsilon is None:
+            epsilon = {par_name: 1e-8 for par_name in self.get_parameter_names()}
+        self.FD_epsilon = epsilon
+
+    def disable_FD(self):
+        """ Disable finite difference approximation for logd gradient. """
+        par_names = self.get_parameter_names()
+        self.FD_epsilon = {par_name: None for par_name in par_names}
+
     def get_parameter_names(self) -> List[str]:
         """ Returns the parameter names of the joint distribution. """
         return [dist.name for dist in self._distributions]
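The `enable_FD`/`disable_FD` methods added above control a per-parameter finite-difference approximation of the logd gradient. A minimal sketch of how they might be called, reusing the illustrative setup from the previous sketch (parameter names `x` and `y` are assumptions):

    import numpy as np
    import cuqi

    x = cuqi.distribution.Gaussian(np.zeros(3), 1.0)
    y = cuqi.distribution.Gaussian(lambda x: x, 0.1, geometry=3)
    joint = cuqi.distribution.JointDistribution(x, y)

    # Enable FD for every parameter with the default step size of 1e-8
    joint.enable_FD()
    print(joint.FD_enabled)   # e.g. {'x': True, 'y': True}

    # Per-parameter control: the keys must match the parameter names exactly
    joint.enable_FD(epsilon={"x": 1e-6, "y": None})

    # Switch the approximation off again
    joint.disable_FD()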
@@ -190,34 +263,58 @@ class JointDistribution:
         # Count number of distributions and likelihoods
         n_dist = len(self._distributions)
         n_likelihood = len(self._likelihoods)
+        reduced_FD_epsilon = {par_name:self.FD_epsilon[par_name] for par_name in self.get_parameter_names()}
+        self.enable_FD(epsilon=reduced_FD_epsilon)
 
         # Cant reduce if there are multiple distributions or likelihoods
         if n_dist > 1:
             return self
 
+        # If only evaluated densities left return joint to ensure logd method is available
+        if n_dist == 0 and n_likelihood == 0:
+            return self
+
+        # Extract the parameter name of the distribution
+        if n_dist == 1:
+            par_name = self._distributions[0].name
+        elif n_likelihood == 1:
+            par_name = self._likelihoods[0].name
+        else:
+            par_name = None
+
         # If exactly one distribution and multiple likelihoods reduce
         if n_dist == 1 and n_likelihood > 1:
-            return MultipleLikelihoodPosterior(*self._densities)
-        
+            reduced_distribution = MultipleLikelihoodPosterior(*self._densities)
+            reduced_FD_epsilon = {par_name:self.FD_epsilon[par_name]}
+
         # If exactly one distribution and one likelihood its a Posterior
         if n_dist == 1 and n_likelihood == 1:
             # Ensure parameter names match, otherwise return the joint distribution
             if set(self._likelihoods[0].get_parameter_names()) != set(self._distributions[0].get_parameter_names()):
                 return self
-            return self._add_constants_to_density(Posterior(self._likelihoods[0], self._distributions[0]))
+            reduced_distribution = Posterior(self._likelihoods[0], self._distributions[0])
+            reduced_distribution = self._add_constants_to_density(reduced_distribution)
+            reduced_FD_epsilon = self.FD_epsilon[par_name]
 
         # If exactly one distribution and no likelihoods its a Distribution
         if n_dist == 1 and n_likelihood == 0:
-            return self._add_constants_to_density(self._distributions[0])
-        
+            # Intentionally skip enabling FD here. If the user wants FD, they
+            # can enable it for this particular distribution before forming
+            # the joint distribution.
+            return self._add_constants_to_density(self._distributions[0])
+
         # If no distributions and exactly one likelihood its a Likelihood
         if n_likelihood == 1 and n_dist == 0:
-            return self._likelihoods[0]
+            # This case seems to not happen in practice, but we include it for
+            # completeness.
+            reduced_distribution = self._likelihoods[0]
+            reduced_FD_epsilon = self.FD_epsilon[par_name]
+
+        if self.FD_enabled[par_name]:
+            reduced_distribution.enable_FD(epsilon=reduced_FD_epsilon)
+
+        return reduced_distribution
 
-        # If only evaluated densities left return joint to ensure logd method is available
-        if n_dist == 0 and n_likelihood == 0:
-            return self
-
 
     def _add_constants_to_density(self, density: Density):
         """ Add the constants (evaluated densities) to a single density. Used when reducing to single density. """
@@ -262,7 +359,7 @@ class JointDistribution:
         if len(cond_vars) > 0:
             msg += f"|{cond_vars}"
         msg += ")"
-        
+
         msg += "\n"
         msg += " Densities: \n"
 
@@ -271,7 +368,7 @@ class JointDistribution:
             msg += f"\t{density.name} ~ {density}\n"
 
         # Wrap up
-        msg += ")"
+        msg += " )"
 
         return msg
 
@@ -16,13 +16,6 @@ class Lognormal(Distribution):
     cov: np.ndarray
         Covariance matrix of the normal distribution used to define the lognormal distribution
 
-    Methods
-    -----------
-    sample: generate one or more random samples
-    pdf: evaluate probability density function
-    logpdf: evaluate log probability density function
-    cdf: evaluate cumulative probability function
-
     Example
     -------
     .. code-block:: python
@@ -7,37 +7,40 @@ from cuqi.utilities import force_ndarray
 class ModifiedHalfNormal(Distribution):
     """
     Represents a modified half-normal (MHN) distribution, a three-parameter family of distributions generalizing the Gamma distribution.
-    The distribution is continuous with pdf
-    f(x; alpha, beta, gamma) propto x^(alpha-1) * exp(-beta * x^2 + gamma * x)
+    The distribution is continuous with pdf
+
+    .. math::
+
+        f(x; \\alpha, \\beta, \\gamma) \propto x^{(\\alpha-1)} * \exp(-\\beta * x^2 + \\gamma * x)
 
     The MHN generalizes the half-normal distribution, because
-    f(x; 1, beta, 0) propto exp(-beta * x^2)
+    :math:`f(x; 1, \\beta, 0) \propto \exp(-\\beta * x^2)`
 
     The MHN generalizes the gamma distribution because
-    f(x; alpha, 0, -gamma) propto x^(alpha-1) * exp(- gamma * x)
+    :math:`f(x; \\alpha, 0, -\\gamma) \propto x^{(\\alpha-1)} * \exp(- \\gamma * x)`
 
     Reference:
     [1] Sun, et al. "The Modified-Half-Normal distribution: Properties and an efficient sampling scheme." Communications in Statistics-Theory and Methods
 
     Parameters
     ----------
-    alpha : float
-        The polynomial exponent parameter of the MHN distribution. Must be positive.
+    alpha : float or array_like
+        The polynomial exponent parameter :math:`\\alpha` of the MHN distribution. Must be positive.
 
-    beta : float
-        The quadratic exponential parameter of the MHN distribution. Must be positive.
+    beta : float or array_like
+        The quadratic exponential parameter :math:`\\beta` of the MHN distribution. Must be positive.
 
-    gamma : float
-        The linear exponential parameter of the MHN distribution.
+    gamma : float or array_like
+        The linear exponential parameter :math:`\\gamma` of the MHN distribution.
 
     """
     def __init__(self, alpha=None, beta=None, gamma=None, is_symmetric=False, **kwargs):
         # Init from abstract distribution class
         super().__init__(is_symmetric=is_symmetric, **kwargs)
 
-        self._alpha = alpha
-        self._beta = beta
-        self._gamma = gamma
+        self.alpha = alpha
+        self.beta = beta
+        self.gamma = gamma
 
     @property
     def alpha(self):
@@ -45,13 +48,13 @@ class ModifiedHalfNormal(Distribution):
         return self._alpha
 
     @alpha.setter
-    def shape(self, value):
-        self._shape = force_ndarray(value, flatten=True)
+    def alpha(self, value):
+        self._alpha = force_ndarray(value, flatten=True)
 
     @property
     def beta(self):
         """ The quadratic exponential parameter of the MHN distribution. Must be positive. """
-        return self._alpha
+        return self._beta
 
     @beta.setter
     def beta(self, value):
@@ -60,7 +63,7 @@ class ModifiedHalfNormal(Distribution):
     @property
     def gamma(self):
         """ The linear exponential parameter of the MHN distribution. """
-        return self._alpha
+        return self._gamma
 
     @gamma.setter
     def gamma(self, value):
@@ -75,11 +78,8 @@ class ModifiedHalfNormal(Distribution):
         return (self.alpha - 1)/val - 2*self.beta*val + self.gamma
 
     def _gradient(self, val, *args, **kwargs):
-        if hasattr(self.alpha, '__iter__'):
-            return np.array([self._gradient_scalar(v) for v in val])
-        else:
-            return np.array([self.dim*[self._gradient_scalar(v)] for v in val])
-
+        return np.array([self._gradient_scalar(v) for v in val])
+
     def _MHN_sample_gamma_proposal(self, alpha, beta, gamma, rng, delta=None):
         """
         Sample from a modified half-normal distribution using a Gamma distribution proposal.
@@ -177,7 +177,7 @@ class ModifiedHalfNormal(Distribution):
 
     def _sample(self, N, rng=None):
         if hasattr(self.alpha, '__getitem__'):
-            return np.array([self._MHN_sample(self.alpha[i], self.beta[i], self.gamma[i], rng=rng) for i in range(N)])
+            return np.array([[self._MHN_sample(self.alpha[i], self.beta[i], self.gamma[i], rng=rng) for i in range(len(self.alpha))] for _ in range(N)])
         else:
             return np.array([self._MHN_sample(self.alpha, self.beta, self.gamma, rng=rng) for i in range(N)])
 
@@ -1,5 +1,8 @@
 import numpy as np
+import numbers
 from scipy.special import erf
+from cuqi.geometry import _get_identity_geometries
+from cuqi.utilities import force_ndarray
 from cuqi.distribution import Distribution
 
 class Normal(Distribution):
@@ -12,13 +15,6 @@ class Normal(Distribution):
     mean: mean of distribution
     std: standard deviation
 
-    Methods
-    -----------
-    sample: generate one or more random samples
-    pdf: evaluate probability density function
-    logpdf: evaluate log probability density function
-    cdf: evaluate cumulative probability function
-
     Example
     -----------
     .. code-block:: python
@@ -34,6 +30,24 @@ class Normal(Distribution):
         self.mean = mean
         self.std = std
 
+    @property
+    def mean(self):
+        """ Mean of the distribution """
+        return self._mean
+
+    @mean.setter
+    def mean(self, value):
+        self._mean = force_ndarray(value, flatten=True)
+
+    @property
+    def std(self):
+        """ Std of the distribution """
+        return self._std
+
+    @std.setter
+    def std(self, value):
+        self._std = force_ndarray(value, flatten=True)
+
     def pdf(self, x):
         return np.prod(1/(self.std*np.sqrt(2*np.pi))*np.exp(-0.5*((x-self.mean)/self.std)**2))
 
@@ -43,6 +57,19 @@ class Normal(Distribution):
     def cdf(self, x):
         return np.prod(0.5*(1 + erf((x-self.mean)/(self.std*np.sqrt(2)))))
 
+    def _gradient(self, val, *args, **kwargs):
+        if not type(self.geometry) in _get_identity_geometries():
+            raise NotImplementedError("Gradient not implemented for distribution {} with geometry {}".format(self,self.geometry))
+        if not callable(self.mean):
+            return -(val-self.mean)/(self.std**2)
+        elif hasattr(self.mean, "gradient"): # for likelihood
+            model = self.mean
+            dev = val - model.forward(*args, **kwargs)
+            print(dev)
+            return model.gradient(1.0/(np.array(self.std)) @ dev, *args, **kwargs)
+        else:
+            raise NotImplementedError("Gradient not implemented for distribution {} with location {}".format(self,self.mean))
+
     def _sample(self,N=1, rng=None):
 
         """
@@ -1,5 +1,6 @@
 from cuqi.geometry import _DefaultGeometry, _get_identity_geometries
 from cuqi.distribution import Distribution
+from cuqi.density import Density
 
 # ========================================================================
 class Posterior(Distribution):
@@ -25,6 +26,14 @@ class Posterior(Distribution):
         self.prior = prior
         super().__init__(**kwargs)
 
+    def get_density(self, name) -> Density:
+        """ Return a density with the given name. """
+        if name == self.likelihood.name:
+            return self.likelihood
+        if name == self.prior.name:
+            return self.prior
+        raise ValueError(f"No density with name {name}.")
+
     @property
     def data(self):
         return self.likelihood.data
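A short sketch of the new `get_density` lookup on a `Posterior` (the joint construction, the names `x` and `y`, and the data values are illustrative assumptions):

    import numpy as np
    import cuqi

    x = cuqi.distribution.Gaussian(np.zeros(2), 1.0)
    y = cuqi.distribution.Gaussian(lambda x: x, 0.1, geometry=2)
    posterior = cuqi.distribution.JointDistribution(x, y)(y=np.array([0.5, -0.5]))

    # Look up the component densities of the posterior by name
    prior = posterior.get_density("x")        # the prior on x
    likelihood = posterior.get_density("y")   # the likelihood defined by y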