CUQIpy 1.3.0__py3-none-any.whl → 1.4.0.post0.dev61__py3-none-any.whl

This diff shows the content changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (72)
  1. cuqi/__init__.py +1 -0
  2. cuqi/_version.py +3 -3
  3. cuqi/density/_density.py +9 -1
  4. cuqi/distribution/__init__.py +1 -1
  5. cuqi/distribution/_beta.py +1 -1
  6. cuqi/distribution/_cauchy.py +2 -2
  7. cuqi/distribution/_distribution.py +24 -15
  8. cuqi/distribution/_joint_distribution.py +97 -12
  9. cuqi/distribution/_posterior.py +9 -0
  10. cuqi/distribution/_truncated_normal.py +3 -3
  11. cuqi/distribution/_uniform.py +36 -2
  12. cuqi/experimental/__init__.py +1 -1
  13. cuqi/experimental/_recommender.py +216 -0
  14. cuqi/experimental/geometry/_productgeometry.py +3 -3
  15. cuqi/geometry/_geometry.py +12 -1
  16. cuqi/implicitprior/__init__.py +1 -1
  17. cuqi/implicitprior/_regularizedGaussian.py +40 -4
  18. cuqi/implicitprior/_restorator.py +35 -1
  19. cuqi/legacy/__init__.py +2 -0
  20. cuqi/legacy/sampler/__init__.py +11 -0
  21. cuqi/legacy/sampler/_conjugate.py +55 -0
  22. cuqi/legacy/sampler/_conjugate_approx.py +52 -0
  23. cuqi/legacy/sampler/_cwmh.py +196 -0
  24. cuqi/legacy/sampler/_gibbs.py +231 -0
  25. cuqi/legacy/sampler/_hmc.py +335 -0
  26. cuqi/legacy/sampler/_langevin_algorithm.py +198 -0
  27. cuqi/legacy/sampler/_laplace_approximation.py +184 -0
  28. cuqi/legacy/sampler/_mh.py +190 -0
  29. cuqi/legacy/sampler/_pcn.py +244 -0
  30. cuqi/{experimental/mcmc → legacy/sampler}/_rto.py +134 -152
  31. cuqi/legacy/sampler/_sampler.py +182 -0
  32. cuqi/likelihood/_likelihood.py +1 -1
  33. cuqi/model/_model.py +1248 -357
  34. cuqi/pde/__init__.py +4 -0
  35. cuqi/pde/_observation_map.py +36 -0
  36. cuqi/pde/_pde.py +133 -32
  37. cuqi/problem/_problem.py +88 -82
  38. cuqi/sampler/__init__.py +120 -8
  39. cuqi/sampler/_conjugate.py +376 -35
  40. cuqi/sampler/_conjugate_approx.py +40 -16
  41. cuqi/sampler/_cwmh.py +132 -138
  42. cuqi/{experimental/mcmc → sampler}/_direct.py +1 -1
  43. cuqi/sampler/_gibbs.py +269 -130
  44. cuqi/sampler/_hmc.py +328 -201
  45. cuqi/sampler/_langevin_algorithm.py +282 -98
  46. cuqi/sampler/_laplace_approximation.py +87 -117
  47. cuqi/sampler/_mh.py +47 -157
  48. cuqi/sampler/_pcn.py +56 -211
  49. cuqi/sampler/_rto.py +206 -140
  50. cuqi/sampler/_sampler.py +540 -135
  51. cuqi/solver/_solver.py +6 -2
  52. cuqi/testproblem/_testproblem.py +2 -3
  53. cuqi/utilities/__init__.py +3 -1
  54. cuqi/utilities/_utilities.py +94 -12
  55. {CUQIpy-1.3.0.dist-info → cuqipy-1.4.0.post0.dev61.dist-info}/METADATA +6 -4
  56. cuqipy-1.4.0.post0.dev61.dist-info/RECORD +102 -0
  57. {CUQIpy-1.3.0.dist-info → cuqipy-1.4.0.post0.dev61.dist-info}/WHEEL +1 -1
  58. CUQIpy-1.3.0.dist-info/RECORD +0 -100
  59. cuqi/experimental/mcmc/__init__.py +0 -123
  60. cuqi/experimental/mcmc/_conjugate.py +0 -345
  61. cuqi/experimental/mcmc/_conjugate_approx.py +0 -76
  62. cuqi/experimental/mcmc/_cwmh.py +0 -193
  63. cuqi/experimental/mcmc/_gibbs.py +0 -318
  64. cuqi/experimental/mcmc/_hmc.py +0 -464
  65. cuqi/experimental/mcmc/_langevin_algorithm.py +0 -392
  66. cuqi/experimental/mcmc/_laplace_approximation.py +0 -156
  67. cuqi/experimental/mcmc/_mh.py +0 -80
  68. cuqi/experimental/mcmc/_pcn.py +0 -89
  69. cuqi/experimental/mcmc/_sampler.py +0 -566
  70. cuqi/experimental/mcmc/_utilities.py +0 -17
  71. {CUQIpy-1.3.0.dist-info → cuqipy-1.4.0.post0.dev61.dist-info/licenses}/LICENSE +0 -0
  72. {CUQIpy-1.3.0.dist-info → cuqipy-1.4.0.post0.dev61.dist-info}/top_level.txt +0 -0
cuqi/__init__.py CHANGED
@@ -11,6 +11,7 @@ from . import operator
 from . import pde
 from . import problem
 from . import sampler
+from . import legacy
 from . import array
 from . import samples
 from . import solver
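The new `legacy` subpackage collects the pre-1.4 samplers (entries 19-31 in the file list above). A minimal access sketch; the class name `MH` is an assumption inferred from the `_mh.py` file name and is not confirmed by this diff:

```python
# Hypothetical sketch: the old-style samplers now live under cuqi.legacy.
# The class name MH is inferred from _mh.py and is an assumption.
import cuqi

OldMH = cuqi.legacy.sampler.MH  # pre-1.4 Metropolis-Hastings sampler class
```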
cuqi/_version.py CHANGED
@@ -8,11 +8,11 @@ import json
 
 version_json = '''
 {
- "date": "2025-01-31T15:19:46+0100",
+ "date": "2025-11-06T09:26:50+0300",
  "dirty": false,
  "error": null,
- "full-revisionid": "ef3d19707582cdca0d5ae948c99047b04daab707",
- "version": "1.3.0"
+ "full-revisionid": "e8fd90f6c716b96b04543b4fe62dd637033ccd32",
+ "version": "1.4.0.post0.dev61"
 }
 ''' # END VERSION_JSON
 
cuqi/density/_density.py CHANGED
@@ -143,7 +143,15 @@ class Density(ABC):
     def enable_FD(self, epsilon=1e-8):
         """ Enable finite difference approximation for logd gradient. Note
         that if enabled, the FD approximation will be used even if the
-        _gradient method is implemented. """
+        _gradient method is implemented.
+
+        Parameters
+        ----------
+        epsilon : float
+
+            Spacing (step size) to use for finite difference approximation for logd
+            gradient for each variable. Default is 1e-8.
+        """
         self._FD_enabled = True
         self._FD_epsilon = epsilon
 
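To illustrate the newly documented `epsilon` parameter, a minimal usage sketch. It uses the `Beta` distribution that appears later in this diff; treating `gradient` as dispatching to the FD approximation follows the docstring:

```python
import numpy as np
import cuqi

# Sketch: enable a finite-difference gradient of logd with a custom step size.
x = cuqi.distribution.Beta(alpha=2.0, beta=2.0)
x.enable_FD(epsilon=1e-6)          # FD now used even if _gradient exists
g = x.gradient(np.array([0.5]))    # gradient of logd via finite differences
```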
cuqi/distribution/__init__.py CHANGED
@@ -14,6 +14,6 @@ from ._lognormal import Lognormal
 from ._normal import Normal
 from ._truncated_normal import TruncatedNormal
 from ._posterior import Posterior
-from ._uniform import Uniform
+from ._uniform import Uniform, UnboundedUniform
 from ._custom import UserDefinedDistribution, DistributionGallery
 from ._joint_distribution import JointDistribution, _StackedJointDistribution, MultipleLikelihoodPosterior
cuqi/distribution/_beta.py CHANGED
@@ -48,7 +48,7 @@ class Beta(Distribution):
 
         # Check bounds
         if np.any(x<=0) or np.any(x>=1) or np.any(self.alpha<=0) or np.any(self.beta<=0):
-            return -np.Inf
+            return -np.inf
 
         # Compute logpdf
         return np.sum(sps.beta.logpdf(x, a=self.alpha, b=self.beta))
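This rename (and the matching ones in the files below) tracks NumPy 2.0, which removed the capitalized aliases `np.Inf` and `np.NaN`; only the lowercase spellings remain valid:

```python
import numpy as np

print(-np.inf)  # valid on every NumPy version
# -np.Inf       # AttributeError on NumPy >= 2.0: the alias was removed
```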
cuqi/distribution/_cauchy.py CHANGED
@@ -75,14 +75,14 @@ class Cauchy(Distribution):
     def logpdf(self, x):
 
         if self._is_out_of_bounds(x):
-            return -np.Inf
+            return -np.inf
 
         return np.sum(-np.log(np.pi*self.scale*(1+((x-self.location)/self.scale)**2)))
 
     def cdf(self, x):
 
         if self._is_out_of_bounds(x):
-            return -np.Inf
+            return -np.inf
 
         return np.sum(sps.cauchy.cdf(x, loc=self.location, scale=self.scale))
 
cuqi/distribution/_distribution.py CHANGED
@@ -105,7 +105,7 @@ class Distribution(Density, ABC):
                 f"Inconsistent distribution geometry attribute {self._geometry} and inferred "
                 f"dimension from distribution variables {inferred_dim}."
             )
-
+
         # If Geometry dimension is None, update it with the inferred dimension
         if inferred_dim and self._geometry.par_dim is None:
             self.geometry = inferred_dim
@@ -117,7 +117,7 @@ class Distribution(Density, ABC):
         # We do not use self.name to potentially infer it from python stack.
         if self._name:
             self._geometry._variable_name = self._name
-
+
         return self._geometry
 
     @geometry.setter
@@ -160,7 +160,7 @@ class Distribution(Density, ABC):
                 f"{self.logd.__qualname__}: To evaluate the log density all conditioning variables and main"
                 f" parameter must be specified. Conditioning variables are: {cond_vars}"
             )
-
+
         # Check if all conditioning variables are specified
         all_cond_vars_specified = all([key in kwargs for key in cond_vars])
         if not all_cond_vars_specified:
@@ -168,7 +168,7 @@ class Distribution(Density, ABC):
                 f"{self.logd.__qualname__}: To evaluate the log density all conditioning variables must be"
                 f" specified. Conditioning variables are: {cond_vars}"
            )
-
+
         # Extract exactly the conditioning variables from kwargs
         cond_kwargs = {key: kwargs[key] for key in cond_vars}
 
@@ -186,7 +186,7 @@ class Distribution(Density, ABC):
         # Not conditional distribution, simply evaluate log density directly
         else:
             return super().logd(*args, **kwargs)
-
+
     def _logd(self, *args):
         return self.logpdf(*args) # Currently all distributions implement logpdf so we simply call this method.
 
@@ -216,7 +216,7 @@ class Distribution(Density, ABC):
         # Get samples from the distribution sample method
         s = self._sample(N,*args,**kwargs)
 
-        #Store samples in cuqi samples object if more than 1 sample
+        # Store samples in cuqi samples object if more than 1 sample
         if N==1:
             if len(s) == 1 and isinstance(s,np.ndarray): #Extract single value from numpy array
                 s = s.ravel()[0]
@@ -264,7 +264,7 @@ class Distribution(Density, ABC):
         # Go through every mutable variable and assign value from kwargs if present
         for var_key in mutable_vars:
 
-            #If keyword directly specifies new value of variable we simply reassign
+            # If keyword directly specifies new value of variable we simply reassign
             if var_key in kwargs:
                 setattr(new_dist, var_key, kwargs.get(var_key))
                 processed_kwargs.add(var_key)
@@ -291,9 +291,18 @@ class Distribution(Density, ABC):
 
             elif len(var_args)>0: #Some keywords found
                 # Define new partial function with partially defined args
-                func = partial(var_val, **var_args)
+                if (
+                    hasattr(var_val, "_supports_partial_eval")
+                    and var_val._supports_partial_eval
+                ):
+                    func = var_val(**var_args)
+                else:
+                    # If the callable does not support partial evaluation,
+                    # we use the partial function to set the variable
+                    func = partial(var_val, **var_args)
+
                 setattr(new_dist, var_key, func)
-
+
             # Store processed keywords
             processed_kwargs.update(var_args.keys())
 
@@ -329,7 +338,7 @@ class Distribution(Density, ABC):
 
     def get_conditioning_variables(self):
         """Return the conditioning variables of this distribution (if any)."""
-
+
         # Get all mutable variables
         mutable_vars = self.get_mutable_variables()
 
@@ -338,7 +347,7 @@ class Distribution(Density, ABC):
 
         # Add any variables defined through callable functions
         cond_vars += get_indirect_variables(self)
-
+
         return cond_vars
 
     def get_mutable_variables(self):
@@ -347,10 +356,10 @@ class Distribution(Density, ABC):
         # If mutable variables are already cached, return them
         if hasattr(self, '_mutable_vars'):
             return self._mutable_vars
-
+
         # Define list of ignored attributes and properties
         ignore_vars = ['name', 'is_symmetric', 'geometry', 'dim']
-
+
         # Get public attributes
         attributes = get_writeable_attributes(self)
 
@@ -396,7 +405,7 @@ class Distribution(Density, ABC):
                 raise ValueError(f"{self._condition.__qualname__}: {ordered_keys[index]} passed as both argument and keyword argument.\nArguments follow the listed conditioning variable order: {self.get_conditioning_variables()}")
             kwargs[ordered_keys[index]] = arg
         return kwargs
-
+
     def _check_geometry_consistency(self):
         """ Checks that the geometry of the distribution is consistent by calling the geometry property. Should be called at the end of __init__ of subclasses. """
         self.geometry
@@ -411,4 +420,4 @@ class Distribution(Density, ABC):
     def rv(self):
         """ Return a random variable object representing the distribution. """
         from cuqi.experimental.algebra import RandomVariable
-        return RandomVariable(self)
+        return RandomVariable(self)
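The new branch in `_condition` lets a callable parameter opt in to partial evaluation: if it exposes a truthy `_supports_partial_eval`, conditioning calls it with the known keyword arguments and stores the result; otherwise the previous `functools.partial` wrapping is kept. A minimal sketch of the protocol with a hypothetical user-defined callable:

```python
from functools import partial

def mean_fun(z, scale):
    return scale * z

# Opt-in callable (hypothetical): calling it with a subset of keywords must
# return a new callable of the remaining variables.
class PartialMean:
    _supports_partial_eval = True

    def __call__(self, **kwargs):
        return partial(mean_fun, **kwargs)

# What _condition now does, schematically:
var_val = PartialMean()
func = var_val(scale=2.0) if var_val._supports_partial_eval else partial(var_val, scale=2.0)
print(func(z=3.0))  # 6.0
```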
cuqi/distribution/_joint_distribution.py CHANGED
@@ -84,6 +84,8 @@ class JointDistribution:
         cond_vars = self._get_conditioning_variables()
         if len(cond_vars) > 0:
             raise ValueError(f"Every density parameter must have a distribution (prior). Missing prior for {cond_vars}.")
+        # Initialize finite difference gradient approximation settings
+        self.disable_FD()
 
     # --------- Public properties ---------
     @property
@@ -96,6 +98,38 @@ class JointDistribution:
         """ Returns the geometries of the joint distribution. """
         return [dist.geometry for dist in self._distributions]
 
+    @property
+    def FD_enabled(self):
+        """ Returns a dictionary of keys and booleans indicating for each
+        parameter name (key) if finite difference approximation of the logd
+        gradient is enabled. """
+        par_names = self.get_parameter_names()
+        FD_enabled = {
+            par_name: self.FD_epsilon[par_name] is not None for par_name in par_names
+        }
+        return FD_enabled
+
+    @property
+    def FD_epsilon(self):
+        """ Returns a dictionary indicating for each parameter name the
+        spacing for the finite difference approximation of the logd gradient."""
+        return self._FD_epsilon
+
+    @FD_epsilon.setter
+    def FD_epsilon(self, value):
+        """ Set the spacing for the finite difference approximation of the
+        logd gradient as a dictionary. The keys are the parameter names.
+        The value for each key is either None (no FD approximation) or a float
+        representing the FD step size.
+        """
+        par_names = self.get_parameter_names()
+        if value is None:
+            self._FD_epsilon = {par_name: None for par_name in par_names}
+        else:
+            if set(value.keys()) != set(par_names):
+                raise ValueError("Keys of FD_epsilon must match the parameter names of the distribution "+f" {par_names}")
+            self._FD_epsilon = value
+
     # --------- Public methods ---------
     def logd(self, *args, **kwargs):
         """ Evaluate the un-normalized log density function. """
@@ -136,6 +170,33 @@ class JointDistribution:
         # Can reduce to Posterior, Likelihood or Distribution.
         return new_joint._reduce_to_single_density()
 
+    def enable_FD(self, epsilon=None):
+        """ Enable finite difference approximation for logd gradient. Note
+        that if enabled, the FD approximation will be used even if the
+        _gradient method is implemented. By default, all parameters
+        will have FD enabled with a step size of 1e-8.
+
+        Parameters
+        ----------
+        epsilon : dict, *optional*
+
+            Dictionary indicating the spacing (step size) to use for finite
+            difference approximation for logd gradient for each variable.
+
+            Keys are variable names.
+            Values are either a float to enable FD with the given value as the FD
+            step size, or None to disable FD for that variable. Default is 1e-8 for
+            all variables.
+        """
+        if epsilon is None:
+            epsilon = {par_name: 1e-8 for par_name in self.get_parameter_names()}
+        self.FD_epsilon = epsilon
+
+    def disable_FD(self):
+        """ Disable finite difference approximation for logd gradient. """
+        par_names = self.get_parameter_names()
+        self.FD_epsilon = {par_name: None for par_name in par_names}
+
     def get_parameter_names(self) -> List[str]:
         """ Returns the parameter names of the joint distribution. """
         return [dist.name for dist in self._distributions]
@@ -202,34 +263,58 @@ class JointDistribution:
         # Count number of distributions and likelihoods
         n_dist = len(self._distributions)
         n_likelihood = len(self._likelihoods)
+        reduced_FD_epsilon = {par_name:self.FD_epsilon[par_name] for par_name in self.get_parameter_names()}
+        self.enable_FD(epsilon=reduced_FD_epsilon)
 
         # Cant reduce if there are multiple distributions or likelihoods
         if n_dist > 1:
             return self
 
+        # If only evaluated densities left return joint to ensure logd method is available
+        if n_dist == 0 and n_likelihood == 0:
+            return self
+
+        # Extract the parameter name of the distribution
+        if n_dist == 1:
+            par_name = self._distributions[0].name
+        elif n_likelihood == 1:
+            par_name = self._likelihoods[0].name
+        else:
+            par_name = None
+
         # If exactly one distribution and multiple likelihoods reduce
         if n_dist == 1 and n_likelihood > 1:
-            return MultipleLikelihoodPosterior(*self._densities)
-
+            reduced_distribution = MultipleLikelihoodPosterior(*self._densities)
+            reduced_FD_epsilon = {par_name:self.FD_epsilon[par_name]}
+
         # If exactly one distribution and one likelihood its a Posterior
         if n_dist == 1 and n_likelihood == 1:
             # Ensure parameter names match, otherwise return the joint distribution
             if set(self._likelihoods[0].get_parameter_names()) != set(self._distributions[0].get_parameter_names()):
                 return self
-            return self._add_constants_to_density(Posterior(self._likelihoods[0], self._distributions[0]))
+            reduced_distribution = Posterior(self._likelihoods[0], self._distributions[0])
+            reduced_distribution = self._add_constants_to_density(reduced_distribution)
+            reduced_FD_epsilon = self.FD_epsilon[par_name]
 
         # If exactly one distribution and no likelihoods its a Distribution
         if n_dist == 1 and n_likelihood == 0:
-            return self._add_constants_to_density(self._distributions[0])
-
+            # Intentionally skip enabling FD here. If the user wants FD, they
+            # can enable it for this particular distribution before forming
+            # the joint distribution.
+            return self._add_constants_to_density(self._distributions[0])
+
         # If no distributions and exactly one likelihood its a Likelihood
         if n_likelihood == 1 and n_dist == 0:
-            return self._likelihoods[0]
+            # This case seems to not happen in practice, but we include it for
+            # completeness.
+            reduced_distribution = self._likelihoods[0]
+            reduced_FD_epsilon = self.FD_epsilon[par_name]
+
+        if self.FD_enabled[par_name]:
+            reduced_distribution.enable_FD(epsilon=reduced_FD_epsilon)
+
+        return reduced_distribution
 
-        # If only evaluated densities left return joint to ensure logd method is available
-        if n_dist == 0 and n_likelihood == 0:
-            return self
-
     def _add_constants_to_density(self, density: Density):
         """ Add the constants (evaluated densities) to a single density. Used when reducing to single density. """
 
@@ -274,7 +359,7 @@ class JointDistribution:
         if len(cond_vars) > 0:
             msg += f"|{cond_vars}"
         msg += ")"
-
+
         msg += "\n"
         msg += " Densities: \n"
 
@@ -283,7 +368,7 @@ class JointDistribution:
             msg += f"\t{density.name} ~ {density}\n"
 
         # Wrap up
-        msg += ")"
+        msg += " )"
 
         return msg
 
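A sketch of the new per-parameter FD controls on a joint distribution. The `Gaussian` constructor calls follow standard CUQIpy usage; the concrete names and sizes are illustrative:

```python
import numpy as np
import cuqi

x = cuqi.distribution.Gaussian(np.zeros(2), 1.0, name="x")
y = cuqi.distribution.Gaussian(lambda x: x, 0.1, geometry=2, name="y")
joint = cuqi.distribution.JointDistribution(x, y)

joint.enable_FD()                            # step size 1e-8 for all parameters
print(joint.FD_enabled)                      # {'x': True, 'y': True}
joint.FD_epsilon = {"x": 1e-6, "y": None}    # per-parameter step / disable
joint.disable_FD()                           # back to no FD anywhere
```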
cuqi/distribution/_posterior.py CHANGED
@@ -1,5 +1,6 @@
 from cuqi.geometry import _DefaultGeometry, _get_identity_geometries
 from cuqi.distribution import Distribution
+from cuqi.density import Density
 
 # ========================================================================
 class Posterior(Distribution):
@@ -25,6 +26,14 @@ class Posterior(Distribution):
         self.prior = prior
         super().__init__(**kwargs)
 
+    def get_density(self, name) -> Density:
+        """ Return a density with the given name. """
+        if name == self.likelihood.name:
+            return self.likelihood
+        if name == self.prior.name:
+            return self.prior
+        raise ValueError(f"No density with name {name}.")
+
     @property
     def data(self):
         return self.likelihood.data
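The new `get_density` retrieves a posterior component by the name of the underlying density. A sketch continuing the joint-distribution example; whether the likelihood keeps the data distribution's name (`y` here) is an assumption consistent with the name checks above:

```python
import numpy as np
import cuqi

x = cuqi.distribution.Gaussian(np.zeros(2), 1.0, name="x")
y = cuqi.distribution.Gaussian(lambda x: x, 0.1, geometry=2, name="y")
joint = cuqi.distribution.JointDistribution(x, y)

posterior = joint(y=np.zeros(2))            # reduces to a Posterior
prior = posterior.get_density("x")          # the prior density
likelihood = posterior.get_density("y")     # the likelihood density
```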
cuqi/distribution/_truncated_normal.py CHANGED
@@ -37,7 +37,7 @@ class TruncatedNormal(Distribution):
     p = cuqi.distribution.TruncatedNormal(mean=0, std=1, low=-2, high=2)
     samples = p.sample(5000)
     """
-    def __init__(self, mean=None, std=None, low=-np.Inf, high=np.Inf, is_symmetric=False, **kwargs):
+    def __init__(self, mean=None, std=None, low=-np.inf, high=np.inf, is_symmetric=False, **kwargs):
         # Init from abstract distribution class
         super().__init__(is_symmetric=is_symmetric, **kwargs)
 
@@ -97,7 +97,7 @@ class TruncatedNormal(Distribution):
         # the unnormalized logpdf
         # check if x falls in the range between np.array a and b
         if np.any(x < self.low) or np.any(x > self.high):
-            return -np.Inf
+            return -np.inf
         else:
             return self._normal.logpdf(x)
 
@@ -107,7 +107,7 @@ class TruncatedNormal(Distribution):
         """
         # check if x falls in the range between np.array a and b
         if np.any(x < self.low) or np.any(x > self.high):
-            return np.NaN*np.ones_like(x)
+            return np.nan*np.ones_like(x)
         else:
             return self._normal.gradient(x, *args, **kwargs)
 
cuqi/distribution/_uniform.py CHANGED
@@ -1,5 +1,6 @@
 import numpy as np
 from cuqi.distribution import Distribution
+from cuqi.geometry import Geometry
 
 class Uniform(Distribution):
 
@@ -46,7 +47,7 @@ class Uniform(Distribution):
         Computes the gradient of logpdf at the given values of x.
         """
         if np.any(x < self.low) or np.any(x > self.high):
-            return np.NaN*np.ones_like(x)
+            return np.nan*np.ones_like(x)
         else:
             return np.zeros_like(x)
 
@@ -57,4 +58,37 @@ class Uniform(Distribution):
         else:
             s = np.random.uniform(self.low, self.high, (N,self.dim)).T
 
-        return s
+        return s
+
+class UnboundedUniform(Distribution):
+    """
+    Unbounded uniform distribution. This is a special case of the
+    Uniform distribution, where the lower and upper bounds are set to
+    -inf and inf, respectively. This distribution is not normalizable,
+    and therefore cannot be sampled from. It is mainly used for
+    initializing non-informative priors.
+    Parameters
+    ----------
+    geometry : int or Geometry
+        The geometry of the distribution. If an integer is given, it is
+        interpreted as the dimension of the distribution. If a
+        Geometry object is given, its par_dim attribute is used.
+    """
+    def __init__(self, geometry, is_symmetric=True, **kwargs):
+        super().__init__(geometry=geometry, is_symmetric=is_symmetric, **kwargs)
+
+    def logpdf(self, x):
+        """
+        Evaluate the logarithm of the unnormalized PDF at the given values of x.
+        """
+        # Always return 1.0 (the unnormalized log PDF)
+        return 1.0
+
+    def gradient(self, x):
+        """
+        Computes the gradient of logpdf at the given values of x.
+        """
+        return np.zeros_like(x)
+
+    def _sample(self, N=1, rng=None):
+        raise NotImplementedError("Cannot sample from UnboundedUniform distribution")
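A short usage sketch of the new `UnboundedUniform`, mirroring the behavior defined above (constant unnormalized log density, zero gradient, no sampling):

```python
import numpy as np
import cuqi

# Flat improper prior on a 5-dimensional parameter.
x = cuqi.distribution.UnboundedUniform(geometry=5)

print(x.logpdf(np.zeros(5)))     # 1.0 everywhere (unnormalized)
print(x.gradient(np.zeros(5)))   # zeros: the density is flat
# x.sample() raises NotImplementedError: the distribution is not normalizable
```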
cuqi/experimental/__init__.py CHANGED
@@ -1,4 +1,4 @@
 """ Experimental module for testing new features and ideas. """
-from . import mcmc
 from . import algebra
 from . import geometry
+from ._recommender import SamplerRecommender
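The experimental `mcmc` module is gone (its samplers were promoted to `cuqi.sampler`, with the old implementations kept under `cuqi.legacy`), and a new `SamplerRecommender` is exported. Its API is not visible in this diff; the `recommend()` call below is an assumption:

```python
import numpy as np
import cuqi
from cuqi.experimental import SamplerRecommender

# Hypothetical sketch: ask for a sampler suited to a given target density.
x = cuqi.distribution.Gaussian(np.zeros(2), 1.0, name="x")
recommender = SamplerRecommender(x)
sampler = recommender.recommend()   # assumed method name, not confirmed here
```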