inference-tools 0.13.3__py3-none-any.whl → 0.13.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
inference/priors.py CHANGED
@@ -2,14 +2,113 @@
  .. moduleauthor:: Chris Bowman <chris.bowman.physics@gmail.com>
  """

+ from abc import ABC, abstractmethod
  from typing import Union, Iterable
-
- from numpy import array, log, pi, zeros, concatenate, float64, where
+ from numpy import atleast_1d, log, pi, zeros, concatenate, where, ndarray, isfinite
  from numpy.random import normal, exponential, uniform
  from itertools import chain


- class JointPrior:
+ class BasePrior(ABC):
+     variables: list[int]
+
+     @staticmethod
+     def validate_variable_indices(
+         variable_inds: Union[int, Iterable[int]],
+         n_parameters: int,
+         class_name="BasePrior",
+     ) -> list[int]:
+         indices_type_error = TypeError(
+             f"""\n
+             \r[ {class_name} error ]
+             \r>> 'variable_inds' argument of {class_name} must be
+             \r>> given as an integer or list of integers
+             """
+         )
+
+         if not isinstance(variable_inds, (int, Iterable)):
+             raise indices_type_error
+
+         if isinstance(variable_inds, int):
+             variable_inds = [variable_inds]
+
+         if not all(isinstance(p, int) for p in variable_inds):
+             raise indices_type_error
+
+         if not isinstance(variable_inds, list):
+             variable_inds = list(variable_inds)
+
+         if n_parameters != len(variable_inds):
+             raise ValueError(
+                 f"""\n
+                 \r[ {class_name} error ]
+                 \r>> The total number of variables specified via the 'variable_indices' argument
+                 \r>> is inconsistent with the number specified by the other arguments.
+                 """
+             )
+
+         if len(variable_inds) != len(set(variable_inds)):
+             raise ValueError(
+                 f"""\n
+                 \r[ {class_name} error ]
+                 \r>> All integers given via the 'variable_indices' must be unique.
+                 \r>> Two or more of the given integers are duplicates.
+                 """
+             )
+
+         return variable_inds
+
+     @abstractmethod
+     def __call__(self, theta: ndarray) -> float:
+         pass
+
+     @abstractmethod
+     def gradient(self, theta: ndarray) -> ndarray:
+         pass
+
+     def cost(self, theta: ndarray) -> float:
+         """
+         Returns the 'cost', equal to the negative prior log-probability, for the
+         provided set of model parameters.
+
+         :param theta: \
+             The model parameters as a 1D ``numpy.ndarray``.
+
+         :returns: \
+             The negative prior log-probability value.
+         """
+         return -self(theta)
+
+     def cost_gradient(self, theta: ndarray) -> ndarray:
+         """
+         Returns the gradient of the 'cost', equal to the negative prior log-probability,
+         with respect to the model parameters.
+
+         :param theta: \
+             The model parameters as a 1D ``numpy.ndarray``.
+
+         :returns: \
+             The gradient of the negative prior log-probability value.
+         """
+         return -self.gradient(theta)
+
+     def sample(self) -> ndarray:
+         """
+         Draws a sample from the prior.
+
+         :returns: \
+             A single sample from the prior distribution as a 1D ``numpy.ndarray``.
+         """
+         raise NotImplementedError(
+             f"""\n
+             \r[ {self.__class__.__name__} error ]
+             \r>> 'sample' is an optional method for classes inheriting from
+             \r>> 'BasePrior', and has not been implemented for '{self.__class__.__name__}'.
+             """
+         )
+
+
+ class JointPrior(BasePrior):
      """
      A class which combines multiple prior distribution objects into a single
      joint-prior distribution object.
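
This hunk replaces the old free-standing `BasePrior` with an abstract base class: subclasses must implement `__call__` and `gradient`, and inherit `cost`, `cost_gradient` and the shared index validation. A minimal sketch of a custom prior written against the new interface (the `LaplacePrior` class and its parameters are illustrative, not part of the package):

```python
from numpy import abs as np_abs
from numpy import atleast_1d, log, ndarray, sign

from inference.priors import BasePrior  # new in the 0.13.4 layout


class LaplacePrior(BasePrior):
    """Illustrative Laplace prior; not part of the package."""

    def __init__(self, mu, scale, variable_indices: list[int]):
        self.mu = atleast_1d(mu).astype(float)
        self.scale = atleast_1d(scale).astype(float)
        # inherited static validator: checks types, uniqueness and count
        self.variables = self.validate_variable_indices(
            variable_inds=variable_indices,
            n_parameters=self.mu.size,
            class_name="LaplacePrior",
        )
        self.normalisation = -log(2 * self.scale).sum()
        self.bounds = [(None, None)] * self.mu.size  # used by JointPrior

    def __call__(self, theta: ndarray) -> float:
        # Laplace log-probability summed over the assigned variables
        z = np_abs(theta[self.variables] - self.mu) / self.scale
        return -z.sum() + self.normalisation

    def gradient(self, theta: ndarray) -> ndarray:
        # derivative of the log-probability with respect to theta
        return -sign(theta[self.variables] - self.mu) / self.scale
```

`cost` and `cost_gradient` come for free from the base class, and calling the un-implemented `sample` on this sketch raises the new formatted `NotImplementedError` rather than an `AttributeError`.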
@@ -22,12 +121,13 @@ class JointPrior:
          The total number of model variables.
      """

-     def __init__(self, components, n_variables):
+     def __init__(self, components: list[BasePrior], n_variables: int):
          if not all(isinstance(c, BasePrior) for c in components):
              raise TypeError(
-                 """
-                 All objects contained in the 'components' argument must be instances
-                 of a subclass of BasePrior (e.g. GaussianPrior, UniformPrior)
+                 """\n
+                 \r[ JointPrior error ]
+                 \r>> The sequence of prior objects passed to the 'components' argument
+                 \r>> of 'JointPrior' must be instances of a subclass of 'BasePrior'.
                  """
              )

@@ -45,24 +145,31 @@ class JointPrior:
          for var in chain(*[c.variables for c in self.components]):
              if var in self.prior_variables:
                  raise ValueError(
-                     f"Variable index '{var}' appears more than once in prior components"
+                     f"""\n
+                     \r[ JointPrior error ]
+                     \r>> Variable index '{var}' appears more than once in the prior
+                     \r>> objects passed to the 'components' argument of 'JointPrior'.
+                     """
                  )
              self.prior_variables.append(var)

          if len(self.prior_variables) != n_variables:
              raise ValueError(
-                 f"""
-                 The total number of variables specified across the various prior
-                 components ({len(self.prior_variables)}) does not match the number specified in
-                 the 'n_variables' argument ({n_variables}).
+                 f"""\n
+                 \r[ JointPrior error ]
+                 \r>> The total number of variables specified across the various prior
+                 \r>> components ({len(self.prior_variables)}) does not match the number
+                 \r>> specified in the 'n_variables' argument ({n_variables}).
                  """
              )

          if not all(0 <= i < n_variables for i in self.prior_variables):
              raise ValueError(
-                 """
-                 All variable indices given to the prior components must have values
-                 in the range [0, n_variables-1].
+                 """\n
+                 \r[ JointPrior error ]
+                 \r>> All variable indices specified across the various prior
+                 \r>> objects passed to the 'components' argument of 'JointPrior'
+                 \r>> must have values in the range [0, n_variables - 1].
                  """
              )

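The duplicate-index check now produces the same bracketed error format as the other validations. A small illustration of how it surfaces (assuming inference-tools 0.13.4; the printed text in the comment is paraphrased):

```python
from inference.priors import ExponentialPrior, GaussianPrior, JointPrior

components = [
    GaussianPrior(mean=[0.0], sigma=[1.0], variable_indices=[0]),
    ExponentialPrior(beta=[2.0], variable_indices=[0]),  # index 0 claimed twice
]

try:
    JointPrior(components=components, n_variables=1)
except ValueError as error:
    print(error)  # [ JointPrior error ] >> Variable index '0' appears more than once ...
```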
@@ -75,7 +182,7 @@ class JointPrior:
          )
          self.bounds = [v[0] for v in both]

-     def __call__(self, theta):
+     def __call__(self, theta: ndarray) -> float:
          """
          Returns the joint-prior log-probability value, calculated as the sum
          of the log-probabilities from each prior component for the provided
@@ -89,7 +196,7 @@ class JointPrior:
          """
          return sum(c(theta) for c in self.components)

-     def gradient(self, theta):
+     def gradient(self, theta: ndarray) -> ndarray:
          """
          Returns the gradient of the prior log-probability with respect to the model
          parameters.
@@ -105,7 +212,7 @@ class JointPrior:
              grad[c.variables] = c.gradient(theta)
          return grad

-     def sample(self):
+     def sample(self) -> ndarray:
          """
          Draws a sample from the prior.

@@ -118,77 +225,35 @@ class JointPrior:
          return sample


- class BasePrior:
-     @staticmethod
-     def check_variables(variable_inds: Union[int, Iterable[int]], n_vars: int):
-         if not isinstance(variable_inds, (int, Iterable)):
-             raise TypeError("'variable_inds' must be an integer or list of integers")
-
-         if isinstance(variable_inds, int):
-             variable_inds = [variable_inds]
-
-         if not all(isinstance(p, int) for p in variable_inds):
-             raise TypeError("'variable_inds' must be an integer or list of integers")
-
-         if n_vars != len(variable_inds):
-             raise ValueError(
-                 """
-                 The total number of variables specified via the 'variable_indices' argument is
-                 inconsistent with the number specified by the other arguments.
-                 """
-             )
-
-         if len(variable_inds) != len(set(variable_inds)):
-             raise ValueError(
-                 """
-                 All integers given via the 'variable_indices' must be unique.
-                 Two or more of the given integers are duplicates.
-                 """
-             )
-
-         return variable_inds
-
-
  class GaussianPrior(BasePrior):
      """
      A class for generating a Gaussian prior for one or more of the model variables.

      :param mean: \
-         A list specifying the means of the Gaussian priors on each of the variables specified
-         in the ``variable_indices`` argument.
+         The means of the Gaussian priors on each of the variables specified
+         in the ``variable_indices`` argument as a 1D ``numpy.ndarray``.

      :param sigma: \
-         A list specifying the standard deviations of the Gaussian priors on each of the
-         variables specified in the ``variable_indices`` argument.
+         The standard deviations of the Gaussian priors on each of the variables
+         specified in the ``variable_indices`` argument as a 1D ``numpy.ndarray``.

      :param variable_indices: \
          A list of integers specifying the indices of the variables to which the prior will apply.
      """

-     def __init__(self, mean, sigma, variable_indices):
-         self.mean = array(mean, dtype=float64).squeeze()
-         self.sigma = array(sigma, dtype=float64).squeeze()
-
-         # if parameters were passed as floats, convert from 0D to 1D arrays
-         if self.mean.ndim == 0:
-             self.mean = self.mean.reshape([1])
-         if self.sigma.ndim == 0:
-             self.sigma = self.sigma.reshape([1])
+     def __init__(self, mean: ndarray, sigma: ndarray, variable_indices: list[int]):
+         self.mean, self.sigma = validate_prior_parameters(
+             class_name="GaussianPrior",
+             params=[("mean", mean), ("sigma", sigma)],
+             require_positive={"sigma"},
+         )

          self.n_params = self.mean.size
-
-         if self.mean.size != self.sigma.size:
-             raise ValueError(
-                 "mean and sigma arguments must have the same number of elements"
-             )
-
-         if self.mean.ndim > 1 or self.sigma.ndim > 1:
-             raise ValueError("mean and sigma arguments must be 1D arrays")
-
-         if not (self.sigma > 0.0).all():
-             raise ValueError('All values of "sigma" must be greater than zero')
-
-         self.variables = self.check_variables(variable_indices, self.n_params)
+         self.variables = self.validate_variable_indices(
+             variable_inds=variable_indices,
+             n_parameters=self.n_params,
+             class_name="GaussianPrior",
+         )

          # pre-calculate some quantities as an optimisation
          self.inv_sigma = 1.0 / self.sigma
@@ -196,7 +261,7 @@ class GaussianPrior(BasePrior):
          self.normalisation = -log(self.sigma).sum() - 0.5 * log(2 * pi) * self.n_params
          self.bounds = [(None, None)] * self.n_params

-     def __call__(self, theta):
+     def __call__(self, theta: ndarray) -> float:
          """
          Returns the prior log-probability value for the provided set of model parameters.

@@ -209,7 +274,7 @@ class GaussianPrior(BasePrior):
          z = (self.mean - theta[self.variables]) * self.inv_sigma
          return -0.5 * (z**2).sum() + self.normalisation

-     def gradient(self, theta):
+     def gradient(self, theta: ndarray) -> ndarray:
          """
          Returns the gradient of the prior log-probability with respect to the model
          parameters.
@@ -222,7 +287,7 @@ class GaussianPrior(BasePrior):
          """
          return (self.mean - theta[self.variables]) * self.inv_sigma_sqr

-     def sample(self):
+     def sample(self) -> ndarray:
          """
          Draws a sample from the prior.

@@ -251,26 +316,26 @@ class ExponentialPrior(BasePrior):
      A class for generating an exponential prior for one or more of the model variables.

      :param beta: \
-         A list specifying the 'beta' parameter value of the exponential priors on each of the
-         variables specified in the ``variable_indices`` argument.
+         The 'beta' parameter values of the exponential priors on each of the variables
+         specified in the ``variable_indices`` argument as a 1D ``numpy.ndarray``.

      :param variable_indices: \
          A list of integers specifying the indices of the variables to which the prior will apply.
      """

-     def __init__(self, beta, variable_indices):
-         self.beta = array(beta, dtype=float64).squeeze()
-         if self.beta.ndim == 0:
-             self.beta = self.beta.reshape([1])
-         self.n_params = self.beta.size
-
-         if self.beta.ndim > 1:
-             raise ValueError("beta argument must be a 1D array")
-
-         if not (self.beta > 0.0).all():
-             raise ValueError('All values of "beta" must be greater than zero')
+     def __init__(self, beta: ndarray, variable_indices: list[int]):
+         (self.beta,) = validate_prior_parameters(
+             class_name="ExponentialPrior",
+             params=[("beta", beta)],
+             require_positive={"beta"},
+         )

-         self.variables = self.check_variables(variable_indices, self.n_params)
+         self.n_params = self.beta.size
+         self.variables = self.validate_variable_indices(
+             variable_inds=variable_indices,
+             n_parameters=self.n_params,
+             class_name="ExponentialPrior",
+         )

          # pre-calculate some quantities as an optimisation
          self.lam = 1.0 / self.beta
@@ -278,7 +343,7 @@ class ExponentialPrior(BasePrior):
          self.zeros = zeros(self.n_params)
          self.bounds = [(0.0, None)] * self.n_params

-     def __call__(self, theta):
+     def __call__(self, theta: ndarray) -> float:
          """
          Returns the prior log-probability value for the provided set of model parameters.

@@ -292,7 +357,7 @@ class ExponentialPrior(BasePrior):
              return -1e100
          return -(self.lam * theta[self.variables]).sum() + self.normalisation

-     def gradient(self, theta):
+     def gradient(self, theta: ndarray) -> ndarray:
          """
          Returns the gradient of the prior log-probability with respect to the model
          parameters.
@@ -305,7 +370,7 @@ class ExponentialPrior(BasePrior):
          """
          return where(theta[self.variables] >= 0.0, -self.lam, self.zeros)

-     def sample(self):
+     def sample(self) -> ndarray:
          """
          Draws a sample from the prior.

@@ -315,7 +380,7 @@ class ExponentialPrior(BasePrior):
          return exponential(scale=self.beta)

      @classmethod
-     def combine(cls, priors):
+     def combine(cls, priors: list[BasePrior]):
          if not all(isinstance(p, cls) for p in priors):
              raise ValueError(f"All prior objects being combined must be of type {cls}")

@@ -332,48 +397,44 @@ class UniformPrior(BasePrior):
      A class for generating a uniform prior for one or more of the model variables.

      :param lower: \
-         A list specifying the lower bound of the uniform priors on each of the variables
-         specified in the ``variable_indices`` argument.
+         The lower bound of the uniform priors on each of the variables
+         specified in the ``variable_indices`` argument as a 1D ``numpy.ndarray``.

      :param upper: \
-         A list specifying the upper bound of the uniform priors on each of the variables
-         specified in the ``variable_indices`` argument.
+         The upper bound of the uniform priors on each of the variables
+         specified in the ``variable_indices`` argument as a 1D ``numpy.ndarray``.

      :param variable_indices: \
          A list of integers specifying the indices of the variables to which the prior will apply.
      """

-     def __init__(self, lower, upper, variable_indices):
-         self.lower = array(lower).squeeze()
-         self.upper = array(upper).squeeze()
-
-         # if parameters were passed as floats, convert from 0D to 1D arrays
-         self.lower = self.lower.reshape([1]) if self.lower.ndim == 0 else self.lower
-         self.upper = self.upper.reshape([1]) if self.upper.ndim == 0 else self.upper
+     def __init__(self, lower: ndarray, upper: ndarray, variable_indices: list[int]):
+         self.lower, self.upper = validate_prior_parameters(
+             class_name="UniformPrior", params=[("lower", lower), ("upper", upper)]
+         )

          self.n_params = self.lower.size
          self.grad = zeros(self.n_params)

-         if self.lower.size != self.upper.size:
-             raise ValueError(
-                 """'lower' and 'upper' arguments must have the same number of elements"""
-             )
-
-         if self.lower.ndim > 1 or self.upper.ndim > 1:
-             raise ValueError("'lower' and 'upper' arguments must be 1D arrays")
-
          if (self.upper <= self.lower).any():
              raise ValueError(
-                 "All values in 'lower' must be less than the corresponding values in 'upper'"
+                 """\n
+                 \r[ UniformPrior error ]
+                 \r>> All values in 'lower' must be less than the corresponding values in 'upper'
+                 """
              )

-         self.variables = self.check_variables(variable_indices, self.n_params)
+         self.variables = self.validate_variable_indices(
+             variable_inds=variable_indices,
+             n_parameters=self.n_params,
+             class_name="UniformPrior",
+         )

          # pre-calculate some quantities as an optimisation
          self.normalisation = -log(self.upper - self.lower).sum()
          self.bounds = [(lo, up) for lo, up in zip(self.lower, self.upper)]

-     def __call__(self, theta):
+     def __call__(self, theta: ndarray) -> float:
          """
          Returns the prior log-probability value for the provided set of model parameters.

@@ -389,7 +450,7 @@ class UniformPrior(BasePrior):
              return self.normalisation
          return -1e100

-     def gradient(self, theta):
+     def gradient(self, theta: ndarray) -> ndarray:
          """
          Returns the gradient of the prior log-probability with respect to the model
          parameters.
@@ -402,7 +463,7 @@ class UniformPrior(BasePrior):
          """
          return self.grad

-     def sample(self):
+     def sample(self) -> ndarray:
          """
          Draws a sample from the prior.

@@ -424,3 +485,77 @@ class UniformPrior(BasePrior):
          upper = concatenate([p.upper for p in priors])

          return cls(lower=lower, upper=upper, variable_indices=variables)
+
+
+ def validate_prior_parameters(
+     class_name: str, params: list[tuple], require_positive: set[str] = frozenset()
+ ) -> list[ndarray]:
+     validated_params = []
+     for param_name, param in params:
+         if attempt_array_conversion(param):
+             param = atleast_1d(param).astype(float)
+
+         if not isinstance(param, ndarray):
+             raise TypeError(
+                 f"""\n
+                 \r[ {class_name} error ]
+                 \r>> Argument '{param_name}' should be an instance of a numpy.ndarray,
+                 \r>> but instead has type:
+                 \r>> {type(param)}
+                 """
+             )
+
+         if param.ndim != 1:
+             raise ValueError(
+                 f"""\n
+                 \r[ {class_name} error ]
+                 \r>> Argument '{param_name}' should be a 1D numpy.ndarray,
+                 \r>> but has {param.ndim} dimensions and shape {param.shape}.
+                 """
+             )
+
+         if not isfinite(param).all():
+             raise ValueError(
+                 f"""\n
+                 \r[ {class_name} error ]
+                 \r>> Argument '{param_name}' contains non-finite values.
+                 """
+             )
+
+         if param_name in require_positive:
+             if not (param > 0.0).all():
+                 raise ValueError(
+                     f"""\n
+                     \r[ {class_name} error ]
+                     \r>> All values given in '{param_name}' must be greater than zero.
+                     """
+                 )
+
+         validated_params.append(param)
+
+     # check all inputs are the same size by collecting their sizes in a set
+     if len({param.size for param in validated_params}) != 1:
+         raise ValueError(
+             f"""\n
+             \r[ {class_name} error ]
+             \r>> Arguments
+             \r>> {[param_name for param_name, _ in params]}
+             \r>> must all be arrays of equal size, but instead have sizes
+             \r>> {[param.size for param in validated_params]}
+             \r>> respectively.
+             """
+         )
+
+     return validated_params
+
+
+ def attempt_array_conversion(param) -> bool:
+     # if input is a zero-dimensional array, we need to convert to 1D
+     zero_dim_array = isinstance(param, ndarray) and param.ndim == 0
+     # if the input is a float or an int, also convert to a 1D array
+     valid_number = isinstance(param, (int, float))
+     # if the input is a list or tuple containing only floats and ints, also convert
+     valid_sequence = isinstance(param, (list, tuple)) and all(
+         isinstance(v, (int, float)) for v in param
+     )
+     return zero_dim_array or valid_sequence or valid_number
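
These two new module-level helpers centralise the conversion and validation logic that `GaussianPrior`, `ExponentialPrior` and `UniformPrior` previously each implemented themselves: scalars, lists of numbers and 0-D arrays are promoted to 1-D float arrays before the shape, finiteness and positivity checks run. A short sketch of the resulting behaviour (expected output shown in comments, assuming inference-tools 0.13.4):

```python
from numpy import array
from inference.priors import GaussianPrior, UniformPrior

# scalars, lists of numbers and 0D arrays are converted to 1D float arrays
g = GaussianPrior(mean=5.0, sigma=2.0, variable_indices=[0])
u = UniformPrior(lower=[0.0, -1.0], upper=[1.0, 1.0], variable_indices=[1, 2])
print(g.mean, g.sigma)  # [5.] [2.]
print(u.bounds)  # per-variable (lower, upper) pairs

# invalid parameters now fail with the formatted error messages
try:
    GaussianPrior(mean=array([0.0]), sigma=array([-1.0]), variable_indices=[0])
except ValueError as error:
    print(error)  # [ GaussianPrior error ] >> All values given in 'sigma' must be ...
```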
{inference_tools-0.13.3.dist-info → inference_tools-0.13.4.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: inference-tools
- Version: 0.13.3
+ Version: 0.13.4
  Summary: A collection of python tools for Bayesian data analysis
  Author-email: Chris Bowman <chris.bowman.physics@gmail.com>
  License: MIT License
@@ -33,15 +33,15 @@ Classifier: Operating System :: OS Independent
  Requires-Python: >=3.9
  Description-Content-Type: text/markdown
  License-File: LICENSE
- Requires-Dist: numpy >=1.20
- Requires-Dist: scipy >=1.6.3
- Requires-Dist: matplotlib >=3.4.2
+ Requires-Dist: numpy>=1.20
+ Requires-Dist: scipy>=1.6.3
+ Requires-Dist: matplotlib>=3.4.2
  Provides-Extra: tests
- Requires-Dist: pytest >=3.3.0 ; extra == 'tests'
- Requires-Dist: pytest-cov >=3.0.0 ; extra == 'tests'
- Requires-Dist: pyqt5 >=5.15 ; extra == 'tests'
- Requires-Dist: hypothesis >=6.24 ; extra == 'tests'
- Requires-Dist: freezegun >=1.1.0 ; extra == 'tests'
+ Requires-Dist: pytest>=3.3.0; extra == "tests"
+ Requires-Dist: pytest-cov>=3.0.0; extra == "tests"
+ Requires-Dist: pyqt5>=5.15; extra == "tests"
+ Requires-Dist: hypothesis>=6.24; extra == "tests"
+ Requires-Dist: freezegun>=1.1.0; extra == "tests"

  # inference-tools

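The `Requires-Dist` changes above are formatting-only: the dependencies and version bounds themselves are unchanged. The rewrite (no space before the version specifier, double-quoted extra marker) is most likely just PEP 508 normalisation applied by the newer setuptools shown in the WHEEL diff below (73.0.1, up from 71.1.0).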
inference_tools-0.13.4.dist-info/RECORD ADDED
@@ -0,0 +1,33 @@
+ inference/__init__.py,sha256=Wheq9bSUF5Y_jAc_w_Avi4WW2kphDK0qHGM6FsIKSxY,275
+ inference/_version.py,sha256=dhPsd2j9Al5Z6JN-zXmB6-Uti2Ily2tvtEtKPTNHaCQ,413
+ inference/likelihoods.py,sha256=fS_k3mRr7bv6kgDt29u_OB6emU-ARVZktf7j-eXA-2U,10008
+ inference/plotting.py,sha256=U1M_F5I-UMtfHiaN1YihcxYq5gg_2MNyPm7MxF1LecY,19747
+ inference/posterior.py,sha256=ptPZgzT--ehbpu57nW9GmFuyovFOSmw56HWfuC-8GGA,3584
+ inference/priors.py,sha256=67cgKw7jDurda9UByFJ7jOoEJH1FyZDOHC9-nvr0nWY,19352
+ inference/approx/__init__.py,sha256=b8xCdshVeGHyao6-P0038QB71WOMLrcYXCOYiYjK7Tk,132
+ inference/approx/conditional.py,sha256=IeUismbo25qa1BUIqsZ2noum9_mLXNaORsg57abxBec,9515
+ inference/gp/__init__.py,sha256=R4iPgf8TdunkOv_VLwue7Fz3AjGWDTBop58nCmbmMQ0,801
+ inference/gp/acquisition.py,sha256=Yr1dshTYwkMIrKYPSwDZDusXXNsOpobrxaympJc5q3g,8158
+ inference/gp/covariance.py,sha256=DVN8lAtDjCWXYSsQwhQZxV6RJ8KZeo72unOCjHhTGg0,25919
+ inference/gp/inversion.py,sha256=AP5dprY8hibWgtCyQytJdErE1iVp_i7e6y9FxyGtKHo,9917
+ inference/gp/mean.py,sha256=6EJ_OxBi98netl9Rp2Ij7eXdWndGVS-X_g5VWnWMVkk,4084
+ inference/gp/optimisation.py,sha256=sPhakklWIgg1yEUhUzA-m5vl0kVPvHdcgnQ0OAGT8qs,11763
+ inference/gp/regression.py,sha256=10TzqVeUzUkuw8-Cbe4LbxevByTi5iE5QDdRClN7Nhk,25677
+ inference/mcmc/__init__.py,sha256=IsEhVSIpZCDNIqgSq_21M6DH6x8F1jJbYWM0e3S3QG4,445
+ inference/mcmc/base.py,sha256=cEh1LPmKd6JMop8EcuH3dvAeJYei88pcPTw1xe7tGKY,10496
+ inference/mcmc/ensemble.py,sha256=s9Xspq5r360_XmpRHCplN5cscD60UoYXlYqx3yVEhsM,15528
+ inference/mcmc/gibbs.py,sha256=9US0VqLEI_f70vrHg0sFZQneJMyjm8BF_l_0bD-ZqKI,24190
+ inference/mcmc/hmc.py,sha256=rfTqvD3aZqqHXcM17_Yj8U_2mt2eTQ_BI6hOeFqycoo,19420
+ inference/mcmc/parallel.py,sha256=HRK1Ka02iO5Q6m3282lqZeAlCZPXHIglC8RAlDE6Xd4,14082
+ inference/mcmc/pca.py,sha256=-XVs25hH8FRA6XY4xWEK1cUZ8oDDllW7t_vlK6FU7Gs,10739
+ inference/mcmc/utilities.py,sha256=YjpK3FvV0Q98jLusrZrvGck-bjm6uZZ1U7HHH3aly8g,6048
+ inference/pdf/__init__.py,sha256=gVmQ1HLTab6_oWMQN26A1r7PkqbApaJmBK-c7TIFxjY,270
+ inference/pdf/base.py,sha256=Zj5mfFmDqTe5cFz0biBxcvEaxdOUC-SsOUjebUEX7HM,5442
+ inference/pdf/hdi.py,sha256=j_W4kv70weXR7C2ltTHR6OUNkAK-kLQhnrnpPrjiLxQ,4282
+ inference/pdf/kde.py,sha256=KSl8y---602MlxoSVH8VknNQYZ2KAOTky50QU3jRw28,12999
+ inference/pdf/unimodal.py,sha256=9S05c0hq_rF-MLoDJgUmaJKRdcP8F9_Idj7Ncb6m9q0,6218
+ inference_tools-0.13.4.dist-info/LICENSE,sha256=Y0-EfO5pdxf6d0J6Er13ZSWiPZ2o6kHvM37eRgnJdww,1069
+ inference_tools-0.13.4.dist-info/METADATA,sha256=l2x2GqQSfSrgrLeZLLqQ-LX00bxB1CKjj3AxHvTJ7F8,5378
+ inference_tools-0.13.4.dist-info/WHEEL,sha256=Mdi9PDNwEZptOjTlUcAth7XJDFtKrHYaQMPulZeBCiQ,91
+ inference_tools-0.13.4.dist-info/top_level.txt,sha256=I7bsb71rLtH3yvVH_HSLXUosY2AwCxEG3vctNsEhbEM,10
+ inference_tools-0.13.4.dist-info/RECORD,,
{inference_tools-0.13.3.dist-info → inference_tools-0.13.4.dist-info}/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (71.1.0)
+ Generator: setuptools (73.0.1)
  Root-Is-Purelib: true
  Tag: py3-none-any

inference_tools-0.13.3.dist-info/RECORD DELETED
@@ -1,33 +0,0 @@
- inference/__init__.py,sha256=Wheq9bSUF5Y_jAc_w_Avi4WW2kphDK0qHGM6FsIKSxY,275
- inference/_version.py,sha256=VXQPyzbmOGe0nuuEEus9EQY40THABR5BB9WWse8Z8LI,413
- inference/likelihoods.py,sha256=WELpZ6kHcm8ygmuTo4bUpOxhcbs4dxEhX94aShbDgxA,9688
- inference/plotting.py,sha256=U1M_F5I-UMtfHiaN1YihcxYq5gg_2MNyPm7MxF1LecY,19747
- inference/posterior.py,sha256=ueyfmfmHjSh26iR58kf07IdbOiC5tSEUe0VlOWSN9bo,3481
- inference/priors.py,sha256=RcGkY1e04tA_CkzSzD1ixe-OfPJlrJ4-6QOg4agfDls,14753
- inference/approx/__init__.py,sha256=b8xCdshVeGHyao6-P0038QB71WOMLrcYXCOYiYjK7Tk,132
- inference/approx/conditional.py,sha256=d9NfUCa8X6O0UP840LWzKdW6ms4yk1Li1hN_ptSQ4-k,9448
- inference/gp/__init__.py,sha256=R4iPgf8TdunkOv_VLwue7Fz3AjGWDTBop58nCmbmMQ0,801
- inference/gp/acquisition.py,sha256=Yr1dshTYwkMIrKYPSwDZDusXXNsOpobrxaympJc5q3g,8158
- inference/gp/covariance.py,sha256=myHUAeWF-2-S1b8itABSkbDBoTrhdG9xDDjmKLEVGJg,25472
- inference/gp/inversion.py,sha256=AP5dprY8hibWgtCyQytJdErE1iVp_i7e6y9FxyGtKHo,9917
- inference/gp/mean.py,sha256=ZL9-U6mgkuDhigmGFazJylSEivyu9prP6d4hY6j3ayU,4039
- inference/gp/optimisation.py,sha256=DwSZTargltFEG_rubtcwAp7XzR-58wCxHaqbPEI4O-8,11490
- inference/gp/regression.py,sha256=QilRzPd1JHZKTZsWJoTifIq8E-ZrOtMGMvsTogLyV_U,25563
- inference/mcmc/__init__.py,sha256=IsEhVSIpZCDNIqgSq_21M6DH6x8F1jJbYWM0e3S3QG4,445
- inference/mcmc/base.py,sha256=qEgNKQ3uL8icH9GRgvGjglpBezLIzs1kHC5z27cwdCc,10399
- inference/mcmc/ensemble.py,sha256=s9Xspq5r360_XmpRHCplN5cscD60UoYXlYqx3yVEhsM,15528
- inference/mcmc/gibbs.py,sha256=7v9OoFY5SBAE2RXc_3Gq-riFd-7jq2vZULuewt5p83c,24159
- inference/mcmc/hmc.py,sha256=uvTgpUBucqDNUgwuzKs_leRmBSvFtMLzfLmZS9K4lD4,19334
- inference/mcmc/parallel.py,sha256=HRK1Ka02iO5Q6m3282lqZeAlCZPXHIglC8RAlDE6Xd4,14082
- inference/mcmc/pca.py,sha256=5KtNtMyoVLnsZmbu-n6T80_gbEWuhBg_JK-KUs5Qktk,10699
- inference/mcmc/utilities.py,sha256=guRahwzregpKt1cP0Pw9WWDBpMEalhAabQ8Tjo3G68M,6005
- inference/pdf/__init__.py,sha256=gVmQ1HLTab6_oWMQN26A1r7PkqbApaJmBK-c7TIFxjY,270
- inference/pdf/base.py,sha256=Zj5mfFmDqTe5cFz0biBxcvEaxdOUC-SsOUjebUEX7HM,5442
- inference/pdf/hdi.py,sha256=j_W4kv70weXR7C2ltTHR6OUNkAK-kLQhnrnpPrjiLxQ,4282
- inference/pdf/kde.py,sha256=EWOBuBpt3mkZ-hzEEBmutNQBy5vc9ipBZ0KEjqpf-AE,12966
- inference/pdf/unimodal.py,sha256=Fza18CRpAqcuHgwJFsnvOuXoymbah_Rcmo4jdcMe_xo,5504
- inference_tools-0.13.3.dist-info/LICENSE,sha256=Y0-EfO5pdxf6d0J6Er13ZSWiPZ2o6kHvM37eRgnJdww,1069
- inference_tools-0.13.3.dist-info/METADATA,sha256=4YgiAy5YnqKdAfRmMQXYNLf4rNYHjsNz-jOh_4rMMYE,5391
- inference_tools-0.13.3.dist-info/WHEEL,sha256=Wyh-_nZ0DJYolHNn1_hMa4lM7uDedD_RGVwbmTjyItk,91
- inference_tools-0.13.3.dist-info/top_level.txt,sha256=I7bsb71rLtH3yvVH_HSLXUosY2AwCxEG3vctNsEhbEM,10
- inference_tools-0.13.3.dist-info/RECORD,,