inference-tools 0.13.4__py3-none-any.whl → 0.14.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
inference/_version.py CHANGED
@@ -1,8 +1,13 @@
-# file generated by setuptools_scm
+# file generated by setuptools-scm
 # don't change, don't track in version control
+
+__all__ = ["__version__", "__version_tuple__", "version", "version_tuple"]
+
 TYPE_CHECKING = False
 if TYPE_CHECKING:
-    from typing import Tuple, Union
+    from typing import Tuple
+    from typing import Union
+
     VERSION_TUPLE = Tuple[Union[int, str], ...]
 else:
     VERSION_TUPLE = object
@@ -12,5 +17,5 @@ __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE
 
-__version__ = version = '0.13.4'
-__version_tuple__ = version_tuple = (0, 13, 4)
+__version__ = version = '0.14.0'
+__version_tuple__ = version_tuple = (0, 14, 0)
inference/mcmc/ensemble.py CHANGED
@@ -3,7 +3,7 @@ import matplotlib.pyplot as plt
 
 from numpy import array, ndarray, linspace, concatenate, savez, load
 from numpy import sqrt, var, cov, diag, isfinite, triu, exp, log, median
-from numpy.random import random, randint
+from numpy.random import default_rng
 
 from inference.mcmc.utilities import Bounds, ChainProgressPrinter
 from inference.mcmc.base import MarkovChain
@@ -52,6 +52,7 @@ class EnsembleSampler(MarkovChain):
         display_progress=True,
     ):
         self.posterior = posterior
+        self.rng = default_rng()
 
         if starting_positions is not None:
             # store core data
@@ -180,9 +181,9 @@ class EnsembleSampler(MarkovChain):
 
     def __proposal(self, i: int):
         # randomly select walker that isn't 'i'
-        j = (randint(low=1, high=self.n_walkers) + i) % self.n_walkers
+        j = (self.rng.integers(low=1, high=self.n_walkers) + i) % self.n_walkers
         # sample the stretch distance
-        z = 0.5 * (self.x_lwr + self.x_width * random()) ** 2
+        z = 0.5 * (self.x_lwr + self.x_width * self.rng.random()) ** 2
         prop = self.process_proposal(
             self.walker_positions[i, :]
             + z * (self.walker_positions[j, :] - self.walker_positions[i, :])
@@ -194,7 +195,7 @@ class EnsembleSampler(MarkovChain):
         Y, z = self.__proposal(i)
         p = self.posterior(Y)
         q = exp((self.n_parameters - 1) * log(z) + p - self.walker_probs[i])
-        if random() <= q:
+        if self.rng.random() <= q:
             self.walker_positions[i, :] = Y
             self.walker_probs[i] = p
             self.total_proposals[i].append(attempts)
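The change running through this file (and the rest of the mcmc module below) is the move from numpy's legacy global RNG functions to a per-object Generator created with default_rng(). A minimal sketch of the equivalence, using illustrative values rather than package code:

    from numpy.random import default_rng

    rng = default_rng()  # PCG64-backed Generator, seeded from OS entropy

    # numpy.random.randint(low, high) and Generator.integers(low, high)
    # both treat 'high' as exclusive, so the walker-selection logic is unchanged:
    n_walkers, i = 10, 3
    j = (rng.integers(low=1, high=n_walkers) + i) % n_walkers

    # numpy.random.random() maps directly onto rng.random():
    x_lwr, x_width = 0.5, 1.0  # hypothetical stretch-move bounds
    z = 0.5 * (x_lwr + x_width * rng.random()) ** 2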
inference/mcmc/gibbs.py CHANGED
@@ -8,7 +8,7 @@ from numpy import exp, log, mean, sqrt, argmax, diff
 from numpy import percentile
 from numpy import isfinite, savez, load
 
-from numpy.random import normal, random
+from numpy.random import default_rng
 from inference.mcmc.utilities import ChainProgressPrinter, effective_sample_size
 from inference.mcmc.base import MarkovChain
 
@@ -27,6 +27,7 @@ class Parameter:
     def __init__(self, value: float, sigma: float):
         self.samples = [value]  # list to store all samples for the parameter
         self.sigma = sigma  # the width parameter for the proposal distribution
+        self.rng = default_rng()
 
         # storage for proposal width adjustment algorithm
         self.avg = 0
@@ -91,7 +92,7 @@ class Parameter:
         if self.try_count > self.max_tries:
             self.adjust_sigma(0.25)
         # return the proposed value
-        return self.samples[-1] + self.sigma * normal()
+        return self.rng.normal(loc=self.samples[-1], scale=self.sigma)
 
     def abs_proposal(self):
         # increment the try count
@@ -100,7 +101,7 @@ class Parameter:
         if self.try_count > self.max_tries:
             self.adjust_sigma(0.25)
         # return the proposed value
-        return abs(self.samples[-1] + self.sigma * normal())
+        return abs(self.rng.normal(loc=self.samples[-1], scale=self.sigma))
 
     def boundary_proposal(self):
         # increment the try count
@@ -109,7 +110,7 @@ class Parameter:
         if self.try_count > self.max_tries:
             self.adjust_sigma(0.25)
         # generate the proposed value
-        prop = self.samples[-1] + self.sigma * normal()
+        prop = self.rng.normal(loc=self.samples[-1], scale=self.sigma)
 
         # we now pass the proposal through a 'reflecting' function where
         # proposals falling outside the boundary are reflected inside
@@ -248,6 +249,7 @@ class MetropolisChain(MarkovChain):
         display_progress: bool = True,
     ):
         self.inv_temp = 1.0 / temperature
+        self.rng = default_rng()
 
         if posterior is not None:
             self.posterior = posterior
@@ -295,9 +297,8 @@ class MetropolisChain(MarkovChain):
             if pval > self.probs[-1]:
                 break
             else:
-                test = random()
                 acceptance_prob = exp(pval - self.probs[-1])
-                if test < acceptance_prob:
+                if self.rng.random() < acceptance_prob:
                     break
 
         for p, v in zip(self.params, proposal):
@@ -643,7 +644,7 @@ class GibbsChain(MetropolisChain):
                # else calculate the acceptance probability and perform the test
                acceptance_prob = exp(p_new - p_old)
                p.submit_accept_prob(acceptance_prob)
-                if random() < acceptance_prob:
+                if self.rng.random() < acceptance_prob:
                    break
 
            p_old = deepcopy(p_new)  # NOTE - is deepcopy needed?
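The rewritten proposals in Parameter change form but not distribution: adding sigma-scaled standard-normal noise to the last sample is the same draw as a normal distribution with loc and scale set directly. A quick sketch (illustrative values, not package code):

    from numpy.random import default_rng

    rng = default_rng()
    x, sigma = 1.5, 0.3

    old_style = x + sigma * rng.standard_normal()  # legacy shift-and-scale form
    new_style = rng.normal(loc=x, scale=sigma)     # direct parameterisation
    # both are draws from N(x, sigma^2); only the call style differs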
inference/mcmc/hmc.py CHANGED
@@ -4,7 +4,7 @@ import matplotlib.pyplot as plt
 from numpy import ndarray, float64
 from numpy import array, savez, savez_compressed, load, zeros
 from numpy import sqrt, var, isfinite, exp, log, dot, mean, argmax, percentile
-from numpy.random import random, normal
+from numpy.random import default_rng
 
 from inference.mcmc.utilities import Bounds, ChainProgressPrinter, effective_sample_size
 from inference.mcmc.base import MarkovChain
@@ -73,6 +73,7 @@ class HamiltonianChain(MarkovChain):
         display_progress=True,
     ):
         self.posterior = posterior
+        self.rng = default_rng()
         # if no gradient function is supplied, default to finite difference
         self.grad = self.finite_diff if grad is None else grad
 
@@ -124,11 +125,11 @@ class HamiltonianChain(MarkovChain):
         """
         steps_taken = 0
         for attempt in range(self.max_attempts):
-            r0 = normal(size=self.n_parameters, scale=self.sqrt_mass)
+            r0 = self.rng.normal(size=self.n_parameters, scale=self.sqrt_mass)
             t0 = self.theta[-1]
             H0 = 0.5 * dot(r0, r0 * self.inv_mass) - self.probs[-1]
 
-            n_steps = int(self.steps * (1 + (random() - 0.5) * 0.2))
+            n_steps = int(self.steps * (1 + (self.rng.random() - 0.5) * 0.2))
             t, r = self.run_leapfrog(t0.copy(), r0.copy(), n_steps)
 
             steps_taken += n_steps
@@ -140,7 +141,7 @@ class HamiltonianChain(MarkovChain):
                 min(accept_prob, 1) if isfinite(accept_prob) else 0.0
             )
 
-            if (accept_prob >= 1) or (random() <= accept_prob):
+            if (accept_prob >= 1) or (self.rng.random() <= accept_prob):
                 break
         else:
             raise ValueError(
@@ -469,7 +470,7 @@ class EpsilonSelector:
         self.chk_int = 15  # interval of steps at which proposal widths are adjusted
         self.growth_factor = 1.4  # growth factor for self.chk_int
 
-    def add_probability(self, p):
+    def add_probability(self, p: float):
         self.num += 1
         self.avg += p
         self.var += max(p * (1 - p), 0.03)
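The momentum refresh above passes an array to the scale argument, giving one normal draw per parameter. A sketch of that pattern and the matching kinetic term in H0, assuming a diagonal mass matrix with hypothetical values:

    from numpy import array, dot, sqrt
    from numpy.random import default_rng

    rng = default_rng()
    mass = array([1.0, 4.0])                  # hypothetical diagonal mass matrix
    sqrt_mass, inv_mass = sqrt(mass), 1.0 / mass
    r0 = rng.normal(size=2, scale=sqrt_mass)  # per-parameter momentum draw
    kinetic = 0.5 * dot(r0, r0 * inv_mass)    # the kinetic term appearing in H0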
inference/mcmc/parallel.py CHANGED
@@ -7,7 +7,7 @@ from random import choice
 
 import matplotlib.pyplot as plt
 from numpy import arange, exp, identity, zeros
-from numpy.random import random, shuffle, seed, randint
+from numpy.random import default_rng
 from inference.plotting import transition_matrix_plot
 from inference.mcmc.base import MarkovChain
 
@@ -18,23 +18,19 @@ class ChainPool:
         self.pool_size = len(self.chains)
         self.pool = Pool(self.pool_size)
 
-    def advance(self, n):
+    def advance(self, n: int):
         self.chains = self.pool.map(
             self.adv_func, [(n, chain) for chain in self.chains]
         )
 
     @staticmethod
-    def adv_func(arg):
+    def adv_func(arg: tuple[int, MarkovChain]) -> MarkovChain:
         n, chain = arg
         chain.advance(n)
         return chain
 
 
-def tempering_process(
-    chain: MarkovChain, connection: Connection, end: Event, proc_seed: int
-):
-    # used to ensure each process has a different random seed
-    seed(proc_seed)
+def tempering_process(chain: MarkovChain, connection: Connection, end: Event):
     # main loop
     while not end.is_set():
         # poll the pipe until there is something to read
@@ -108,6 +104,7 @@ class ParallelTempering:
     """
 
     def __init__(self, chains: list[MarkovChain]):
+        self.rng = default_rng()
         self.shutdown_evt = Event()
         self.connections = []
         self.processes = []
@@ -132,7 +129,7 @@ class ParallelTempering:
             self.connections.append(parent_ctn)
             p = Process(
                 target=tempering_process,
-                args=(chn, child_ctn, self.shutdown_evt, randint(30000)),
+                args=(chn, child_ctn, self.shutdown_evt),
             )
             self.processes.append(p)
 
@@ -159,7 +156,7 @@ class ParallelTempering:
         Randomly pair up each chain, with uniform sampling across all possible pairings
         """
         proposed_swaps = arange(self.N_chains)
-        shuffle(proposed_swaps)
+        self.rng.shuffle(proposed_swaps)
         return [p for p in zip(proposed_swaps[::2], proposed_swaps[1::2])]
 
     def tight_pairs(self):
@@ -181,7 +178,7 @@ class ParallelTempering:
         leftovers = [
             i for i in range(self.N_chains) if not any(i in p for p in sample)
         ]
-        shuffle(leftovers)
+        self.rng.shuffle(leftovers)
         sample.extend(
             [
                 p if p[0] < p[1] else (p[1], p[0])
@@ -216,7 +213,7 @@ class ParallelTempering:
         pj = probabilities[j] / self.inv_temps[j]
         dp = pi - pj
 
-        if random() <= exp(-dt * dp):  # check if the swap is successful
+        if self.rng.random() <= exp(-dt * dp):  # check if the swap is successful
             Di = {
                 "task": "update_position",
                 "position": positions[i],
@@ -233,7 +230,7 @@ class ParallelTempering:
             self.connections[j].send(Di)
             self.successful_swaps[i, j] += 1
 
-    def advance(self, n, swap_interval=10):
+    def advance(self, n: int, swap_interval=10):
         """
         Advances each chain by a total of *n* steps, performing swap attempts
         at intervals set by the *swap_interval* keyword.
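Dropping the proc_seed argument from tempering_process is safe because an unseeded default_rng() gathers fresh OS entropy on every call, so each spawned process builds an independent stream without manual seed plumbing. A standalone sketch of that behaviour (not package code):

    from multiprocessing import Process
    from numpy.random import default_rng

    def worker(tag: str):
        rng = default_rng()  # fresh entropy per process; no explicit seed needed
        print(tag, rng.random())

    if __name__ == "__main__":
        processes = [Process(target=worker, args=(f"chain-{i}",)) for i in range(3)]
        for p in processes:
            p.start()
        for p in processes:
            p.join()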
inference/mcmc/pca.py CHANGED
@@ -4,7 +4,6 @@ import matplotlib.pyplot as plt
 
 from numpy import array, savez, load, zeros
 from numpy import sqrt, exp, dot, cov
-from numpy.random import random, normal
 from scipy.linalg import eigh
 
 from inference.mcmc.gibbs import MetropolisChain, Parameter
@@ -157,7 +156,7 @@ class PcaChain(MetropolisChain):
         # loop over each eigenvector and take a step along each
         for v, p in zip(self.directions, self.params):
             while True:
-                prop = theta0 + v * p.sigma * normal()
+                prop = theta0 + v * p.sigma * self.rng.normal()
                 prop = self.process_proposal(prop)
                 p_new = self.posterior(prop) * self.inv_temp
 
@@ -165,10 +164,9 @@ class PcaChain(MetropolisChain):
                     p.submit_accept_prob(1.0)
                     break
                 else:
-                    test = random()
                     acceptance_prob = exp(p_new - p_old)
                     p.submit_accept_prob(acceptance_prob)
-                    if test < acceptance_prob:
+                    if self.rng.random() < acceptance_prob:
                         break
 
             theta0 = copy(prop)
@@ -184,7 +182,7 @@ class PcaChain(MetropolisChain):
         if self.chain_length == self.next_update:
             self.update_directions()
 
-    def save(self, filename):
+    def save(self, filename: str):
         """
         Save the entire state of the chain object as an .npz file.
 
@@ -221,7 +219,7 @@ class PcaChain(MetropolisChain):
         savez(filename, **items)
 
     @classmethod
-    def load(cls, filename, posterior=None):
+    def load(cls, filename: str, posterior=None):
         """
         Load a chain object which has been previously saved using the save() method.
 
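PcaChain drops its own legacy RNG imports and appears to rely on the rng attribute now set by its parent MetropolisChain: each proposal is a single scalar normal draw scaling a fixed principal direction. A sketch with hypothetical values (in the real code v is an eigenvector of the sample covariance):

    from numpy import array
    from numpy.random import default_rng

    rng = default_rng()
    theta0 = array([0.0, 0.0])  # current position
    v = array([0.6, 0.8])       # hypothetical unit-length principal direction
    sigma = 0.25                # proposal width for this direction
    prop = theta0 + v * sigma * rng.normal()  # one scalar draw moves along v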
inference/pdf/hdi.py CHANGED
@@ -1,11 +1,9 @@
 from _warnings import warn
 from typing import Sequence
+from numpy import ndarray, array, sort, zeros, take_along_axis, expand_dims
 
-from numpy import ndarray, array, sort
-from scipy.optimize import differential_evolution
 
-
-def sample_hdi(sample: ndarray, fraction: float, allow_double=False):
+def sample_hdi(sample: ndarray, fraction: float) -> ndarray:
     """
     Estimate the highest-density interval(s) for a given sample.
 
@@ -13,26 +11,25 @@ def sample_hdi(sample: ndarray, fraction: float, allow_double=False):
     fraction of the elements in the given sample.
 
     :param sample: \
-        A sample for which the interval will be determined.
+        A sample for which the interval will be determined. If the sample is given
+        as a 2D numpy array, the interval calculation will be distributed over the
+        second dimension of the array, i.e. given a sample array of shape ``(m, n)``
+        the highest-density intervals are returned as an array of shape ``(2, n)``.
 
     :param float fraction: \
         The fraction of the total probability to be contained by the interval.
 
-    :param bool allow_double: \
-        When set to True, a double-interval is returned instead if one exists whose
-        total length is meaningfully shorter than the optimal single interval.
-
     :return: \
-        Tuple(s) specifying the lower and upper bounds of the highest-density interval(s).
+        The lower and upper bounds of the highest-density interval(s) as a numpy array.
     """
 
     # verify inputs are valid
     if not 0.0 < fraction < 1.0:
         raise ValueError(
             f"""\n
-            [ sample_hdi error ]
-            >> The 'fraction' argument must be a float between 0 and 1,
-            >> but the value given was {fraction}.
+            \r[ sample_hdi error ]
+            \r>> The 'fraction' argument must be a float between 0 and 1,
+            \r>> but the value given was {fraction}.
             """
         )
 
@@ -43,66 +40,72 @@ def sample_hdi(sample: ndarray, fraction: float, allow_double=False):
     else:
         raise ValueError(
             f"""\n
-            [ sample_hdi error ]
-            >> The 'sample' argument should be a numpy.ndarray or a
-            >> Sequence which can be converted to an array, but
-            >> instead has type {type(sample)}.
+            \r[ sample_hdi error ]
+            \r>> The 'sample' argument should be a numpy.ndarray or a
+            \r>> Sequence which can be converted to an array, but
+            \r>> instead has type {type(sample)}.
             """
         )
 
-    if s.size < 2:
+    if s.ndim > 2 or s.ndim == 0:
         raise ValueError(
             f"""\n
-            [ sample_hdi error ]
-            >> The given 'sample' array must contain at least 2 values.
+            \r[ sample_hdi error ]
+            \r>> The 'sample' argument should be a numpy.ndarray
+            \r>> with either one or two dimensions, but the given
+            \r>> array has dimensionality {s.ndim}.
             """
         )
 
-    if s.ndim > 1:
-        s = s.flatten()
-    s.sort()
-    n = s.size
-    L = int(fraction * n)
+    if s.ndim == 1:
+        s.resize([s.size, 1])
+
+    n_samples, n_intervals = s.shape
+    L = int(fraction * n_samples)
+
+    if n_samples < 2:
+        raise ValueError(
+            f"""\n
+            \r[ sample_hdi error ]
+            \r>> The first dimension of the given 'sample' array must
+            \r>> have have a length of at least 2.
+            """
+        )
 
     # check that we have enough samples to estimate the HDI for the chosen fraction
-    if n <= L:
+    if n_samples <= L:
         warn(
             f"""\n
-            [ sample_hdi warning ]
-            >> The given number of samples is insufficient to estimate the interval
-            >> for the given fraction.
+            \r[ sample_hdi warning ]
+            \r>> The given number of samples is insufficient to estimate the interval
+            \r>> for the given fraction.
             """
         )
-        return s[0], s[-1]
-    elif n - L < 20:
+
+    elif n_samples - L < 20:
         warn(
             f"""\n
-            [ sample_hdi warning ]
-            >> len(sample)*(1 - fraction) is small - calculated interval may be inaccurate.
+            \r[ sample_hdi warning ]
+            \r>> n_samples * (1 - fraction) is small - calculated interval may be inaccurate.
             """
         )
 
-    # find the optimal single HDI
-    widths = s[L:] - s[: n - L]
-    i = widths.argmin()
-    r1, w1 = (s[i], s[i + L]), s[i + L] - s[i]
-
-    if allow_double:
-        # now get the best 2-interval solution
-        minfunc = dbl_interval_length(sample, fraction)
-        bounds = minfunc.get_bounds()
-        de_result = differential_evolution(minfunc, bounds)
-        I1, I2 = minfunc.return_intervals(de_result.x)
-        w2 = (I2[1] - I2[0]) + (I1[1] - I1[0])
-
-        # return the split interval if the width reduction is non-trivial:
-        if allow_double and w2 < w1 * 0.99:
-            return I1, I2
+    # check that we have enough samples to estimate the HDI for the chosen fraction
+    s.sort(axis=0)
+    hdi = zeros([2, n_intervals])
+    if n_samples > L:
+        # find the optimal single HDI
+        widths = s[L:, :] - s[: n_samples - L, :]
+        i = expand_dims(widths.argmin(axis=0), axis=0)
+        hdi[0, :] = take_along_axis(s, i, 0).squeeze()
+        hdi[1, :] = take_along_axis(s, i + L, 0).squeeze()
     else:
-        return r1
+        hdi[0, :] = s[0, :]
+        hdi[1, :] = s[-1, :]
+    return hdi.squeeze()
 
 
-class dbl_interval_length:
+class DoubleIntervalLength:
     def __init__(self, sample, fraction):
         self.sample = sort(sample)
         self.f = fraction
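With the allow_double path removed, sample_hdi returns a numpy array and accepts 2D input directly. A usage sketch based on the updated docstring's (m, n) to (2, n) contract (function names from the package, sample data invented):

    from numpy.random import default_rng
    from inference.pdf.hdi import sample_hdi

    rng = default_rng(1)
    flat = rng.normal(size=5000)                     # 1D sample
    lower, upper = sample_hdi(flat, fraction=0.95)   # array of shape (2,)

    stacked = rng.normal(size=(5000, 3))             # 2D sample: 3 independent columns
    intervals = sample_hdi(stacked, fraction=0.95)   # array of shape (2, 3)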
inference/priors.py CHANGED
@@ -4,9 +4,11 @@
 
 from abc import ABC, abstractmethod
 from typing import Union, Iterable
-from numpy import atleast_1d, log, pi, zeros, concatenate, where, ndarray, isfinite
-from numpy.random import normal, exponential, uniform
 from itertools import chain
+from numpy import atleast_1d, log, pi, zeros, concatenate, where, ndarray, isfinite
+from numpy.random import default_rng
+
+rng = default_rng()
 
 
 class BasePrior(ABC):
@@ -294,7 +296,7 @@ class GaussianPrior(BasePrior):
         :returns: \
             A single sample from the prior distribution as a 1D ``numpy.ndarray``.
         """
-        return normal(loc=self.mean, scale=self.sigma)
+        return rng.normal(loc=self.mean, scale=self.sigma)
 
     @classmethod
     def combine(cls, priors):
@@ -377,7 +379,7 @@ class ExponentialPrior(BasePrior):
         :returns: \
             A single sample from the prior distribution as a 1D ``numpy.ndarray``.
         """
-        return exponential(scale=self.beta)
+        return rng.exponential(scale=self.beta)
 
     @classmethod
     def combine(cls, priors: list[BasePrior]):
@@ -470,7 +472,7 @@ class UniformPrior(BasePrior):
         :returns: \
             A single sample from the prior distribution as a 1D ``numpy.ndarray``.
         """
-        return uniform(low=self.lower, high=self.upper)
+        return rng.uniform(low=self.lower, high=self.upper)
 
     @classmethod
     def combine(cls, priors):
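priors.py takes a different approach from the mcmc module: a single module-level Generator, created at import time and shared by every prior's sample() method. A minimal sketch of that pattern (ToyPrior is hypothetical, not a class from the package):

    from numpy.random import default_rng

    rng = default_rng()  # one shared, module-wide Generator

    class ToyPrior:
        def __init__(self, mean: float, sigma: float):
            self.mean, self.sigma = mean, sigma

        def sample(self) -> float:
            return rng.normal(loc=self.mean, scale=self.sigma)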
inference_tools-{0.13.4 → 0.14.0}.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.2
 Name: inference-tools
-Version: 0.13.4
+Version: 0.14.0
 Summary: A collection of python tools for Bayesian data analysis
 Author-email: Chris Bowman <chris.bowman.physics@gmail.com>
 License: MIT License
inference_tools-{0.13.4 → 0.14.0}.dist-info/RECORD RENAMED
@@ -1,9 +1,9 @@
 inference/__init__.py,sha256=Wheq9bSUF5Y_jAc_w_Avi4WW2kphDK0qHGM6FsIKSxY,275
-inference/_version.py,sha256=dhPsd2j9Al5Z6JN-zXmB6-Uti2Ily2tvtEtKPTNHaCQ,413
+inference/_version.py,sha256=CNbGkYuFNWjd3DIUYNizEoC18xKEHqri0oYO6s7QgJ8,513
 inference/likelihoods.py,sha256=fS_k3mRr7bv6kgDt29u_OB6emU-ARVZktf7j-eXA-2U,10008
 inference/plotting.py,sha256=U1M_F5I-UMtfHiaN1YihcxYq5gg_2MNyPm7MxF1LecY,19747
 inference/posterior.py,sha256=ptPZgzT--ehbpu57nW9GmFuyovFOSmw56HWfuC-8GGA,3584
-inference/priors.py,sha256=67cgKw7jDurda9UByFJ7jOoEJH1FyZDOHC9-nvr0nWY,19352
+inference/priors.py,sha256=zDuIgJTZrqEqkp8rE-aBRlAuqBacR9aC_QNm8jNIYl8,19368
 inference/approx/__init__.py,sha256=b8xCdshVeGHyao6-P0038QB71WOMLrcYXCOYiYjK7Tk,132
 inference/approx/conditional.py,sha256=IeUismbo25qa1BUIqsZ2noum9_mLXNaORsg57abxBec,9515
 inference/gp/__init__.py,sha256=R4iPgf8TdunkOv_VLwue7Fz3AjGWDTBop58nCmbmMQ0,801
@@ -15,19 +15,19 @@ inference/gp/optimisation.py,sha256=sPhakklWIgg1yEUhUzA-m5vl0kVPvHdcgnQ0OAGT8qs,
 inference/gp/regression.py,sha256=10TzqVeUzUkuw8-Cbe4LbxevByTi5iE5QDdRClN7Nhk,25677
 inference/mcmc/__init__.py,sha256=IsEhVSIpZCDNIqgSq_21M6DH6x8F1jJbYWM0e3S3QG4,445
 inference/mcmc/base.py,sha256=cEh1LPmKd6JMop8EcuH3dvAeJYei88pcPTw1xe7tGKY,10496
-inference/mcmc/ensemble.py,sha256=s9Xspq5r360_XmpRHCplN5cscD60UoYXlYqx3yVEhsM,15528
-inference/mcmc/gibbs.py,sha256=9US0VqLEI_f70vrHg0sFZQneJMyjm8BF_l_0bD-ZqKI,24190
-inference/mcmc/hmc.py,sha256=rfTqvD3aZqqHXcM17_Yj8U_2mt2eTQ_BI6hOeFqycoo,19420
-inference/mcmc/parallel.py,sha256=HRK1Ka02iO5Q6m3282lqZeAlCZPXHIglC8RAlDE6Xd4,14082
-inference/mcmc/pca.py,sha256=-XVs25hH8FRA6XY4xWEK1cUZ8oDDllW7t_vlK6FU7Gs,10739
+inference/mcmc/ensemble.py,sha256=JRXu7SBYXN4Y9RzgA6kGUHpZNw4q4A9wf0KOAQdlz0E,15585
+inference/mcmc/gibbs.py,sha256=f-eccDBILfaZercZii3vuJ29V505VUsCHoxhD9gZ7xA,24288
+inference/mcmc/hmc.py,sha256=7SDjiwzVCqme1g8v65XldQpW5dnt7O3p1IG2AYGBb4o,19484
+inference/mcmc/parallel.py,sha256=SKLzMP4aqIj1xsxKuByA1lr1GdgIu5pPzVw7hlfXZEQ,14053
+inference/mcmc/pca.py,sha256=NxC81NghGlBQslFVOk2HzpsnCjlEdDnv_w8es4Qe7PU,10695
 inference/mcmc/utilities.py,sha256=YjpK3FvV0Q98jLusrZrvGck-bjm6uZZ1U7HHH3aly8g,6048
 inference/pdf/__init__.py,sha256=gVmQ1HLTab6_oWMQN26A1r7PkqbApaJmBK-c7TIFxjY,270
 inference/pdf/base.py,sha256=Zj5mfFmDqTe5cFz0biBxcvEaxdOUC-SsOUjebUEX7HM,5442
-inference/pdf/hdi.py,sha256=j_W4kv70weXR7C2ltTHR6OUNkAK-kLQhnrnpPrjiLxQ,4282
+inference/pdf/hdi.py,sha256=soFw3fKQdzxbGNhU9BvFHdt0uGKfhus3E3vM6L47yhY,4638
 inference/pdf/kde.py,sha256=KSl8y---602MlxoSVH8VknNQYZ2KAOTky50QU3jRw28,12999
 inference/pdf/unimodal.py,sha256=9S05c0hq_rF-MLoDJgUmaJKRdcP8F9_Idj7Ncb6m9q0,6218
-inference_tools-0.13.4.dist-info/LICENSE,sha256=Y0-EfO5pdxf6d0J6Er13ZSWiPZ2o6kHvM37eRgnJdww,1069
-inference_tools-0.13.4.dist-info/METADATA,sha256=l2x2GqQSfSrgrLeZLLqQ-LX00bxB1CKjj3AxHvTJ7F8,5378
-inference_tools-0.13.4.dist-info/WHEEL,sha256=Mdi9PDNwEZptOjTlUcAth7XJDFtKrHYaQMPulZeBCiQ,91
-inference_tools-0.13.4.dist-info/top_level.txt,sha256=I7bsb71rLtH3yvVH_HSLXUosY2AwCxEG3vctNsEhbEM,10
-inference_tools-0.13.4.dist-info/RECORD,,
+inference_tools-0.14.0.dist-info/LICENSE,sha256=Y0-EfO5pdxf6d0J6Er13ZSWiPZ2o6kHvM37eRgnJdww,1069
+inference_tools-0.14.0.dist-info/METADATA,sha256=vlamppwmRyKBi3LuZP-IVRMsDy2UhdzoEiabRWw0aoE,5378
+inference_tools-0.14.0.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
+inference_tools-0.14.0.dist-info/top_level.txt,sha256=I7bsb71rLtH3yvVH_HSLXUosY2AwCxEG3vctNsEhbEM,10
+inference_tools-0.14.0.dist-info/RECORD,,
inference_tools-{0.13.4 → 0.14.0}.dist-info/WHEEL RENAMED
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (73.0.1)
+Generator: setuptools (76.0.0)
 Root-Is-Purelib: true
 Tag: py3-none-any