inference-tools 0.13.3__py3-none-any.whl → 0.14.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
inference/_version.py CHANGED
@@ -1,8 +1,13 @@
-# file generated by setuptools_scm
+# file generated by setuptools-scm
 # don't change, don't track in version control
+
+__all__ = ["__version__", "__version_tuple__", "version", "version_tuple"]
+
 TYPE_CHECKING = False
 if TYPE_CHECKING:
-    from typing import Tuple, Union
+    from typing import Tuple
+    from typing import Union
+
     VERSION_TUPLE = Tuple[Union[int, str], ...]
 else:
     VERSION_TUPLE = object
@@ -12,5 +17,5 @@ __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE
 
-__version__ = version = '0.13.3'
-__version_tuple__ = version_tuple = (0, 13, 3)
+__version__ = version = '0.14.0'
+__version_tuple__ = version_tuple = (0, 14, 0)
@@ -12,7 +12,7 @@ class Conditional:
         self.theta = theta
         self.variable_index = variable_index
 
-    def __call__(self, x):
+    def __call__(self, x: ndarray):
         t = self.theta.copy()
         t[self.variable_index] = x
         return self.posterior(t)
@@ -58,12 +58,12 @@ def binary_search(
     return x_new
 
 
-def trapezium_full(x, dh):
+def trapezium_full(x: ndarray, dh: ndarray) -> ndarray:
     b = dh - 1
     return (b + sqrt(b**2 + 4 * x * dh)) / (2 * dh)
 
 
-def trapezium_near_zero(x, dh):
+def trapezium_near_zero(x: ndarray, dh: ndarray) -> ndarray:
     return x + (1 - x) * x * dh
 
 
@@ -11,23 +11,23 @@ class CovarianceFunction(ABC):
     """
 
     @abstractmethod
-    def pass_spatial_data(self, x):
+    def pass_spatial_data(self, x: ndarray):
        pass
 
     @abstractmethod
-    def estimate_hyperpar_bounds(self, y):
+    def estimate_hyperpar_bounds(self, y: ndarray):
        pass
 
     @abstractmethod
-    def __call__(self, u, v, theta):
+    def __call__(self, u: ndarray, v: ndarray, theta: ndarray) -> ndarray:
        pass
 
     @abstractmethod
-    def build_covariance(self, theta):
+    def build_covariance(self, theta: ndarray) -> ndarray:
        pass
 
     @abstractmethod
-    def covariance_and_gradients(self, theta):
+    def covariance_and_gradients(self, theta: ndarray):
        pass
 
     def __add__(self, other):
@@ -202,7 +202,7 @@ class SquaredExponential(CovarianceFunction):
         length-2 tuples giving the lower/upper bounds for each parameter.
     """
 
-    def __init__(self, hyperpar_bounds=None):
+    def __init__(self, hyperpar_bounds: list[tuple] = None):
         self.bounds = hyperpar_bounds
         self.n_params: int
         self.dx: ndarray
@@ -237,14 +237,14 @@ class SquaredExponential(CovarianceFunction):
             upr = log(self.dx[:, :, i].max()) + 2
             self.bounds.append((lwr, upr))
 
-    def __call__(self, u, v, theta):
+    def __call__(self, u: ndarray, v: ndarray, theta: ndarray) -> ndarray:
         a = exp(theta[0])
         L = exp(theta[1:])
         D = -0.5 * (u[:, None, :] - v[None, :, :]) ** 2
         C = exp((D / L[None, None, :] ** 2).sum(axis=2))
         return (a**2) * C
 
-    def build_covariance(self, theta):
+    def build_covariance(self, theta: ndarray) -> ndarray:
         """
         Optimized version of self.matrix() specifically for the data
         covariance matrix where the vectors v1 & v2 are both self.x.
@@ -254,7 +254,7 @@ class SquaredExponential(CovarianceFunction):
         C = exp((self.distances / L[None, None, :] ** 2).sum(axis=2)) + self.epsilon
         return (a**2) * C
 
-    def gradient_terms(self, v, x, theta):
+    def gradient_terms(self, v: ndarray, x: ndarray, theta: ndarray):
         """
         Calculates the covariance-function specific parts of
         the expression for the predictive mean and covariance
@@ -265,7 +265,7 @@ class SquaredExponential(CovarianceFunction):
         A = (x - v[None, :]) / L[None, :] ** 2
         return A.T, (a / L) ** 2
 
-    def covariance_and_gradients(self, theta):
+    def covariance_and_gradients(self, theta: ndarray):
         a = exp(theta[0])
         L = exp(theta[1:])
         C = exp((self.distances / L[None, None, :] ** 2).sum(axis=2)) + self.epsilon
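For reference, the kernel implemented by the annotated __call__ above (amplitude a = exp(theta[0]), per-dimension length-scales L = exp(theta[1:])) is the standard squared-exponential covariance, which in LaTeX form reads

    k(u, v) = a^2 \exp\left( -\sum_{d} \frac{(u_d - v_d)^2}{2 L_d^2} \right)

matching D = -0.5 * (u - v)**2 followed by exp((D / L**2).sum(axis=2)) in the code.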
@@ -303,7 +303,7 @@ class RationalQuadratic(CovarianceFunction):
         length-2 tuples giving the lower/upper bounds for each parameter.
     """
 
-    def __init__(self, hyperpar_bounds=None):
+    def __init__(self, hyperpar_bounds: list[tuple] = None):
         self.bounds = hyperpar_bounds
 
     def pass_spatial_data(self, x: ndarray):
@@ -332,7 +332,7 @@ class RationalQuadratic(CovarianceFunction):
             upr = log(self.dx[:, :, i].max()) + 2
             self.bounds.append((lwr, upr))
 
-    def __call__(self, u, v, theta):
+    def __call__(self, u: ndarray, v: ndarray, theta: ndarray) -> ndarray:
         a = exp(theta[0])
         k = exp(theta[1])
         L = exp(theta[2:])
@@ -340,14 +340,14 @@ class RationalQuadratic(CovarianceFunction):
         Z = (D / L[None, None, :] ** 2).sum(axis=2)
         return (a**2) * (1 + Z / k) ** (-k)
 
-    def build_covariance(self, theta):
+    def build_covariance(self, theta: ndarray) -> ndarray:
         a = exp(theta[0])
         k = exp(theta[1])
         L = exp(theta[2:])
         Z = (self.distances / L[None, None, :] ** 2).sum(axis=2)
         return (a**2) * ((1 + Z / k) ** (-k) + self.epsilon)
 
-    def covariance_and_gradients(self, theta):
+    def covariance_and_gradients(self, theta: ndarray):
         a = exp(theta[0])
         q = exp(theta[1])
         L = exp(theta[2:])
@@ -437,11 +437,11 @@ class ChangePoint(CovarianceFunction):
         for K in self.cov:
             if not isinstance(K, CovarianceFunction):
                 raise TypeError(
-                    """
-                    [ ChangePoint error ]
-                    >> Each of the specified covariance kernels must be an instance of
-                    >> a class which inherits from the 'CovarianceFunction' abstract
-                    >> base-class.
+                    """\n
+                    \r[ ChangePoint error ]
+                    \r>> Each of the specified covariance kernels must be an instance of
+                    \r>> a class which inherits from the 'CovarianceFunction' abstract
+                    \r>> base-class.
                     """
                 )
 
@@ -450,9 +450,9 @@ class ChangePoint(CovarianceFunction):
         if location_bounds is not None:
             if len(location_bounds) != self.n_kernels - 1:
                 raise ValueError(
-                    """
-                    [ ChangePoint error ]
-                    >> The length of 'location_bounds' must be one less than the number of kernels
+                    """\n
+                    \r[ ChangePoint error ]
+                    \r>> The length of 'location_bounds' must be one less than the number of kernels
                     """
                 )
             self.location_bounds = [check_bounds(lb) for lb in location_bounds]
@@ -462,9 +462,9 @@ class ChangePoint(CovarianceFunction):
         if width_bounds is not None:
             if len(width_bounds) != self.n_kernels - 1:
                 raise ValueError(
-                    """
-                    [ ChangePoint error ]
-                    >> The length of 'width_bounds' must be one less than the number of kernels
+                    """\n
+                    \r[ ChangePoint error ]
+                    \r>> The length of 'width_bounds' must be one less than the number of kernels
                     """
                 )
             self.width_bounds = [check_bounds(wb) for wb in width_bounds]
@@ -526,7 +526,7 @@ class ChangePoint(CovarianceFunction):
         # check for consistency of length of bounds
         assert self.n_params == len(self.bounds)
 
-    def __call__(self, u: ndarray, v: ndarray, theta):
+    def __call__(self, u: ndarray, v: ndarray, theta: ndarray) -> ndarray:
         kernel_coeffs = [1.0]
         for slc in self.cp_slc:
             w_u = self.logistic(u[:, self.axis], theta[slc])
@@ -543,7 +543,7 @@ class ChangePoint(CovarianceFunction):
             for i in range(self.n_kernels)
         )
 
-    def build_covariance(self, theta):
+    def build_covariance(self, theta: ndarray) -> ndarray:
         kernel_coeffs = [1.0]
         for slc in self.cp_slc:
             w = self.logistic(self.x_cp, theta[slc])
@@ -558,7 +558,7 @@ class ChangePoint(CovarianceFunction):
             for i in range(self.n_kernels)
         )
 
-    def covariance_and_gradients(self, theta):
+    def covariance_and_gradients(self, theta: ndarray):
         K_vals = []
         K_grads = []
         for i in range(self.n_kernels):
@@ -593,12 +593,12 @@ class ChangePoint(CovarianceFunction):
         return covar, gradients
 
     @staticmethod
-    def logistic(x, theta):
+    def logistic(x, theta: ndarray):
         z = (x - theta[0]) / theta[1]
         return 1.0 / (1.0 + exp(-z))
 
     @staticmethod
-    def logistic_and_gradient(x, theta):
+    def logistic_and_gradient(x, theta: ndarray):
         z = (x - theta[0]) / theta[1]
         f = 1.0 / (1.0 + exp(-z))
         dfdc = -f * (1 - f) / theta[1]
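From the two static methods above, the change-point weighting is a logistic sigmoid with location theta[0] and width theta[1], and dfdc is its derivative with respect to the location:

    f(x) = \frac{1}{1 + e^{-z}}, \qquad z = \frac{x - \theta_0}{\theta_1}, \qquad \frac{\partial f}{\partial \theta_0} = -\frac{f(1 - f)}{\theta_1}

(the remainder of logistic_and_gradient, not shown in this hunk, presumably returns the corresponding width derivative as well).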
@@ -643,7 +643,7 @@ class HeteroscedasticNoise(CovarianceFunction):
         sequence of length-2 tuples giving the lower/upper bounds.
     """
 
-    def __init__(self, hyperpar_bounds=None):
+    def __init__(self, hyperpar_bounds: list[tuple] = None):
         self.bounds = hyperpar_bounds
 
     def pass_spatial_data(self, x: ndarray):
@@ -668,10 +668,10 @@ class HeteroscedasticNoise(CovarianceFunction):
         s = log(ptp(y))
         self.bounds = [(s - 8, s + 2) for _ in range(self.n_params)]
 
-    def __call__(self, u, v, theta):
+    def __call__(self, u: ndarray, v: ndarray, theta: ndarray) -> ndarray:
         return zeros([u.size, v.size])
 
-    def build_covariance(self, theta):
+    def build_covariance(self, theta: ndarray) -> ndarray:
         """
         Optimized version of self.matrix() specifically for the data
         covariance matrix where the vectors v1 & v2 are both self.x.
@@ -679,7 +679,7 @@ class HeteroscedasticNoise(CovarianceFunction):
         sigma_sq = exp(2 * theta)
         return diag(sigma_sq)
 
-    def covariance_and_gradients(self, theta):
+    def covariance_and_gradients(self, theta: ndarray):
         sigma_sq = exp(2 * theta)
         K = diag(sigma_sq)
         grads = [s * dk for s, dk in zip(sigma_sq, self.dK)]
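As the code above shows, HeteroscedasticNoise adds no covariance between distinct points (__call__ returns a zero matrix) and contributes a purely diagonal data covariance with one independent noise level per data point,

    K = \mathrm{diag}\left( e^{2\theta_1}, \ldots, e^{2\theta_n} \right)

so each hyper-parameter theta_i is the log of the noise standard deviation of the i-th data point.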
inference/gp/mean.py CHANGED
@@ -8,23 +8,23 @@ class MeanFunction(ABC):
     """
 
     @abstractmethod
-    def pass_spatial_data(self, x):
+    def pass_spatial_data(self, x: ndarray):
        pass
 
     @abstractmethod
-    def estimate_hyperpar_bounds(self, y):
+    def estimate_hyperpar_bounds(self, y: ndarray):
        pass
 
     @abstractmethod
-    def __call__(self, q, theta):
+    def __call__(self, q, theta: ndarray):
        pass
 
     @abstractmethod
-    def build_mean(self, theta):
+    def build_mean(self, theta: ndarray):
        pass
 
     @abstractmethod
-    def mean_and_gradients(self, theta):
+    def mean_and_gradients(self, theta: ndarray):
        pass
 
 
@@ -7,7 +7,7 @@ import matplotlib.pyplot as plt
 
 from inference.gp.regression import GpRegressor
 from inference.gp.covariance import CovarianceFunction, SquaredExponential
-from inference.gp.acquisition import ExpectedImprovement
+from inference.gp.acquisition import AcquisitionFunction, ExpectedImprovement
 from inference.gp.mean import MeanFunction, ConstantMean
 
 
@@ -92,7 +92,7 @@ class GpOptimiser:
         kernel: CovarianceFunction = SquaredExponential,
         mean: MeanFunction = ConstantMean,
         cross_val: bool = False,
-        acquisition=ExpectedImprovement,
+        acquisition: AcquisitionFunction = ExpectedImprovement,
         optimizer: str = "bfgs",
         n_processes: int = 1,
     ):
@@ -165,7 +165,12 @@ class GpOptimiser:
                 self.y_err = append(self.y_err, new_y_err)
             else:
                 raise ValueError(
-                    "y_err must be specified for new evaluations if y_err was specified during __init__"
+                    """\n
+                    \r[ GpOptimiser error ]
+                    \r>> 'new_y_err' argument of the 'add_evaluation' method must be
+                    \r>> specified if the 'y_err' argument was specified when the
+                    \r>> instance of GpOptimiser was initialised.
+                    """
                 )
 
         # re-train the GP
@@ -243,7 +248,7 @@ class GpOptimiser:
             proposed_ev = proposed_ev[0]
         return proposed_ev
 
-    def plot_results(self, filename=None, show_plot=True):
+    def plot_results(self, filename: str = None, show_plot=True):
         fig = plt.figure(figsize=(10, 4))
         ax1 = fig.add_subplot(121)
         maxvals = maximum.accumulate(self.y)
@@ -98,8 +98,8 @@ class GpRegressor:
         if self.y.ndim != 1:
             raise ValueError(
                 f"""\n
-                [ GpRegressor error ]
-                >> 'y' argument must be a 1D array, but instead has shape {self.y.shape}
+                \r[ GpRegressor error ]
+                \r>> 'y' argument must be a 1D array, but instead has shape {self.y.shape}
                 """
             )
 
@@ -113,19 +113,19 @@
         else:
             raise ValueError(
                 f"""\n
-                [ GpRegressor Error ]
-                >> 'x' argument must be a 2D array, but instead has
-                >> {self.x.ndim} dimensions and shape {self.x.shape}.
+                \r[ GpRegressor Error ]
+                \r>> 'x' argument must be a 2D array, but instead has
+                \r>> {self.x.ndim} dimensions and shape {self.x.shape}.
                 """
             )
 
         if self.x.shape[0] != self.n_points:
             raise ValueError(
                 f"""\n
-                [ GpRegressor Error ]
-                >> The first dimension of the 'x' array must be equal in size
-                >> to the 'y' array.
-                >> 'x' has shape {self.x.shape}, but 'y' has size {self.y.size}.
+                \r[ GpRegressor Error ]
+                \r>> The first dimension of the 'x' array must be equal in size
+                \r>> to the 'y' array.
+                \r>> 'x' has shape {self.x.shape}, but 'y' has size {self.y.size}.
                 """
             )
 
@@ -215,7 +215,7 @@ class GpRegressor:
 
         return array(mu_q), sqrt(abs(array(errs)))
 
-    def set_hyperparameters(self, hyperpars):
+    def set_hyperparameters(self, hyperpars: ndarray):
         """
         Update the hyper-parameter values of the model.
 
@@ -243,7 +243,7 @@ class GpRegressor:
             self.L.T, solve_triangular(self.L, self.y - self.mu, lower=True)
         )
 
-    def check_error_data(self, y_err, y_cov):
+    def check_error_data(self, y_err, y_cov) -> ndarray:
         if y_cov is not None:
             # if y_cov is given as a list or tuple, attempt conversion to an array
             if any([type(y_cov) is t for t in [list, tuple]]):
@@ -321,7 +321,7 @@ class GpRegressor:
         else:
             return zeros([self.n_points, self.n_points])
 
-    def process_points(self, points):
+    def process_points(self, points: ndarray) -> ndarray:
         x = points if isinstance(points, ndarray) else array(points)
 
         if x.ndim <= 1 and self.n_dimensions == 1:
@@ -448,7 +448,7 @@ class GpRegressor:
         sigma = K_qq - (Q.T @ Q)
         return mu, sigma
 
-    def loo_predictions(self):
+    def loo_predictions(self) -> tuple[ndarray, ndarray]:
         """
         Calculates the 'leave-one out' (LOO) predictions for the data, where each data
         point is removed from the training set and then has its value predicted using
@@ -465,7 +465,7 @@ class GpRegressor:
         sigma = sqrt(var)
         return mu, sigma
 
-    def loo_likelihood(self, theta: ndarray):
+    def loo_likelihood(self, theta: ndarray) -> float:
         """
         Calculates the 'leave-one out' (LOO) log-likelihood.
 
@@ -525,7 +525,7 @@ class GpRegressor:
 
         return LOO, grad
 
-    def marginal_likelihood(self, theta: ndarray):
+    def marginal_likelihood(self, theta: ndarray) -> float:
         """
         returns the log-marginal likelihood for the supplied hyper-parameter values.
 
@@ -566,7 +566,7 @@ class GpRegressor:
         grad[self.cov_slice] = array([0.5 * (Q * dK.T).sum() for dK in grad_K])
         return LML, grad
 
-    def differential_evo(self):
+    def differential_evo(self) -> ndarray:
         # optimise the hyper-parameters
         opt_result = differential_evolution(
             func=lambda x: -self.model_selector(x), bounds=self.hp_bounds
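The GpRegressor error messages rewritten earlier in this file encode the expected input shapes: 'x' must be a 2D array of shape (n_points, n_dimensions) and 'y' a 1D array whose size matches the first dimension of 'x'. A minimal sketch of inputs that satisfy those checks (the import path is taken from the optimisation hunk above; the data values and the positional call are illustrative assumptions):

    import numpy as np
    from inference.gp.regression import GpRegressor

    # 25 points in one spatial dimension: x has shape (25, 1), y has shape (25,)
    x = np.linspace(0.0, 10.0, 25).reshape(25, 1)
    y = np.sin(x[:, 0]) + 0.1 * np.random.default_rng(1).normal(size=25)

    gpr = GpRegressor(x, y)
    mu_loo, sigma_loo = gpr.loo_predictions()  # leave-one-out predictions, per the hunk above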
inference/likelihoods.py CHANGED
@@ -73,14 +73,16 @@ class Likelihood(ABC):
             )
 
     @abstractmethod
-    def _log_likelihood(self, predictions):
+    def _log_likelihood(self, predictions: ndarray) -> float:
        pass
 
     @abstractmethod
-    def _log_likelihood_gradient(self, predictions, predictions_jacobian):
+    def _log_likelihood_gradient(
+        self, predictions: ndarray, predictions_jacobian: ndarray
+    ) -> ndarray:
        pass
 
-    def __call__(self, theta):
+    def __call__(self, theta: ndarray) -> float:
         """
         Returns the log-likelihood value for the given set of model parameters.
 
@@ -92,7 +94,7 @@ class Likelihood(ABC):
         """
         return self._log_likelihood(predictions=self.model(theta))
 
-    def gradient(self, theta):
+    def gradient(self, theta: ndarray) -> ndarray:
         """
         Returns the gradient of the log-likelihood with respect to model parameters.
 
@@ -110,10 +112,10 @@ class Likelihood(ABC):
             predictions_jacobian=self.model_jacobian(theta),
         )
 
-    def cost(self, theta):
+    def cost(self, theta: ndarray) -> float:
         return -self.__call__(theta)
 
-    def cost_gradient(self, theta):
+    def cost_gradient(self, theta: ndarray) -> ndarray:
         return -self.gradient(theta)
 
 
@@ -154,11 +156,13 @@ class GaussianLikelihood(Likelihood):
         self.inv_sigma_sqr = self.inv_sigma**2
         self.normalisation = -log(self.sigma).sum() - 0.5 * log(2 * pi) * self.n_data
 
-    def _log_likelihood(self, predictions):
+    def _log_likelihood(self, predictions: ndarray) -> float:
         z = (self.y - predictions) * self.inv_sigma
         return -0.5 * (z**2).sum() + self.normalisation
 
-    def _log_likelihood_gradient(self, predictions, predictions_jacobian):
+    def _log_likelihood_gradient(
+        self, predictions: ndarray, predictions_jacobian: ndarray
+    ) -> ndarray:
         dL_dF = (self.y - predictions) * self.inv_sigma_sqr
         return dL_dF @ predictions_jacobian
 
@@ -199,11 +203,13 @@ class CauchyLikelihood(Likelihood):
         self.inv_gamma = 1.0 / self.gamma
         self.normalisation = -log(pi * self.gamma).sum()
 
-    def _log_likelihood(self, predictions):
+    def _log_likelihood(self, predictions: ndarray) -> float:
         z = (self.y - predictions) * self.inv_gamma
         return -log(1 + z**2).sum() + self.normalisation
 
-    def _log_likelihood_gradient(self, predictions, predictions_jacobian):
+    def _log_likelihood_gradient(
+        self, predictions: ndarray, predictions_jacobian: ndarray
+    ) -> ndarray:
         z = (self.y - predictions) * self.inv_gamma
         dL_dF = 2 * self.inv_gamma * z / (1 + z**2)
         return dL_dF @ predictions_jacobian
@@ -246,11 +252,13 @@ class LogisticLikelihood(Likelihood):
         self.inv_scale = 1.0 / self.scale
         self.normalisation = -log(self.scale).sum()
 
-    def _log_likelihood(self, predictions):
+    def _log_likelihood(self, predictions: ndarray) -> float:
         z = (self.y - predictions) * self.inv_scale
         return z.sum() - 2 * log(1 + exp(z)).sum() + self.normalisation
 
-    def _log_likelihood_gradient(self, predictions, predictions_jacobian):
+    def _log_likelihood_gradient(
+        self, predictions: ndarray, predictions_jacobian: ndarray
+    ) -> ndarray:
         z = (self.y - predictions) * self.inv_scale
         dL_dF = (2 / (1 + exp(-z)) - 1) * self.inv_scale
         return dL_dF @ predictions_jacobian
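As a worked check of the GaussianLikelihood methods above (with f denoting the model predictions and J the predictions Jacobian), the log-likelihood and its parameter gradient are

    \ln L = -\frac{1}{2} \sum_i \left( \frac{y_i - f_i}{\sigma_i} \right)^2 - \sum_i \ln \sigma_i - \frac{n}{2} \ln 2\pi,
    \qquad
    \frac{\partial \ln L}{\partial \theta_j} = \sum_i \frac{y_i - f_i}{\sigma_i^2} J_{ij}

which is what _log_likelihood and the dL_dF @ predictions_jacobian product compute; the Cauchy and logistic variants above differ only in the form of the dL_dF factor.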
inference/mcmc/base.py CHANGED
@@ -106,7 +106,9 @@ class MarkovChain(ABC):
         else:
             return GaussianKDE(self.get_parameter(index, burn=burn, thin=thin))
 
-    def get_interval(self, interval=0.95, burn: int = 1, thin: int = 1, samples=None):
+    def get_interval(
+        self, interval: float = 0.95, burn: int = 1, thin: int = 1, samples: int = None
+    ) -> tuple[ndarray, ndarray]:
         """
         Return the samples from the chain which lie inside a chosen highest-density interval.
 
@@ -126,8 +128,8 @@ class MarkovChain(ABC):
            that specifying ``samples`` overrides the value of ``thin``.
 
        :return: \
-           List containing sample points stored as tuples, and a corresponding list of
-           log-probability values.
+           Samples from the chosen interval as a 2D ``numpy.ndarray``, followed by the
+           corresponding log-probability values as a 1D ``numpy.ndarray``.
        """
 
        # get the sorting indices for the probabilities
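A short usage sketch of the updated return type. The 'starting_positions' keyword and the get_interval signature are taken from this diff; the import path, the positional 'posterior' argument, and the advance call are assumptions made for illustration:

    import numpy as np
    from inference.mcmc import EnsembleSampler  # import path assumed

    def log_posterior(theta):
        # simple 2-parameter Gaussian posterior for demonstration
        return -0.5 * float(np.sum(theta**2))

    rng = np.random.default_rng(0)
    chain = EnsembleSampler(log_posterior, starting_positions=rng.normal(size=(100, 2)))
    chain.advance(1000)  # assumed method name for advancing the chain

    # both return values are now numpy arrays rather than lists of tuples
    points, log_probs = chain.get_interval(interval=0.95, burn=200)
    print(points.shape, log_probs.shape)  # (n_samples, 2) and (n_samples,)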
@@ -3,7 +3,7 @@ import matplotlib.pyplot as plt
 
 from numpy import array, ndarray, linspace, concatenate, savez, load
 from numpy import sqrt, var, cov, diag, isfinite, triu, exp, log, median
-from numpy.random import random, randint
+from numpy.random import default_rng
 
 from inference.mcmc.utilities import Bounds, ChainProgressPrinter
 from inference.mcmc.base import MarkovChain
@@ -52,6 +52,7 @@ class EnsembleSampler(MarkovChain):
         display_progress=True,
     ):
         self.posterior = posterior
+        self.rng = default_rng()
 
         if starting_positions is not None:
             # store core data
@@ -180,9 +181,9 @@ class EnsembleSampler(MarkovChain):
 
     def __proposal(self, i: int):
         # randomly select walker that isn't 'i'
-        j = (randint(low=1, high=self.n_walkers) + i) % self.n_walkers
+        j = (self.rng.integers(low=1, high=self.n_walkers) + i) % self.n_walkers
         # sample the stretch distance
-        z = 0.5 * (self.x_lwr + self.x_width * random()) ** 2
+        z = 0.5 * (self.x_lwr + self.x_width * self.rng.random()) ** 2
         prop = self.process_proposal(
             self.walker_positions[i, :]
             + z * (self.walker_positions[j, :] - self.walker_positions[i, :])
@@ -194,7 +195,7 @@ class EnsembleSampler(MarkovChain):
             Y, z = self.__proposal(i)
             p = self.posterior(Y)
             q = exp((self.n_parameters - 1) * log(z) + p - self.walker_probs[i])
-            if random() <= q:
+            if self.rng.random() <= q:
                 self.walker_positions[i, :] = Y
                 self.walker_probs[i] = p
                 self.total_proposals[i].append(attempts)
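The random-number changes in the two hunks above follow NumPy's recommended migration from the legacy module-level functions to a per-instance Generator. A standalone sketch of the same pattern (pure NumPy, nothing specific to inference-tools):

    import numpy as np

    # legacy style used in 0.13.3:
    #   from numpy.random import random, randint
    #   j = randint(low=1, high=10)
    #   u = random()

    # Generator style used in 0.14.0: the random state is held by the object
    # rather than by the global numpy.random state.
    rng = np.random.default_rng()      # optionally pass a seed for reproducibility
    j = rng.integers(low=1, high=10)   # replaces randint(low=1, high=10)
    u = rng.random()                   # replaces random()
    print(j, u)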