inference-tools 0.13.3-py3-none-any.whl → 0.13.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
inference/_version.py CHANGED
@@ -12,5 +12,5 @@ __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE
 
-__version__ = version = '0.13.3'
-__version_tuple__ = version_tuple = (0, 13, 3)
+__version__ = version = '0.13.4'
+__version_tuple__ = version_tuple = (0, 13, 4)
@@ -12,7 +12,7 @@ class Conditional:
         self.theta = theta
         self.variable_index = variable_index
 
-    def __call__(self, x):
+    def __call__(self, x: ndarray):
         t = self.theta.copy()
         t[self.variable_index] = x
         return self.posterior(t)
@@ -58,12 +58,12 @@ def binary_search(
     return x_new
 
 
-def trapezium_full(x, dh):
+def trapezium_full(x: ndarray, dh: ndarray) -> ndarray:
     b = dh - 1
     return (b + sqrt(b**2 + 4 * x * dh)) / (2 * dh)
 
 
-def trapezium_near_zero(x, dh):
+def trapezium_near_zero(x: ndarray, dh: ndarray) -> ndarray:
     return x + (1 - x) * x * dh
 
 
@@ -11,23 +11,23 @@ class CovarianceFunction(ABC):
     """
 
     @abstractmethod
-    def pass_spatial_data(self, x):
+    def pass_spatial_data(self, x: ndarray):
         pass
 
     @abstractmethod
-    def estimate_hyperpar_bounds(self, y):
+    def estimate_hyperpar_bounds(self, y: ndarray):
         pass
 
     @abstractmethod
-    def __call__(self, u, v, theta):
+    def __call__(self, u: ndarray, v: ndarray, theta: ndarray) -> ndarray:
         pass
 
     @abstractmethod
-    def build_covariance(self, theta):
+    def build_covariance(self, theta: ndarray) -> ndarray:
         pass
 
     @abstractmethod
-    def covariance_and_gradients(self, theta):
+    def covariance_and_gradients(self, theta: ndarray):
         pass
 
     def __add__(self, other):
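Note on the hunk above: the abstract interface now annotated with ndarray types is what a user-defined kernel has to implement. A minimal sketch, assuming the import path shown later in this diff; the n_points, n_params and bounds attribute names are assumptions modelled on the built-in kernels rather than documented requirements:

import numpy as np
from numpy import ndarray
from inference.gp.covariance import CovarianceFunction

class WhiteNoise(CovarianceFunction):
    """Toy kernel with a single log-variance hyper-parameter (illustration only)."""

    def pass_spatial_data(self, x: ndarray):
        # store whatever spatial information the kernel needs
        self.n_points = x.shape[0]
        self.n_params = 1

    def estimate_hyperpar_bounds(self, y: ndarray):
        # crude bounds on the log-variance, based on the spread of the data
        s = np.log(np.ptp(y))
        self.bounds = [(s - 8, s + 2)]

    def __call__(self, u: ndarray, v: ndarray, theta: ndarray) -> ndarray:
        # white noise has no covariance between distinct evaluation points
        return np.zeros([u.shape[0], v.shape[0]])

    def build_covariance(self, theta: ndarray) -> ndarray:
        return np.exp(2 * theta[0]) * np.eye(self.n_points)

    def covariance_and_gradients(self, theta: ndarray):
        K = self.build_covariance(theta)
        # gradient of K with respect to the single log-variance parameter
        return K, [2 * K]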
@@ -202,7 +202,7 @@ class SquaredExponential(CovarianceFunction):
         length-2 tuples giving the lower/upper bounds for each parameter.
     """
 
-    def __init__(self, hyperpar_bounds=None):
+    def __init__(self, hyperpar_bounds: list[tuple] = None):
         self.bounds = hyperpar_bounds
         self.n_params: int
         self.dx: ndarray
@@ -237,14 +237,14 @@ class SquaredExponential(CovarianceFunction):
             upr = log(self.dx[:, :, i].max()) + 2
             self.bounds.append((lwr, upr))
 
-    def __call__(self, u, v, theta):
+    def __call__(self, u: ndarray, v: ndarray, theta: ndarray) -> ndarray:
         a = exp(theta[0])
         L = exp(theta[1:])
         D = -0.5 * (u[:, None, :] - v[None, :, :]) ** 2
         C = exp((D / L[None, None, :] ** 2).sum(axis=2))
         return (a**2) * C
 
-    def build_covariance(self, theta):
+    def build_covariance(self, theta: ndarray) -> ndarray:
         """
         Optimized version of self.matrix() specifically for the data
         covariance matrix where the vectors v1 & v2 are both self.x.
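The annotated __call__ above evaluates the anisotropic squared-exponential kernel. As a standalone NumPy sketch of the same computation (array shapes assumed: u is (n, d), v is (m, d), and theta holds the log-amplitude followed by d log-length-scales):

import numpy as np

def squared_exponential(u, v, theta):
    a = np.exp(theta[0])        # amplitude
    L = np.exp(theta[1:])       # per-dimension length-scales
    D = -0.5 * (u[:, None, :] - v[None, :, :]) ** 2
    return (a**2) * np.exp((D / L[None, None, :] ** 2).sum(axis=2))

u = np.random.default_rng(1).normal(size=(5, 2))
v = np.random.default_rng(2).normal(size=(3, 2))
K = squared_exponential(u, v, theta=np.zeros(3))
print(K.shape)   # (5, 3)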
@@ -254,7 +254,7 @@ class SquaredExponential(CovarianceFunction):
         C = exp((self.distances / L[None, None, :] ** 2).sum(axis=2)) + self.epsilon
         return (a**2) * C
 
-    def gradient_terms(self, v, x, theta):
+    def gradient_terms(self, v: ndarray, x: ndarray, theta: ndarray):
         """
         Calculates the covariance-function specific parts of
         the expression for the predictive mean and covariance
@@ -265,7 +265,7 @@ class SquaredExponential(CovarianceFunction):
         A = (x - v[None, :]) / L[None, :] ** 2
         return A.T, (a / L) ** 2
 
-    def covariance_and_gradients(self, theta):
+    def covariance_and_gradients(self, theta: ndarray):
         a = exp(theta[0])
         L = exp(theta[1:])
         C = exp((self.distances / L[None, None, :] ** 2).sum(axis=2)) + self.epsilon
@@ -303,7 +303,7 @@ class RationalQuadratic(CovarianceFunction):
         length-2 tuples giving the lower/upper bounds for each parameter.
     """
 
-    def __init__(self, hyperpar_bounds=None):
+    def __init__(self, hyperpar_bounds: list[tuple] = None):
         self.bounds = hyperpar_bounds
 
     def pass_spatial_data(self, x: ndarray):
@@ -332,7 +332,7 @@ class RationalQuadratic(CovarianceFunction):
             upr = log(self.dx[:, :, i].max()) + 2
             self.bounds.append((lwr, upr))
 
-    def __call__(self, u, v, theta):
+    def __call__(self, u: ndarray, v: ndarray, theta: ndarray) -> ndarray:
         a = exp(theta[0])
         k = exp(theta[1])
         L = exp(theta[2:])
@@ -340,14 +340,14 @@ class RationalQuadratic(CovarianceFunction):
         Z = (D / L[None, None, :] ** 2).sum(axis=2)
         return (a**2) * (1 + Z / k) ** (-k)
 
-    def build_covariance(self, theta):
+    def build_covariance(self, theta: ndarray) -> ndarray:
         a = exp(theta[0])
         k = exp(theta[1])
         L = exp(theta[2:])
         Z = (self.distances / L[None, None, :] ** 2).sum(axis=2)
         return (a**2) * ((1 + Z / k) ** (-k) + self.epsilon)
 
-    def covariance_and_gradients(self, theta):
+    def covariance_and_gradients(self, theta: ndarray):
         a = exp(theta[0])
         q = exp(theta[1])
         L = exp(theta[2:])
@@ -437,11 +437,11 @@ class ChangePoint(CovarianceFunction):
         for K in self.cov:
             if not isinstance(K, CovarianceFunction):
                 raise TypeError(
-                    """
-                    [ ChangePoint error ]
-                    >> Each of the specified covariance kernels must be an instance of
-                    >> a class which inherits from the 'CovarianceFunction' abstract
-                    >> base-class.
+                    """\n
+                    \r[ ChangePoint error ]
+                    \r>> Each of the specified covariance kernels must be an instance of
+                    \r>> a class which inherits from the 'CovarianceFunction' abstract
+                    \r>> base-class.
                     """
                 )
 
@@ -450,9 +450,9 @@ class ChangePoint(CovarianceFunction):
         if location_bounds is not None:
             if len(location_bounds) != self.n_kernels - 1:
                 raise ValueError(
-                    """
-                    [ ChangePoint error ]
-                    >> The length of 'location_bounds' must be one less than the number of kernels
+                    """\n
+                    \r[ ChangePoint error ]
+                    \r>> The length of 'location_bounds' must be one less than the number of kernels
                     """
                 )
             self.location_bounds = [check_bounds(lb) for lb in location_bounds]
@@ -462,9 +462,9 @@ class ChangePoint(CovarianceFunction):
         if width_bounds is not None:
             if len(width_bounds) != self.n_kernels - 1:
                 raise ValueError(
-                    """
-                    [ ChangePoint error ]
-                    >> The length of 'width_bounds' must be one less than the number of kernels
+                    """\n
+                    \r[ ChangePoint error ]
+                    \r>> The length of 'width_bounds' must be one less than the number of kernels
                     """
                 )
             self.width_bounds = [check_bounds(wb) for wb in width_bounds]
@@ -526,7 +526,7 @@ class ChangePoint(CovarianceFunction):
         # check for consistency of length of bounds
         assert self.n_params == len(self.bounds)
 
-    def __call__(self, u: ndarray, v: ndarray, theta):
+    def __call__(self, u: ndarray, v: ndarray, theta: ndarray) -> ndarray:
         kernel_coeffs = [1.0]
         for slc in self.cp_slc:
             w_u = self.logistic(u[:, self.axis], theta[slc])
@@ -543,7 +543,7 @@ class ChangePoint(CovarianceFunction):
             for i in range(self.n_kernels)
         )
 
-    def build_covariance(self, theta):
+    def build_covariance(self, theta: ndarray) -> ndarray:
         kernel_coeffs = [1.0]
         for slc in self.cp_slc:
             w = self.logistic(self.x_cp, theta[slc])
@@ -558,7 +558,7 @@ class ChangePoint(CovarianceFunction):
             for i in range(self.n_kernels)
         )
 
-    def covariance_and_gradients(self, theta):
+    def covariance_and_gradients(self, theta: ndarray):
         K_vals = []
         K_grads = []
         for i in range(self.n_kernels):
@@ -593,12 +593,12 @@ class ChangePoint(CovarianceFunction):
         return covar, gradients
 
     @staticmethod
-    def logistic(x, theta):
+    def logistic(x, theta: ndarray):
         z = (x - theta[0]) / theta[1]
         return 1.0 / (1.0 + exp(-z))
 
     @staticmethod
-    def logistic_and_gradient(x, theta):
+    def logistic_and_gradient(x, theta: ndarray):
         z = (x - theta[0]) / theta[1]
         f = 1.0 / (1.0 + exp(-z))
         dfdc = -f * (1 - f) / theta[1]
@@ -643,7 +643,7 @@ class HeteroscedasticNoise(CovarianceFunction):
         sequence of length-2 tuples giving the lower/upper bounds.
     """
 
-    def __init__(self, hyperpar_bounds=None):
+    def __init__(self, hyperpar_bounds: list[tuple] = None):
         self.bounds = hyperpar_bounds
 
     def pass_spatial_data(self, x: ndarray):
@@ -668,10 +668,10 @@ class HeteroscedasticNoise(CovarianceFunction):
         s = log(ptp(y))
         self.bounds = [(s - 8, s + 2) for _ in range(self.n_params)]
 
-    def __call__(self, u, v, theta):
+    def __call__(self, u: ndarray, v: ndarray, theta: ndarray) -> ndarray:
         return zeros([u.size, v.size])
 
-    def build_covariance(self, theta):
+    def build_covariance(self, theta: ndarray) -> ndarray:
         """
         Optimized version of self.matrix() specifically for the data
         covariance matrix where the vectors v1 & v2 are both self.x.
@@ -679,7 +679,7 @@ class HeteroscedasticNoise(CovarianceFunction):
         sigma_sq = exp(2 * theta)
         return diag(sigma_sq)
 
-    def covariance_and_gradients(self, theta):
+    def covariance_and_gradients(self, theta: ndarray):
         sigma_sq = exp(2 * theta)
         K = diag(sigma_sq)
         grads = [s * dk for s, dk in zip(sigma_sq, self.dK)]
inference/gp/mean.py CHANGED
@@ -8,23 +8,23 @@ class MeanFunction(ABC):
     """
 
     @abstractmethod
-    def pass_spatial_data(self, x):
+    def pass_spatial_data(self, x: ndarray):
         pass
 
     @abstractmethod
-    def estimate_hyperpar_bounds(self, y):
+    def estimate_hyperpar_bounds(self, y: ndarray):
         pass
 
     @abstractmethod
-    def __call__(self, q, theta):
+    def __call__(self, q, theta: ndarray):
         pass
 
     @abstractmethod
-    def build_mean(self, theta):
+    def build_mean(self, theta: ndarray):
         pass
 
     @abstractmethod
-    def mean_and_gradients(self, theta):
+    def mean_and_gradients(self, theta: ndarray):
         pass
 
 
@@ -7,7 +7,7 @@ import matplotlib.pyplot as plt
 
 from inference.gp.regression import GpRegressor
 from inference.gp.covariance import CovarianceFunction, SquaredExponential
-from inference.gp.acquisition import ExpectedImprovement
+from inference.gp.acquisition import AcquisitionFunction, ExpectedImprovement
 from inference.gp.mean import MeanFunction, ConstantMean
 
 
@@ -92,7 +92,7 @@ class GpOptimiser:
         kernel: CovarianceFunction = SquaredExponential,
         mean: MeanFunction = ConstantMean,
         cross_val: bool = False,
-        acquisition=ExpectedImprovement,
+        acquisition: AcquisitionFunction = ExpectedImprovement,
         optimizer: str = "bfgs",
         n_processes: int = 1,
     ):
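The acquisition argument is now annotated with the AcquisitionFunction base class added to the imports above. A hedged usage sketch follows; the positional x and y arguments, the bounds keyword, and the propose_evaluation / add_evaluation call pattern are assumptions taken from the package documentation rather than from this diff:

import numpy as np
from inference.gp import GpOptimiser                      # assumed import path
from inference.gp.acquisition import ExpectedImprovement  # matches the import above

def objective(x):
    return float(np.sin(3 * x[0]) - 0.1 * x[0] ** 2)

# a few initial evaluations of the objective
x = [np.array([-2.0]), np.array([0.5]), np.array([2.5])]
y = [objective(v) for v in x]

gpo = GpOptimiser(x, y, bounds=[(-3.0, 3.0)], acquisition=ExpectedImprovement)
new_x = gpo.propose_evaluation()             # method name assumed from the package docs
gpo.add_evaluation(new_x, objective(new_x))  # signature assumed from the package docs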
@@ -165,7 +165,12 @@ class GpOptimiser:
                 self.y_err = append(self.y_err, new_y_err)
             else:
                 raise ValueError(
-                    "y_err must be specified for new evaluations if y_err was specified during __init__"
+                    """\n
+                    \r[ GpOptimiser error ]
+                    \r>> 'new_y_err' argument of the 'add_evaluation' method must be
+                    \r>> specified if the 'y_err' argument was specified when the
+                    \r>> instance of GpOptimiser was initialised.
+                    """
                 )
 
         # re-train the GP
@@ -243,7 +248,7 @@ class GpOptimiser:
             proposed_ev = proposed_ev[0]
         return proposed_ev
 
-    def plot_results(self, filename=None, show_plot=True):
+    def plot_results(self, filename: str = None, show_plot=True):
         fig = plt.figure(figsize=(10, 4))
         ax1 = fig.add_subplot(121)
         maxvals = maximum.accumulate(self.y)
@@ -98,8 +98,8 @@ class GpRegressor:
         if self.y.ndim != 1:
             raise ValueError(
                 f"""\n
-                [ GpRegressor error ]
-                >> 'y' argument must be a 1D array, but instead has shape {self.y.shape}
+                \r[ GpRegressor error ]
+                \r>> 'y' argument must be a 1D array, but instead has shape {self.y.shape}
                 """
             )
 
@@ -113,19 +113,19 @@ class GpRegressor:
         else:
             raise ValueError(
                 f"""\n
-                [ GpRegressor Error ]
-                >> 'x' argument must be a 2D array, but instead has
-                >> {self.x.ndim} dimensions and shape {self.x.shape}.
+                \r[ GpRegressor Error ]
+                \r>> 'x' argument must be a 2D array, but instead has
+                \r>> {self.x.ndim} dimensions and shape {self.x.shape}.
                 """
             )
 
         if self.x.shape[0] != self.n_points:
             raise ValueError(
                 f"""\n
-                [ GpRegressor Error ]
-                >> The first dimension of the 'x' array must be equal in size
-                >> to the 'y' array.
-                >> 'x' has shape {self.x.shape}, but 'y' has size {self.y.size}.
+                \r[ GpRegressor Error ]
+                \r>> The first dimension of the 'x' array must be equal in size
+                \r>> to the 'y' array.
+                \r>> 'x' has shape {self.x.shape}, but 'y' has size {self.y.size}.
                 """
             )
 
@@ -215,7 +215,7 @@ class GpRegressor:
 
         return array(mu_q), sqrt(abs(array(errs)))
 
-    def set_hyperparameters(self, hyperpars):
+    def set_hyperparameters(self, hyperpars: ndarray):
         """
         Update the hyper-parameter values of the model.
 
@@ -243,7 +243,7 @@ class GpRegressor:
             self.L.T, solve_triangular(self.L, self.y - self.mu, lower=True)
         )
 
-    def check_error_data(self, y_err, y_cov):
+    def check_error_data(self, y_err, y_cov) -> ndarray:
         if y_cov is not None:
             # if y_cov is given as a list or tuple, attempt conversion to an array
             if any([type(y_cov) is t for t in [list, tuple]]):
@@ -321,7 +321,7 @@ class GpRegressor:
         else:
            return zeros([self.n_points, self.n_points])
 
-    def process_points(self, points):
+    def process_points(self, points: ndarray) -> ndarray:
         x = points if isinstance(points, ndarray) else array(points)
 
         if x.ndim <= 1 and self.n_dimensions == 1:
@@ -448,7 +448,7 @@ class GpRegressor:
         sigma = K_qq - (Q.T @ Q)
         return mu, sigma
 
-    def loo_predictions(self):
+    def loo_predictions(self) -> tuple[ndarray, ndarray]:
         """
         Calculates the 'leave-one out' (LOO) predictions for the data, where each data
         point is removed from the training set and then has its value predicted using
@@ -465,7 +465,7 @@ class GpRegressor:
         sigma = sqrt(var)
         return mu, sigma
 
-    def loo_likelihood(self, theta: ndarray):
+    def loo_likelihood(self, theta: ndarray) -> float:
         """
         Calculates the 'leave-one out' (LOO) log-likelihood.
 
@@ -525,7 +525,7 @@ class GpRegressor:
 
         return LOO, grad
 
-    def marginal_likelihood(self, theta: ndarray):
+    def marginal_likelihood(self, theta: ndarray) -> float:
         """
         returns the log-marginal likelihood for the supplied hyper-parameter values.
 
@@ -566,7 +566,7 @@ class GpRegressor:
         grad[self.cov_slice] = array([0.5 * (Q * dK.T).sum() for dK in grad_K])
         return LML, grad
 
-    def differential_evo(self):
+    def differential_evo(self) -> ndarray:
         # optimise the hyper-parameters
         opt_result = differential_evolution(
             func=lambda x: -self.model_selector(x), bounds=self.hp_bounds
inference/likelihoods.py CHANGED
@@ -73,14 +73,16 @@ class Likelihood(ABC):
         )
 
     @abstractmethod
-    def _log_likelihood(self, predictions):
+    def _log_likelihood(self, predictions: ndarray) -> float:
         pass
 
     @abstractmethod
-    def _log_likelihood_gradient(self, predictions, predictions_jacobian):
+    def _log_likelihood_gradient(
+        self, predictions: ndarray, predictions_jacobian: ndarray
+    ) -> ndarray:
         pass
 
-    def __call__(self, theta):
+    def __call__(self, theta: ndarray) -> float:
         """
         Returns the log-likelihood value for the given set of model parameters.
 
@@ -92,7 +94,7 @@ class Likelihood(ABC):
         """
         return self._log_likelihood(predictions=self.model(theta))
 
-    def gradient(self, theta):
+    def gradient(self, theta: ndarray) -> ndarray:
         """
         Returns the gradient of the log-likelihood with respect to model parameters.
 
@@ -110,10 +112,10 @@ class Likelihood(ABC):
             predictions_jacobian=self.model_jacobian(theta),
         )
 
-    def cost(self, theta):
+    def cost(self, theta: ndarray) -> float:
         return -self.__call__(theta)
 
-    def cost_gradient(self, theta):
+    def cost_gradient(self, theta: ndarray) -> ndarray:
         return -self.gradient(theta)
 
 
@@ -154,11 +156,13 @@ class GaussianLikelihood(Likelihood):
         self.inv_sigma_sqr = self.inv_sigma**2
         self.normalisation = -log(self.sigma).sum() - 0.5 * log(2 * pi) * self.n_data
 
-    def _log_likelihood(self, predictions):
+    def _log_likelihood(self, predictions: ndarray) -> float:
         z = (self.y - predictions) * self.inv_sigma
         return -0.5 * (z**2).sum() + self.normalisation
 
-    def _log_likelihood_gradient(self, predictions, predictions_jacobian):
+    def _log_likelihood_gradient(
+        self, predictions: ndarray, predictions_jacobian: ndarray
+    ) -> ndarray:
         dL_dF = (self.y - predictions) * self.inv_sigma_sqr
         return dL_dF @ predictions_jacobian
 
@@ -199,11 +203,13 @@ class CauchyLikelihood(Likelihood):
         self.inv_gamma = 1.0 / self.gamma
         self.normalisation = -log(pi * self.gamma).sum()
 
-    def _log_likelihood(self, predictions):
+    def _log_likelihood(self, predictions: ndarray) -> float:
         z = (self.y - predictions) * self.inv_gamma
         return -log(1 + z**2).sum() + self.normalisation
 
-    def _log_likelihood_gradient(self, predictions, predictions_jacobian):
+    def _log_likelihood_gradient(
+        self, predictions: ndarray, predictions_jacobian: ndarray
+    ) -> ndarray:
         z = (self.y - predictions) * self.inv_gamma
         dL_dF = 2 * self.inv_gamma * z / (1 + z**2)
         return dL_dF @ predictions_jacobian
@@ -246,11 +252,13 @@ class LogisticLikelihood(Likelihood):
         self.inv_scale = 1.0 / self.scale
         self.normalisation = -log(self.scale).sum()
 
-    def _log_likelihood(self, predictions):
+    def _log_likelihood(self, predictions: ndarray) -> float:
         z = (self.y - predictions) * self.inv_scale
         return z.sum() - 2 * log(1 + exp(z)).sum() + self.normalisation
 
-    def _log_likelihood_gradient(self, predictions, predictions_jacobian):
+    def _log_likelihood_gradient(
+        self, predictions: ndarray, predictions_jacobian: ndarray
+    ) -> ndarray:
         z = (self.y - predictions) * self.inv_scale
         dL_dF = (2 / (1 + exp(-z)) - 1) * self.inv_scale
         return dL_dF @ predictions_jacobian
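For reference, the Gaussian case above reduces to the following standalone NumPy computation: the quadratic form, normalisation and chain-rule gradient mirror the annotated methods, while the data values here are placeholders and the surrounding Likelihood constructor is not shown in this diff.

import numpy as np

y = np.array([1.2, 0.7, 2.1])        # observed data (placeholder values)
sigma = np.array([0.1, 0.1, 0.2])    # per-point standard deviations
inv_sigma = 1.0 / sigma
normalisation = -np.log(sigma).sum() - 0.5 * np.log(2 * np.pi) * y.size

def log_likelihood(predictions: np.ndarray) -> float:
    z = (y - predictions) * inv_sigma
    return -0.5 * (z**2).sum() + normalisation

def log_likelihood_gradient(predictions: np.ndarray, predictions_jacobian: np.ndarray) -> np.ndarray:
    # chain rule: dL/dtheta = (dL/dF) @ (dF/dtheta)
    dL_dF = (y - predictions) * inv_sigma**2
    return dL_dF @ predictions_jacobian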
inference/mcmc/base.py CHANGED
@@ -106,7 +106,9 @@ class MarkovChain(ABC):
         else:
             return GaussianKDE(self.get_parameter(index, burn=burn, thin=thin))
 
-    def get_interval(self, interval=0.95, burn: int = 1, thin: int = 1, samples=None):
+    def get_interval(
+        self, interval: float = 0.95, burn: int = 1, thin: int = 1, samples: int = None
+    ) -> tuple[ndarray, ndarray]:
         """
         Return the samples from the chain which lie inside a chosen highest-density interval.
 
@@ -126,8 +128,8 @@ class MarkovChain(ABC):
             that specifying ``samples`` overrides the value of ``thin``.
 
         :return: \
-            List containing sample points stored as tuples, and a corresponding list of
-            log-probability values.
+            Samples from the chosen interval as a 2D ``numpy.ndarray``, followed by the
+            corresponding log-probability values as a 1D ``numpy.ndarray``.
         """
 
         # get the sorting indices for the probabilities
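Given the updated return annotation and docstring above, a hedged usage sketch; GibbsChain, its posterior/start arguments and the advance method are taken from the package documentation and do not appear in this diff:

import numpy as np
from inference.mcmc import GibbsChain   # assumed import path

def posterior(theta: np.ndarray) -> float:
    # simple independent-Gaussian log-posterior for illustration
    return -0.5 * float(np.sum((theta / np.array([1.0, 2.0])) ** 2))

chain = GibbsChain(posterior=posterior, start=np.array([1.0, 1.0]))
chain.advance(25000)

# 2D array of sample points inside the 95% highest-density interval,
# plus the matching 1D array of log-probability values
points, log_probs = chain.get_interval(interval=0.95, burn=5000, thin=2)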
inference/mcmc/gibbs.py CHANGED
@@ -120,7 +120,7 @@ class Parameter:
         else:
             return self.upper - d % self.width
 
-    def submit_accept_prob(self, p):
+    def submit_accept_prob(self, p: float):
         self.num += 1
         self.avg += p
         self.var += p * (1 - p)
@@ -375,23 +375,26 @@ class MetropolisChain(MarkovChain):
         ind = argmax(self.probs)
         return array([p.samples[ind] for p in self.params])
 
-    def set_non_negative(self, parameter, flag=True):
+    def set_non_negative(self, parameter: int, flag=True):
         """
         Constrain a particular parameter to have non-negative values.
 
-        :param int parameter: Index of the parameter which is to be set \
-        as non-negative.
+        :param int parameter: \
+            Index of the parameter which is to be set as non-negative.
         """
         self.params[parameter].non_negative = flag
 
-    def set_boundaries(self, parameter, boundaries, remove=False):
+    def set_boundaries(
+        self, parameter: int, boundaries: tuple[float, float], remove=False
+    ):
         """
         Constrain the value of a particular parameter to specified boundaries.
 
-        :param int parameter: Index of the parameter for which boundaries \
-        are to be set.
+        :param int parameter: \
+            Index of the parameter for which boundaries are to be set.
 
-        :param boundaries: Tuple of boundaries in the format (lower_limit, upper_limit)
+        :param boundaries: \
+            Tuple of boundaries in the format (lower_limit, upper_limit)
         """
         if remove:
             self.params[parameter].remove_boundaries()
@@ -402,7 +405,7 @@ class MetropolisChain(MarkovChain):
         """
         Plot diagnostic traces that give information on how the chain is progressing.
 
-        Currently this method plots:
+        Currently, this method plots:
 
         - The posterior log-probability as a function of step number, which is useful
           for checking if the chain has reached a maximum. Any early parts of the chain
inference/mcmc/hmc.py CHANGED
@@ -145,8 +145,8 @@ class HamiltonianChain(MarkovChain):
         else:
             raise ValueError(
                 f"""\n
-                [ HamiltonianChain error ]
-                >> Failed to take step within maximum allowed attempts of {self.max_attempts}
+                \r[ HamiltonianChain error ]
+                \r>> Failed to take step within maximum allowed attempts of {self.max_attempts}
                 """
             )
 
@@ -155,7 +155,9 @@ class HamiltonianChain(MarkovChain):
         self.leapfrog_steps.append(steps_taken)
         self.chain_length += 1
 
-    def standard_leapfrog(self, t: ndarray, r: ndarray, n_steps: int):
+    def standard_leapfrog(
+        self, t: ndarray, r: ndarray, n_steps: int
+    ) -> tuple[ndarray, ndarray]:
         t_step = self.inv_mass * self.ES.epsilon
         r_step = self.inv_temp * self.ES.epsilon
         r += (0.5 * r_step) * self.grad(t)
@@ -166,7 +168,9 @@ class HamiltonianChain(MarkovChain):
         r += (0.5 * r_step) * self.grad(t)
         return t, r
 
-    def bounded_leapfrog(self, t: ndarray, r: ndarray, n_steps: int):
+    def bounded_leapfrog(
+        self, t: ndarray, r: ndarray, n_steps: int
+    ) -> tuple[ndarray, ndarray]:
         t_step = self.inv_mass * self.ES.epsilon
         r_step = self.inv_temp * self.ES.epsilon
         r += (0.5 * r_step) * self.grad(t)
inference/mcmc/pca.py CHANGED
@@ -33,13 +33,13 @@ class PcaChain(MetropolisChain):
         and returns the posterior log-probability.
 
     :param start: \
-        Vector of model parameters which correspond to the parameter-space coordinates
-        at which the chain will start.
+        Values of the model parameters as a ``numpy.ndarray`` which correspond to the
+        parameter-space coordinates at which the chain will start.
 
     :param widths: \
-        Vector of standard deviations which serve as initial guesses for the widths of
-        the proposal distribution for each model parameter. If not specified, the starting
-        widths will be approximated as 5% of the values in 'start'.
+        A ``numpy.ndarray`` of standard deviations which serve as initial guesses for
+        the widths of the proposal distribution for each model parameter. If not
+        specified, the starting widths will be approximated as 5% of the values in 'start'.
 
     :param bounds: \
         An instance of the ``inference.mcmc.Bounds`` class, or a sequence of two