inference-tools 0.14.2__py3-none-any.whl → 0.14.3__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.
- inference/_version.py +16 -3
- inference/approx/conditional.py +4 -8
- inference/gp/covariance.py +8 -16
- inference/gp/inversion.py +12 -24
- inference/gp/optimisation.py +2 -4
- inference/gp/regression.py +26 -52
- inference/likelihoods.py +2 -4
- inference/mcmc/base.py +12 -24
- inference/mcmc/ensemble.py +14 -28
- inference/mcmc/gibbs.py +2 -4
- inference/mcmc/hmc/__init__.py +2 -4
- inference/mcmc/hmc/mass.py +8 -16
- inference/mcmc/parallel.py +2 -4
- inference/mcmc/pca.py +4 -8
- inference/mcmc/utilities.py +10 -20
- inference/pdf/base.py +2 -4
- inference/pdf/hdi.py +12 -24
- inference/pdf/kde.py +2 -4
- inference/plotting.py +68 -43
- inference/priors.py +28 -56
- {inference_tools-0.14.2.dist-info → inference_tools-0.14.3.dist-info}/METADATA +1 -1
- inference_tools-0.14.3.dist-info/RECORD +35 -0
- {inference_tools-0.14.2.dist-info → inference_tools-0.14.3.dist-info}/WHEEL +1 -1
- inference_tools-0.14.2.dist-info/RECORD +0 -35
- {inference_tools-0.14.2.dist-info → inference_tools-0.14.3.dist-info}/licenses/LICENSE +0 -0
- {inference_tools-0.14.2.dist-info → inference_tools-0.14.3.dist-info}/top_level.txt +0 -0
inference/_version.py
CHANGED
@@ -1,7 +1,14 @@
 # file generated by setuptools-scm
 # don't change, don't track in version control

-__all__ = ["__version__", "__version_tuple__", "version", "version_tuple"]
+__all__ = [
+    "__version__",
+    "__version_tuple__",
+    "version",
+    "version_tuple",
+    "__commit_id__",
+    "commit_id",
+]

 TYPE_CHECKING = False
 if TYPE_CHECKING:
@@ -9,13 +16,19 @@ if TYPE_CHECKING:
     from typing import Union

     VERSION_TUPLE = Tuple[Union[int, str], ...]
+    COMMIT_ID = Union[str, None]
 else:
     VERSION_TUPLE = object
+    COMMIT_ID = object

 version: str
 __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE
+commit_id: COMMIT_ID
+__commit_id__: COMMIT_ID

-__version__ = version = '0.14.2'
-__version_tuple__ = version_tuple = (0, 14, 2)
+__version__ = version = '0.14.3'
+__version_tuple__ = version_tuple = (0, 14, 3)
+
+__commit_id__ = commit_id = 'gc06533b'
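Beyond the version bump, the regenerated file now records the commit the wheel was built from, via the new '__commit_id__' and 'commit_id' attributes. A minimal sketch of reading this metadata, assuming the 0.14.3 wheel is installed:

from inference import _version

print(_version.__version__)        # '0.14.3'
print(_version.__version_tuple__)  # (0, 14, 3)
print(_version.__commit_id__)      # 'gc06533b' for this build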
inference/approx/conditional.py
CHANGED
@@ -109,20 +109,16 @@ def piecewise_linear_sample(
     """
     dx = x[1:] - x[:-1]
     if (dx <= 0.0).any():
-        raise ValueError(
-            """\n
+        raise ValueError("""\n
             \r[ piecewise_linear_sample error ]
             \r>> The 'x' argument must be given in strictly ascending order.
-            """
-        )
+            """)

     if (probability_density < 0).any():
-        raise ValueError(
-            """\n
+        raise ValueError("""\n
             \r[ piecewise_linear_sample error ]
             \r>> All values in the given 'probability_density' array must be non-negative.
-            """
-        )
+            """)

     means = 0.5 * (probability_density[1:] + probability_density[:-1])
     delta = 0.5 * (probability_density[1:] - probability_density[:-1]) / means
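This hunk is representative: most of the remaining changes in this release are the same mechanical restyling, in which a triple-quoted error message is pulled up to hug the parentheses of the enclosing call. Runtime behaviour is unchanged; only the layout differs. A sketch of the two forms in isolation (the function and message below are hypothetical, not taken from the package):

def check_old(x):
    # pre-0.14.3 style: the string literal opens on its own line
    if x < 0:
        raise ValueError(
            """
            'x' must be non-negative.
            """
        )

def check_new(x):
    # 0.14.3 style: the quotes hug the call's parentheses
    if x < 0:
        raise ValueError("""
            'x' must be non-negative.
            """)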
inference/gp/covariance.py
CHANGED
@@ -36,12 +36,10 @@ class CovarianceFunction(ABC):
         return CompositeCovariance([*K1, *K2])

     def gradient_terms(self, v, x, theta):
-        raise NotImplementedError(
-            f"""
+        raise NotImplementedError(f"""
             Gradient calculations are not yet available for the
             {type(self)} covariance function.
-            """
-        )
+            """)


 class CompositeCovariance(CovarianceFunction):
@@ -436,37 +434,31 @@ class ChangePoint(CovarianceFunction):
         ]
         for K in self.cov:
             if not isinstance(K, CovarianceFunction):
-                raise TypeError(
-                    """\n
+                raise TypeError("""\n
                     \r[ ChangePoint error ]
                     \r>> Each of the specified covariance kernels must be an instance of
                     \r>> a class which inherits from the 'CovarianceFunction' abstract
                     \r>> base-class.
-                    """
-                )
+                    """)

         self.n_kernels = len(kernels)

         if location_bounds is not None:
             if len(location_bounds) != self.n_kernels - 1:
-                raise ValueError(
-                    """\n
+                raise ValueError("""\n
                     \r[ ChangePoint error ]
                     \r>> The length of 'location_bounds' must be one less than the number of kernels
-                    """
-                )
+                    """)
             self.location_bounds = [check_bounds(lb) for lb in location_bounds]
         else:
             self.location_bounds = None

         if width_bounds is not None:
             if len(width_bounds) != self.n_kernels - 1:
-                raise ValueError(
-                    """\n
+                raise ValueError("""\n
                     \r[ ChangePoint error ]
                     \r>> The length of 'width_bounds' must be one less than the number of kernels
-                    """
-                )
+                    """)
             self.width_bounds = [check_bounds(wb) for wb in width_bounds]
         else:
             self.width_bounds = None
inference/gp/inversion.py
CHANGED
@@ -61,55 +61,45 @@ class GpLinearInverter:
         prior_mean_function: MeanFunction = ConstantMean,
     ):
         if model_matrix.ndim != 2:
-            raise ValueError(
-                """\n
+            raise ValueError("""\n
                 [ GpLinearInverter error ]
                 >> 'model_matrix' argument must be a 2D numpy.ndarray
-                """
-            )
+                """)

         if y.ndim != y_err.ndim != 1 or y.size != y_err.size:
-            raise ValueError(
-                """\n
+            raise ValueError("""\n
                 [ GpLinearInverter error ]
                 >> 'y' and 'y_err' arguments must be 1D numpy.ndarray
                 >> of equal size.
-                """
-            )
+                """)

         if model_matrix.shape[0] != y.size:
-            raise ValueError(
-                f"""\n
+            raise ValueError(f"""\n
                 [ GpLinearInverter error ]
                 >> The size of the first dimension of 'model_matrix' must
                 >> equal the size of 'y', however they have shapes
                 >> {model_matrix.shape}, {y.shape}
                 >> respectively.
-                """
-            )
+                """)

         if parameter_spatial_positions.ndim != 2:
-            raise ValueError(
-                """\n
+            raise ValueError("""\n
                 [ GpLinearInverter error ]
                 >> 'parameter_spatial_positions' must be a 2D numpy.ndarray, with the
                 >> size of first dimension being equal to the number of model parameters
                 >> and the size of the second dimension being equal to the number of
                 >> spatial dimensions.
-                """
-            )
+                """)

         if model_matrix.shape[1] != parameter_spatial_positions.shape[0]:
-            raise ValueError(
-                f"""\n
+            raise ValueError(f"""\n
                 [ GpLinearInverter error ]
                 >> The size of the second dimension of 'model_matrix' must be equal
                 >> to the size of the first dimension of 'parameter_spatial_positions',
                 >> however they have shapes
                 >> {model_matrix.shape}, {parameter_spatial_positions.shape}
                 >> respectively.
-                """
-            )
+                """)

         self.A = model_matrix
         self.y = y
@@ -230,13 +220,11 @@ class GpLinearInverter:
         as a 1D ``numpy.ndarray``.
         """
         if initial_guess.size != self.n_hyperpars:
-            raise ValueError(
-                f"""\n
+            raise ValueError(f"""\n
                 [ GpLinearInverter error ]
                 >> There are a total of {self.n_hyperpars} hyper-parameters,
                 >> but {initial_guess.size} values were given in 'initial_guess'.
-                """
-            )
+                """)

         hp_bounds = [*self.mean.bounds, *self.cov.bounds]

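Taken together, the checks in this constructor define the expected input shapes for GpLinearInverter: a 2D 'model_matrix' of shape (n_data, n_parameters), 1D 'y' and 'y_err' arrays of length n_data, and a 2D 'parameter_spatial_positions' array of shape (n_parameters, n_spatial_dimensions). A sketch of inputs that satisfy these checks (all sizes hypothetical):

import numpy as np

n_data, n_params, n_spatial = 20, 5, 1
model_matrix = np.random.rand(n_data, n_params)  # 2D: (n_data, n_params)
y = np.random.rand(n_data)                       # 1D, length n_data
y_err = np.full(n_data, 0.1)                     # 1D, same size as y
parameter_spatial_positions = np.linspace(0.0, 1.0, n_params).reshape(n_params, n_spatial)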
inference/gp/optimisation.py
CHANGED
@@ -164,14 +164,12 @@ class GpOptimiser:
         if new_y_err is not None:
             self.y_err = append(self.y_err, new_y_err)
         else:
-            raise ValueError(
-                """\n
+            raise ValueError("""\n
                 \r[ GpOptimiser error ]
                 \r>> 'new_y_err' argument of the 'add_evaluation' method must be
                 \r>> specified if the 'y_err' argument was specified when the
                 \r>> instance of GpOptimiser was initialised.
-                """
-            )
+                """)

         # re-train the GP
         self.gp = GpRegressor(
inference/gp/regression.py
CHANGED
@@ -96,12 +96,10 @@ class GpRegressor:
         self.y = self.y.squeeze()

         if self.y.ndim != 1:
-            raise ValueError(
-                f"""\n
+            raise ValueError(f"""\n
                 \r[ GpRegressor error ]
                 \r>> 'y' argument must be a 1D array, but instead has shape {self.y.shape}
-                """
-            )
+                """)

         # determine the number of data points and spatial dimensions
         self.n_points = self.y.size
@@ -111,23 +109,19 @@ class GpRegressor:
             self.n_dimensions = 1
             self.x = self.x.reshape([self.x.size, self.n_dimensions])
         else:
-            raise ValueError(
-                f"""\n
+            raise ValueError(f"""\n
                 \r[ GpRegressor Error ]
                 \r>> 'x' argument must be a 2D array, but instead has
                 \r>> {self.x.ndim} dimensions and shape {self.x.shape}.
-                """
-            )
+                """)

         if self.x.shape[0] != self.n_points:
-            raise ValueError(
-                f"""\n
+            raise ValueError(f"""\n
                 \r[ GpRegressor Error ]
                 \r>> The first dimension of the 'x' array must be equal in size
                 \r>> to the 'y' array.
                 \r>> 'x' has shape {self.x.shape}, but 'y' has size {self.y.size}.
-                """
-            )
+                """)

         # build data errors covariance matrix
         self.sig = self.check_error_data(y_err, y_cov)
@@ -167,13 +161,11 @@ class GpRegressor:
         if hyperpars is None:
             if optimizer not in ["bfgs", "diffev"]:
                 optimizer = "bfgs"
-                warn(
-                    """
+                warn("""
                     An invalid option was passed to the 'optimizer' keyword argument.
                     The default option 'bfgs' was used instead.
                     Valid options are 'bfgs' and 'diffev'.
-                    """
-                )
+                    """)

             if optimizer == "diffev":
                 hyperpars = self.differential_evo()
@@ -224,14 +216,12 @@ class GpRegressor:
         """
         # check to make sure the right number of hyper-parameters were given
         if len(hyperpars) != self.n_hyperpars:
-            raise ValueError(
-                f"""\n
+            raise ValueError(f"""\n
                 [ GpRegressor error ]
                 >> An incorrect number of hyper-parameter values were passed via the
                 >> 'hyperpars' keyword argument:
                 >> There are {self.n_hyperpars} hyper-parameters but {len(hyperpars)} values were given.
-                """
-            )
+                """)

         self.hyperpars = hyperpars
         self.mean_hyperpars = self.hyperpars[self.mean_slice]
@@ -250,45 +240,37 @@ class GpRegressor:
             y_err = array(y_cov).squeeze()
         elif type(y_cov) is not ndarray:
             # else if it isn't already an array raise an error
-            raise TypeError(
-                f"""\n
+            raise TypeError(f"""\n
                 [ GpRegressor error ]
                 >> The 'y_cov' keyword argument should be given as a numpy array:
                 >> Expected type {ndarray} but type {type(y_cov)} was given.
-                """
-            )
+                """)

         # now check to make sure the given error array is a valid size
         if y_cov.shape != (self.n_points, self.n_points):
-            raise ValueError(
-                """\n
+            raise ValueError("""\n
                 [ GpRegressor error ]
                 >> The 'y_cov' keyword argument was passed an array with an incorrect
                 >> shape. 'y_cov' must be a 2D array of shape (N,N), where 'N' is the
                 >> number of given y-data values.
-                """
-            )
+                """)

         # check to make sure the given matrix is symmetric
         if not (y_cov == y_cov.T).all():
-            raise ValueError(
-                """\n
+            raise ValueError("""\n
                 [ GpRegressor error ]
                 >> The covariance matrix passed to the 'y_cov' keyword argument
                 >> is not symmetric.
-                """
-            )
+                """)

         # raise a warning if both keywords have been specified
         if y_err is not None:
-            warn(
-                """\n
+            warn("""\n
                 [ GpRegressor warning ]
                 >> Only one of the 'y_err' and 'y_cov' keyword arguments should
                 >> be specified. Only the input to 'y_cov' will be used - the
                 >> input to 'y_err' will be ignored.
-                """
-            )
+                """)

         return y_cov

@@ -298,24 +280,20 @@ class GpRegressor:
             y_err = array(y_err).squeeze()
         elif type(y_err) is not ndarray:
             # else if it isn't already an array raise an error
-            raise TypeError(
-                f"""\n
+            raise TypeError(f"""\n
                 [ GpRegressor error ]
                 >> The 'y_err' keyword argument should be given as a numpy array:
                 >> Expected type {ndarray} but type {type(y_err)} was given.
-                """
-            )
+                """)

         # now check to make sure the given error array is a valid size
         if y_err.shape != (self.n_points,):
-            raise ValueError(
-                """\n
+            raise ValueError("""\n
                 [ GpRegressor error ]
                 >> The 'y_err' keyword argument was passed an array with an
                 >> incorrect shape. 'y_err' must be a 1D array of length 'N',
                 >> where 'N' is the number of given y-data values.
-                """
-            )
+                """)

             return diag(y_err**2)
         else:
@@ -329,23 +307,19 @@ class GpRegressor:
         elif x.ndim == 1 and x.size == self.n_dimensions:
             x = x.reshape([1, x.size])
         elif x.ndim > 2:
-            raise ValueError(
-                f"""\n
+            raise ValueError(f"""\n
                 [ GpRegressor error ]
                 >> 'points' argument must be a 2D array, but given array
                 >> has {x.ndim} dimensions and shape {x.shape}.
-                """
-            )
+                """)

         if x.shape[1] != self.n_dimensions:
-            raise ValueError(
-                f"""\n
+            raise ValueError(f"""\n
                 [ GpRegressor error ]
                 >> The second dimension of the 'points' array must have size
                 >> equal to the number of dimensions of the input data.
                 >> The input data have {self.n_dimensions} dimensions but 'points' has shape {x.shape}.
-                """
-            )
+                """)
         return x

     def gradient(self, points: ndarray):
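These hunks likewise encode GpRegressor's input contract: 'y' must squeeze to a 1D array, 'x' must be 2D with its first dimension equal to y.size, and errors are given either as a 1D 'y_err' of length N or as a symmetric (N, N) 'y_cov' matrix, but not both. A sketch of conforming inputs (values hypothetical):

import numpy as np

N = 30
x = np.linspace(0.0, 1.0, N).reshape(N, 1)  # 2D: (n_points, n_dimensions)
y = np.sin(4.0 * x[:, 0])                   # 1D, matching x's first dimension
y_err = np.full(N, 0.05)                    # 1D errors; a symmetric (N, N) 'y_cov' is the alternative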
inference/likelihoods.py
CHANGED
@@ -265,10 +265,8 @@ class LogisticLikelihood(Likelihood):


 def jacobian_not_given(*args):
-    raise ValueError(
-        """
+    raise ValueError("""
         The gradient() method of a likelihood class instance was called, however
        the forward_model_jacobian keyword argument was not specified when instance
        was created.
-        """
-    )
+        """)
inference/mcmc/base.py
CHANGED
@@ -217,24 +217,20 @@ class MarkovChain(ABC):

     def __plot_checks(self, burn: int, thin: int, plot_type: str):
         if self.chain_length < 2:
-            raise ValueError(
-                f"""\n
+            raise ValueError(f"""\n
                 \r[ {self.__class__.__name__} error ]
                 \r>> Cannot generate the {plot_type} plot as no samples have
                 \r>> been produced - current chain length is {self.chain_length}.
-                """
-            )
+                """)

         reduced_length = max(self.chain_length - burn - 1, 0) // thin + 1
         if reduced_length < 2:
-            raise ValueError(
-                f"""\n
+            raise ValueError(f"""\n
                 \r[ {self.__class__.__name__} error ]
                 \r>> The given values of 'burn' and 'thin' leave insufficient
                 \r>> samples to generate the {plot_type} plot.
                 \r>> Number of samples after burn / thin is {reduced_length}.
-                """
-            )
+                """)

     @property
     def burn(self):
@@ -253,44 +249,36 @@ class MarkovChain(ABC):
         self.__burn_thin_error()

     def __burn_thin_error(self):
-        raise AttributeError(
-            f"""\n
+        raise AttributeError(f"""\n
             \r[ {self.__class__.__name__} error ]
             \r>> The 'burn' and 'thin' instance attributes of inference-tools
             \r>> mcmc samplers were removed in version 0.13.0. Burn and thin
             \r>> values should now be passed explicitly to any methods with
             \r>> 'burn' and 'thin' keyword arguments.
-            """
-        )
+            """)

     def _validate_posterior(self, posterior: callable, start: ndarray):
         if not callable(posterior):
-            raise ValueError(
-                f"""\n
+            raise ValueError(f"""\n
                 \r[ {self.__class__.__name__} error ]
                 \r>> The given 'posterior' is not a callable object.
-                """
-            )
+                """)

         prob = posterior(start)

         if not isinstance(prob, float):
-            raise ValueError(
-                f"""\n
+            raise ValueError(f"""\n
                 \r[ {self.__class__.__name__} error ]
                 \r>> The given 'posterior' must return a float or a type which
                 \r>> derives from float (e.g. numpy.float64), however the returned
                 \r>> value has type:
                 \r>> {type(prob)}
-                """
-            )
+                """)

         if not isfinite(prob):
-            raise ValueError(
-                f"""\n
+            raise ValueError(f"""\n
                 \r[ {self.__class__.__name__} error ]
                 \r>> The given 'posterior' must return a finite value for the given
                 \r>> 'start' parameter values, but instead returns a value of:
                 \r>> {prob}
-                """
-            )
+                """)
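The '_validate_posterior' checks above define the contract for any posterior passed to these samplers: it must be callable and must return a finite float at the starting position. A minimal sketch of a conforming posterior (hypothetical, for illustration only):

import numpy as np

def log_posterior(theta: np.ndarray) -> float:
    # a standard-normal log-density: returns a finite float for any
    # finite 'theta', so it passes all three validation checks
    return float(-0.5 * np.sum(theta**2))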
inference/mcmc/ensemble.py
CHANGED
@@ -91,12 +91,10 @@ class EnsembleSampler(MarkovChain):

         # proposal settings
         if not alpha > 1.0:
-            raise ValueError(
-                """\n
+            raise ValueError("""\n
                 \r[ EnsembleSampler error ]
                 \r>> The given value of the 'alpha' parameter must be greater than 1.
-                """
-            )
+                """)
         self.alpha = alpha
         # uniform sampling in 'x' where z = 0.5*x**2 yields the correct PDF for z
         self.x_lwr = sqrt(2.0 / self.alpha)
@@ -113,70 +111,58 @@ class EnsembleSampler(MarkovChain):
     @staticmethod
     def __validate_starting_positions(positions: ndarray):
         if not isinstance(positions, ndarray):
-            raise ValueError(
-                f"""\n
+            raise ValueError(f"""\n
                 \r[ EnsembleSampler error ]
                 \r>> 'starting_positions' should be a numpy.ndarray, but instead has type:
                 \r>> {type(positions)}
-                """
-            )
+                """)

         theta = (
             positions.reshape([positions.size, 1]) if positions.ndim == 1 else positions
         )

         if theta.ndim != 2 or theta.shape[0] < (theta.shape[1] + 1):
-            raise ValueError(
-                f"""\n
+            raise ValueError(f"""\n
                 \r[ EnsembleSampler error ]
                 \r>> 'starting_positions' should be a numpy.ndarray with shape
                 \r>> (n_walkers, n_parameters), where n_walkers >= n_parameters + 1.
                 \r>> Instead, the given array has shape {positions.shape}.
-                """
-            )
+                """)

         if not isfinite(theta).all():
-            raise ValueError(
-                """\n
+            raise ValueError("""\n
                 \r[ EnsembleSampler error ]
                 \r>> The given 'starting_positions' array contains at least one
                 \r>> value which is non-finite.
-                """
-            )
+                """)

         if theta.shape[1] == 1:
             # only need to check the variance for the one-parameter case
             if var(theta) == 0:
-                raise ValueError(
-                    """\n
+                raise ValueError("""\n
                     \r[ EnsembleSampler error ]
                     \r>> The values given in 'starting_positions' have zero variance,
                     \r>> and therefore the walkers are unable to move.
-                    """
-                )
+                    """)
         else:
             covar = cov(theta.T)
             std_dev = sqrt(diag(covar)) # get the standard devs
             if (std_dev == 0).any():
-                raise ValueError(
-                    """\n
+                raise ValueError("""\n
                     \r[ EnsembleSampler error ]
                     \r>> For one or more variables, The values given in 'starting_positions'
                     \r>> have zero variance, and therefore the walkers are unable to move
                     \r>> in those variables.
-                    """
-                )
+                    """)
             # now check if any pairs of variables are approximately co-linear
             correlation = covar / (std_dev[:, None] * std_dev[None, :])
             if (abs(triu(correlation, k=1)) > 0.999).any():
-                raise ValueError(
-                    """\n
+                raise ValueError("""\n
                     \r[ EnsembleSampler error ]
                     \r>> The values given in 'starting_positions' are approximately
                     \r>> co-linear for one or more pair of variables. This will
                     \r>> prevent the walkers from moving properly in those variables.
-                    """
-                )
+                    """)
         return theta

     def __proposal(self, i: int):
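The walker validation above requires 'starting_positions' to be a (n_walkers, n_parameters) array with n_walkers >= n_parameters + 1, containing only finite values, with non-zero variance in every parameter and no approximately co-linear parameter pairs. A sketch of starting positions that pass these checks (names and sizes hypothetical):

import numpy as np

rng = np.random.default_rng(1)
n_walkers, n_params = 10, 3
guess = np.zeros(n_params)
# random jitter around a guess yields finite, non-degenerate positions
starting_positions = guess + 0.1 * rng.normal(size=(n_walkers, n_params))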
inference/mcmc/gibbs.py
CHANGED
@@ -272,13 +272,11 @@ class MetropolisChain(MarkovChain):

         # check posterior value of chain starting point is finite
         if not isfinite(self.probs[0]):
-            ValueError(
-                """\n
+            ValueError("""\n
                 \r[ MetropolisChain error ]
                 \r>> 'posterior' argument callable returns a non-finite value
                 \r>> for the starting position given to the 'start' argument.
-                """
-            )
+                """)

         self.display_progress = display_progress
         self.ProgressPrinter = ChainProgressPrinter(
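Note that in both the old and new versions of this hunk the ValueError is constructed but never raised, so the non-finite-start check has no effect; the 0.14.3 restyling preserves this behaviour. A corrected form, shown only as a self-contained sketch (the helper below is hypothetical, not part of either release), would add the missing 'raise':

import numpy as np

def check_start_probability(prob: float):
    # hypothetical helper mirroring the MetropolisChain check, with the
    # 'raise' that both released versions omit
    if not np.isfinite(prob):
        raise ValueError("'posterior' returns a non-finite value at 'start'")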
inference/mcmc/hmc/__init__.py
CHANGED
@@ -149,12 +149,10 @@ class HamiltonianChain(MarkovChain):
             if (accept_prob >= 1) or (self.rng.random() <= accept_prob):
                 break
         else:
-            raise ValueError(
-                f"""\n
+            raise ValueError(f"""\n
                 \r[ HamiltonianChain error ]
                 \r>> Failed to take step within maximum allowed attempts of {self.max_attempts}
-                """
-            )
+                """)

         self.theta.append(t)
         self.probs.append(p)