eryn-1.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- eryn/CMakeLists.txt +51 -0
- eryn/__init__.py +35 -0
- eryn/backends/__init__.py +20 -0
- eryn/backends/backend.py +1150 -0
- eryn/backends/hdfbackend.py +819 -0
- eryn/ensemble.py +1690 -0
- eryn/git_version.py.in +7 -0
- eryn/model.py +18 -0
- eryn/moves/__init__.py +42 -0
- eryn/moves/combine.py +135 -0
- eryn/moves/delayedrejection.py +229 -0
- eryn/moves/distgen.py +104 -0
- eryn/moves/distgenrj.py +222 -0
- eryn/moves/gaussian.py +190 -0
- eryn/moves/group.py +281 -0
- eryn/moves/groupstretch.py +120 -0
- eryn/moves/mh.py +193 -0
- eryn/moves/move.py +703 -0
- eryn/moves/mtdistgen.py +137 -0
- eryn/moves/mtdistgenrj.py +190 -0
- eryn/moves/multipletry.py +776 -0
- eryn/moves/red_blue.py +333 -0
- eryn/moves/rj.py +388 -0
- eryn/moves/stretch.py +231 -0
- eryn/moves/tempering.py +649 -0
- eryn/pbar.py +56 -0
- eryn/prior.py +452 -0
- eryn/state.py +775 -0
- eryn/tests/__init__.py +0 -0
- eryn/tests/test_eryn.py +1246 -0
- eryn/utils/__init__.py +10 -0
- eryn/utils/periodic.py +134 -0
- eryn/utils/stopping.py +164 -0
- eryn/utils/transform.py +226 -0
- eryn/utils/updates.py +69 -0
- eryn/utils/utility.py +329 -0
- eryn-1.2.0.dist-info/METADATA +167 -0
- eryn-1.2.0.dist-info/RECORD +39 -0
- eryn-1.2.0.dist-info/WHEEL +4 -0
eryn/prior.py
ADDED
@@ -0,0 +1,452 @@
import numpy as np
from scipy import stats
from copy import deepcopy

try:
    import cupy as cp

except (ModuleNotFoundError, ImportError) as e:
    pass


class UniformDistribution(object):
    """Generate uniform distribution between ``min`` and ``max``

    Args:
        min_val (double): Minimum in the uniform distribution
        max_val (double): Maximum in the uniform distribution
        use_cupy (bool, optional): If ``True``, use CuPy. If ``False`` use Numpy.
            (default: ``False``)
        return_gpu (bool, optional): If ``True``, return CuPy array. If ``False``,
            return Numpy array. (default: ``False``)

    Raises:
        ValueError: Issue with inputs.

    """

    def __init__(self, min_val, max_val, use_cupy=False, return_gpu=False):
        if min_val > max_val:
            tmp = min_val
            min_val = max_val
            max_val = tmp
        elif min_val == max_val:
            raise ValueError("Min and max values are the same.")

        self.min_val = min_val
        self.max_val = max_val
        self.diff = max_val - min_val

        self.pdf_val = 1 / self.diff
        self.logpdf_val = np.log(self.pdf_val)

        self.use_cupy = use_cupy
        self.return_gpu = return_gpu
        if use_cupy:
            try:
                cp.abs(1.0)
            except NameError:
                raise ValueError("CuPy not found.")

    @property
    def xp(self):
        """Numpy or Cupy"""
        xp = np if not self.use_cupy else cp
        return xp

    def rvs(self, size=1):
        if not isinstance(size, int) and not isinstance(size, tuple):
            raise ValueError("size must be an integer or tuple of ints.")

        if isinstance(size, int):
            size = (size,)

        rand_unif = self.xp.random.rand(*size)

        out = rand_unif * self.diff + self.min_val

        if self.use_cupy and not self.return_gpu:
            return out.get()

        return out

    def pdf(self, x):
        out = self.pdf_val * ((x >= self.min_val) & (x <= self.max_val))
        if self.use_cupy and not self.return_gpu:
            return out.get()

        return out

    def logpdf(self, x):

        out = self.xp.zeros_like(x)
        out[(x >= self.min_val) & (x <= self.max_val)] = self.logpdf_val
        out[(x < self.min_val) | (x > self.max_val)] = -np.inf
        if self.use_cupy and not self.return_gpu:
            return out.get()

        return out

    def copy(self):
        return deepcopy(self)


def uniform_dist(min, max, use_cupy=False, return_gpu=False):
    """Generate uniform distribution between ``min`` and ``max``

    Args:
        min (double): Minimum in the uniform distribution
        max (double): Maximum in the uniform distribution
        use_cupy (bool, optional): If ``True``, use CuPy. If ``False`` use Numpy.
            (default: ``False``)
        return_gpu (bool, optional): If ``True``, return CuPy array. If ``False``,
            return Numpy array. (default: ``False``)

    Returns:
        :class:`UniformDistribution`: Uniform distribution.


    """
    dist = UniformDistribution(min, max, use_cupy=use_cupy, return_gpu=return_gpu)

    return dist
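
For reference (this example is not part of the packaged file), a minimal usage sketch of ``uniform_dist`` on the NumPy path; the bounds below are illustrative:

```python
import numpy as np
from eryn.prior import uniform_dist

# uniform prior between -5 and 5; rvs draws samples, logpdf evaluates log(1 / width)
prior = uniform_dist(-5.0, 5.0)
samples = prior.rvs(size=10)             # shape (10,)
log_p = prior.logpdf(samples)            # each entry is log(1/10)
outside = prior.logpdf(np.array([7.0]))  # -inf outside the support
```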


def log_uniform(min, max):
    """Generate log-uniform distribution between ``min`` and ``max``

    Args:
        min (double): Minimum in the log-uniform distribution
        max (double): Maximum in the log-uniform distribution

    Returns:
        scipy distribution object: Log-uniform distribution built from
            `scipy.stats.loguniform <https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.loguniform.html>`_.

    """
    # adjust ordering if needed
    if min > max:
        temp = min
        min = max
        max = temp

    # setup quantities for scipy
    sig = max - min
    dist = stats.loguniform(min, sig)
    return dist
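
Similarly, a quick sketch of ``log_uniform`` (again, not packaged code); the returned object is a frozen SciPy distribution, so the usual ``rvs``/``logpdf`` methods apply, and the bounds are illustrative:

```python
from eryn.prior import log_uniform

# frozen scipy.stats.loguniform object
dist = log_uniform(1e-3, 1e1)
draws = dist.rvs(size=5)    # 5 samples from the returned distribution
log_p = dist.logpdf(draws)  # log-density at the drawn points
```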


class MappedUniformDistribution:
    """Maps a uniform distribution to the range zero to 1.

    This is a modified uniform distribution that maps
    the input values to a range from zero to 1 by using the ``min`` and
    ``max`` values input by the user. This ensures the log of the prior value
    from this distribution is zero if the value is between ``min`` and ``max``,
    and ``-np.inf`` if it is outside that range.

    Args:
        min (double): Minimum in the uniform distribution
        max (double): Maximum in the uniform distribution
        use_cupy (bool, optional): If ``True``, use CuPy. If ``False`` use Numpy.
            (default: ``False``)
        return_gpu (bool, optional): If ``True``, return CuPy array. If ``False``,
            return Numpy array. (default: ``False``)

    Raises:
        ValueError: If ``min`` is greater than ``max``.


    """

    def __init__(self, min, max, use_cupy=False, return_gpu=False):
        self.min, self.max = min, max
        self.diff = self.max - self.min
        if self.min > self.max:
            raise ValueError("min must be less than max.")

        # store the GPU flags; they are referenced by xp, logpdf, and rvs below
        self.use_cupy = use_cupy
        self.return_gpu = return_gpu

        self.dist = uniform_dist(0.0, 1.0, use_cupy=use_cupy, return_gpu=return_gpu)

    @property
    def xp(self):
        """Numpy or Cupy"""
        xp = np if not self.use_cupy else cp
        return xp

    def logpdf(self, x):
        """Get the log of the pdf value for this distribution.

        Args:
            x (double np.ndarray):
                Input parameters to get prior values.

        Returns:
            np.ndarray: Associated logpdf values of the input.

        """
        temp = 1.0 - (self.max - x) / self.diff
        out = self.dist.logpdf(temp)
        if self.use_cupy and not self.return_gpu:
            return out.get()
        return out

    def rvs(self, size=1):
        """Generate random values according to this distribution.

        Args:
            size (int or tuple of ints, optional): Output size for number of generated
                sources from prior distributions.

        Returns:
            np.ndarray: Generated values.

        """
        # adjust size if int
        if isinstance(size, int):
            size = (size,)

        elif not isinstance(size, tuple):
            raise ValueError("Size must be int or tuple of ints.")

        temp = self.dist.rvs(size=size)

        out = self.max + (temp - 1.0) * self.diff
        if self.use_cupy and not self.return_gpu:
            return out.get()
        return out
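
A short illustrative sketch (not packaged code) of the mapping described in the class docstring: values inside ``[min, max]`` receive a log-prior of zero, values outside receive ``-np.inf``. The bounds are illustrative:

```python
import numpy as np
from eryn.prior import MappedUniformDistribution

mapped = MappedUniformDistribution(1.0, 5.0)
print(mapped.logpdf(np.array([2.0, 4.9])))  # [0., 0.]      -> inside [1, 5]
print(mapped.logpdf(np.array([0.5, 6.0])))  # [-inf, -inf]  -> outside [1, 5]
draws = mapped.rvs(size=3)                  # three values between 1 and 5
```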


class ProbDistContainer:
    """Container for holding and generating prior info

    Args:
        priors_in (dict): Dictionary with keys as int or tuple of int
            describing which parameters the prior takes. Values are
            probability distributions with ``logpdf`` and ``rvs`` methods.

    Attributes:
        priors_in (dict): Dictionary with keys as int or tuple of int
            describing which parameters the prior takes. Values are
            probability distributions with ``logpdf`` and ``rvs`` methods.
        priors (list): list of indexes and their associated distributions arranged
            in a list.
        ndim (int): Full dimensionality.
        use_cupy (bool, optional): If ``True``, use CuPy. If ``False`` use Numpy.
            (default: ``False``)
        return_gpu (bool, optional): If ``True``, return CuPy array. If ``False``,
            return Numpy array. (default: ``False``)

    Raises:
        ValueError: Missing parameters or incorrect index keys.

    """

    def __init__(self, priors_in, use_cupy=False, return_gpu=False):
        # keep a copy of the input dictionary
        self.priors_in = priors_in.copy()

        # to separate out in list form
        self.priors = []

        # setup lists
        temp_inds = []
        for inds, dist in priors_in.items():
            # multiple index
            if isinstance(inds, tuple):
                inds_in = np.asarray(inds)
                self.priors.append([inds_in, dist])

            # single index
            elif isinstance(inds, int):
                inds_in = np.array([inds])
                self.priors.append([inds_in, dist])

            else:
                raise ValueError(
                    "Keys for prior dictionary must be an integer or tuple."
                )

            temp_inds.append(np.asarray([inds_in]))

        uni_inds = np.unique(np.concatenate(temp_inds, axis=1).flatten())
        if len(uni_inds) != len(np.arange(np.max(uni_inds) + 1)):
            raise ValueError(
                "Please ensure all sampled parameters are included in priors."
            )

        self.ndim = uni_inds.max() + 1

        self.use_cupy = use_cupy
        self.return_gpu = return_gpu
        for key, item in self.priors_in.items():
            item.use_cupy = use_cupy
            # need this because the prob dist container will conglomerate
            item.return_gpu = True

    @property
    def xp(self):
        """Numpy or Cupy"""
        xp = np if not self.use_cupy else cp
        return xp

    def logpdf(self, x, keys=None):
        """Get logpdf by summing logpdf of individual distributions

        Args:
            x (double np.ndarray[..., ndim]):
                Input parameters to get prior values.
            keys (list, optional): List of keys related to which parameters to gather the logpdf for.
                They must exactly match the input keys for the ``priors_in`` dictionary for the ``__init__``
                function. Even when using this kwarg, all ``ndim`` parameters must be provided as input. The prior
                will just not be calculated if its associated key is not included. Default is ``None``.

        Returns:
            np.ndarray[...]: Prior values.

        """
        # TODO: check if multiple index prior will work

        # make sure at least 2D
        if x.ndim == 1:
            x = x[None, :]
            squeeze = True

        elif x.ndim != 2:
            raise ValueError("x needs to be a 1 or 2 dimensional array.")
        else:
            squeeze = False

        prior_vals = self.xp.zeros(x.shape[0])

        # sum the logs (assumes parameters are independent)
        for i, (inds, prior_i) in enumerate(self.priors):

            if keys is not None:
                if len(inds) > 1:
                    if tuple(inds) not in keys:
                        continue
                else:
                    if inds[0] not in keys:
                        continue

            vals_in = x[:, inds]
            if hasattr(prior_i, "logpdf"):
                temp = prior_i.logpdf(vals_in)
            else:
                temp = prior_i.logpmf(vals_in)

            prior_vals += temp.squeeze()

        # if only one walker was asked for, return a scalar value not an array
        if squeeze:
            prior_vals = prior_vals[0].item()

        if self.use_cupy and not self.return_gpu:
            return prior_vals.get()

        return prior_vals

    def ppf(self, x, groups=None):
        """Get the percent point function (inverse of the cdf) for each parameter

        Args:
            x (double np.ndarray[..., ndim]):
                Input quantiles for each parameter.

        Returns:
            np.ndarray[...]: Parameter values associated with the input quantiles.

        """
        raise NotImplementedError
        if groups is not None:
            raise NotImplementedError

        # TODO: check if multiple index prior will work
        is_1d = x.ndim == 1
        x = self.xp.atleast_2d(x)
        out_vals = self.xp.zeros_like(x)

        # evaluate the ppf of each distribution (assumes parameters are independent)
        for i, (inds, prior_i) in enumerate(self.priors):
            if len(inds) > 1:
                raise NotImplementedError

            vals_in = x[:, inds].squeeze()
            temp = prior_i.ppf(vals_in)

            out_vals[:, inds[0]] = temp

        if is_1d:
            return out_vals.squeeze()

        if self.use_cupy and not self.return_gpu:
            return out_vals.get()

        return out_vals

    def rvs(self, size=1, keys=None):
        """Generate random values according to prior distribution

        The user will have to be careful if there are prior functions that
        do not have an ``rvs`` method. This means that generated points may lie
        inside the prior of all input priors that have ``rvs`` methods, but
        outside the prior if priors without the ``rvs`` method are included.

        Args:
            size (int or tuple of ints, optional): Output size for number of generated
                sources from prior distributions.
            keys (list, optional): List of keys related to which parameters to generate.
                They must exactly match the input keys for the ``priors_in`` dictionary for the ``__init__``
                function. If used, it will produce an output array of ``tuple(size) + (len(keys),)``.
                Default is ``None``.

        Returns:
            np.ndarray[``size + (self.ndim,)``]: Generated samples.

        Raises:
            ValueError: If size is not an int or tuple.


        """

        # adjust size if int
        if isinstance(size, int):
            size = (size,)

        elif not isinstance(size, tuple):
            raise ValueError("Size must be int or tuple of ints.")

        # setup the slicing to properly sample points
        out_inds = tuple([slice(None) for _ in range(len(size))])

        # setup output and loop through priors

        ndim = self.ndim

        out = self.xp.zeros(size + (ndim,))
        for i, (inds, prior_i) in enumerate(self.priors):
            # only generate desired parameters
            if keys is not None:
                if len(inds) > 1:
                    if tuple(inds) not in keys:
                        continue
                else:
                    if inds[0] not in keys:
                        continue

            # guard against extra prior functions without rvs methods
            if not hasattr(prior_i, "rvs"):
                continue
            # combines outer dimensions with indices of interest
            inds_in = out_inds + (inds,)

            # allows for proper adding of quantities to out array
            if len(inds) == 1:
                adjust_inds = out_inds + (None,)
                out[inds_in] = prior_i.rvs(size=size)[adjust_inds]
            else:
                out[inds_in] = prior_i.rvs(size=size)

        if self.use_cupy and not self.return_gpu:
            return out.get()
        return out
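
Finally, a minimal sketch (not packaged code) of the typical ``ProbDistContainer`` workflow when setting up priors for the sampler; the parameter indices and bounds are illustrative:

```python
import numpy as np
from eryn.prior import ProbDistContainer, uniform_dist

# one independent prior per parameter index (0 and 1 here)
priors = ProbDistContainer({
    0: uniform_dist(-1.0, 1.0),
    1: uniform_dist(0.0, 10.0),
})

coords = priors.rvs(size=100)             # shape (100, 2): draws for both parameters
log_p = priors.logpdf(coords)             # shape (100,): summed log-prior per point
log_p0 = priors.logpdf(coords, keys=[0])  # only the prior on parameter 0 is evaluated
```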