eryn 1.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
eryn/utils/utility.py ADDED
@@ -0,0 +1,329 @@
1
+ # -*- coding: utf-8 -*-
2
+
3
+ import numpy as np
4
+ from scipy.special import logsumexp
5
+ import warnings
6
+
7
+
8
def groups_from_inds(inds):
    """Convert inds to group information

    Args:
        inds (dict): Keys are ``branch_names`` and values are inds
            np.ndarrays[ntemps, nwalkers, nleaves_max] that specify
            which leaves are used in this step.

    Returns:
        dict: Dictionary with group information.
            Keys are ``branch_names`` and values are
            np.ndarray[total number of used leaves]. The array is flat.

    """
    out = {}
    for branch, branch_inds in inds.items():
        # shape information
        ntemps, nwalkers, nleaves_max = branch_inds.shape

        # every (temperature, walker) pair is its own group; tile that
        # group id across the leaf axis so each leaf knows its group
        ids = np.tile(
            np.arange(ntemps * nwalkers).reshape(ntemps, nwalkers, 1),
            (1, 1, nleaves_max),
        )

        # boolean mask keeps only the active leaves, flattened in C order
        out[branch] = ids[branch_inds]

    return out
41
+
42
+
43
def get_acf(x, axis=0, fast=False):
    """Estimate the autocorrelation function of a time series using the FFT.

    Args:
        x (np.ndarray): The time series. If multidimensional, set the time
            axis using the ``axis`` keyword argument and the function will
            be computed for every other axis.
        axis (int, optional): The time axis of ``x``. Assumed to be the
            first axis if not specified. (default: 0)
        fast (bool, optional): If ``True``, only use the largest ``2^n``
            entries for efficiency. (default: False)

    Returns:
        np.ndarray: The autocorrelation function, normalized so the
            zero-lag value along ``axis`` is 1.

    """
    x = np.atleast_1d(x)
    m = [
        slice(None),
    ] * len(x.shape)

    # For computational efficiency, crop the chain to the largest power of
    # two if requested.
    if fast:
        n = int(2 ** np.floor(np.log2(x.shape[axis])))
        m[axis] = slice(0, n)
        # Bug fix: actually apply the crop. The original code assigned
        # ``x = x`` here, so ``fast=True`` silently did nothing.
        x = x[tuple(m)]
    else:
        n = x.shape[axis]

    # Compute the FFT and then (from that) the auto-correlation function.
    # Zero-padding to 2n avoids circular-correlation wrap-around.
    f = np.fft.fft(x - np.mean(x, axis=axis), n=2 * n, axis=axis)
    m[axis] = slice(0, n)
    acf = np.fft.ifft(f * np.conjugate(f), axis=axis)[tuple(m)].real
    m[axis] = 0
    return acf / acf[tuple(m)]
77
+
78
+
79
def get_integrated_act(x, axis=0, window=50, fast=False, average=True):
    """Estimate the integrated autocorrelation time of a time series.

    See Sokal's notes on MCMC and sample estimators for autocorrelation
    times.

    Args:
        x (dict or np.ndarray): The time series. If a dict, keys are
            ``branch_names`` and values are arrays of shape
            ``(nsteps, ntemps, nwalkers, nleaves_max, ndim)``; the branches
            are flattened and concatenated along the last axis before the
            ACF is computed. If an ``np.ndarray``, it is used directly.
        axis (int, optional): The time axis of ``x``. Only ``0`` is
            currently supported. (default: 0)
        window (int, optional): The size of the summation window used for
            the integrated time. (default: 50)
        fast (bool, optional): If ``True``, only use the largest ``2^n``
            entries for efficiency. (default: False)
        average (bool, optional): If ``True``, average the estimate over
            axis 1 of ``tau`` (the walker axis when the input is
            ``(nsteps, ntemps, nwalkers, ...)``). (default: True)

    Returns:
        dict or np.ndarray or float: Integrated autocorrelation time
            estimate(s), matching the container type of ``x`` (a scalar in
            the special 1D case).

    """

    if axis != 0:
        # TODO: need to check this
        raise NotImplementedError

    # Compute the autocorrelation function.
    if isinstance(x, dict):
        is_dict = True
        ndim_total = 0
        values_out = []
        ind_breaks = []
        for name, values in x.items():
            nsteps, ntemps, nwalkers, nleaves_max, ndim = values.shape
            # NOTE(review): ind_breaks accumulates ndim per branch, but the
            # concatenated last axis below holds nleaves_max * ndim entries
            # per branch — the split indices only line up when
            # nleaves_max == 1. Confirm against callers.
            ndim_total += ndim
            ind_breaks.append(ndim_total)
            # collapse (nleaves_max, ndim) into one trailing axis
            values_out.append(values.reshape(nsteps, ntemps, nwalkers, -1))

        x_in = np.concatenate(values_out, axis=-1)

    elif isinstance(x, np.ndarray):
        is_dict = False
        x_in = x
    else:
        raise ValueError("x must be dictionary of np.ndarrays or an np.ndarray.")

    f = get_acf(x_in, axis=axis, fast=fast)

    # Special case 1D for simplicity.
    if len(f.shape) == 1:
        return 1 + 2 * np.sum(f[1:window])

    # N-dimensional case: sum the ACF over lags 1..window-1 along the time
    # axis (Sokal's windowed estimator).
    m = [
        slice(None),
    ] * len(f.shape)
    m[axis] = slice(1, window)
    tau = 1 + 2 * np.sum(f[tuple(m)], axis=axis)

    if average:
        # assumes axis 1 of tau is the walker axis — TODO confirm for
        # inputs that are not (nsteps, ntemps, nwalkers, ...)
        tau = np.average(tau, axis=1)

    if is_dict:
        # NOTE(review): the last entry of ind_breaks equals the full axis
        # length, so np.split yields a trailing empty array; zip with
        # x.keys() silently drops it.
        splits = np.split(tau, ind_breaks, axis=-1)
        out = {name: split for name, split in zip(x.keys(), splits)}

    else:
        out = tau

    return out
145
+
146
+
147
def thermodynamic_integration_log_evidence(betas, logls):
    """Thermodynamic integration estimate of the evidence.

    This function originated in ``ptemcee``.

    Args:
        betas (np.ndarray[ntemps]): The inverse temperatures to use for the quadrature.
        logls (np.ndarray[ntemps]): The mean log-Likelihoods corresponding to ``betas`` to use for
            computing the thermodynamic evidence.
    Returns:
        tuple: ``(logZ, dlogZ)``:
            Returns an estimate of the
            log-evidence and the error associated with the finite
            number of temperatures at which the posterior has been
            sampled.

    The evidence is the integral of the un-normalized posterior
    over all of parameter space:
    .. math::
        Z \\equiv \\int d\\theta \\, l(\\theta) p(\\theta)
    Thermodymanic integration is a technique for estimating the
    evidence integral using information from the chains at various
    temperatures. Let
    .. math::
        Z(\\beta) = \\int d\\theta \\, l^\\beta(\\theta) p(\\theta)
    Then
    .. math::
        \\frac{d \\log Z}{d \\beta}
        = \\frac{1}{Z(\\beta)} \\int d\\theta l^\\beta p \\log l
        = \\left \\langle \\log l \\right \\rangle_\\beta
    so
    .. math::
        \\log Z(1) - \\log Z(0)
        = \\int_0^1 d\\beta \\left \\langle \\log l \\right\\rangle_\\beta
    By computing the average of the log-likelihood at the
    different temperatures, the sampler can approximate the above
    integral.

    """
    # both arrays must pair one mean log(L) with each temperature
    if len(betas) != len(logls):
        raise ValueError("Need the same number of log(L) values as temperatures.")

    # sort from beta = 1 (cold) down toward beta = 0 (hot)
    descending = np.argsort(betas)[::-1]
    betas = betas[descending]
    logls = logls[descending]

    if betas[-1] != 0.0:
        # Append beta = 0, duplicating the hottest chain's mean log(L) as
        # the best available guess for <log L> at beta = 0. The coarse grid
        # takes every other rung for the error estimate below.
        betas_coarse = np.concatenate((betas[::2], [0.0]))
        logls_coarse = np.concatenate((logls[::2], [logls[-1]]))
        betas_fine = np.concatenate((betas, [0.0]))
        logls_fine = np.concatenate((logls, [logls[-1]]))
    else:
        betas_fine = betas
        logls_fine = logls
        betas_coarse = np.concatenate((betas[:-1:2], [0.0]))
        logls_coarse = np.concatenate((logls[:-1:2], [logls[-1]]))

    # trapezoid rule; betas run in descending order, hence the sign flip
    logZ = -np.trapz(logls_fine, betas_fine)
    logZ_coarse = -np.trapz(logls_coarse, betas_coarse)

    # the full-vs-half resolution difference serves as the error estimate
    return logZ, np.abs(logZ - logZ_coarse)
213
+
214
+
215
def stepping_stone_log_evidence(betas, logls, block_len=50, repeats=100):
    """Stepping stone approximation for the evidence calculation.

    Based on
        a. https://arxiv.org/abs/1810.04488 and
        b. https://pubmed.ncbi.nlm.nih.gov/21187451/.

    Args:
        betas (np.ndarray[ntemps]): The inverse temperatures to use for the quadrature.
        logls (np.ndarray): The log-Likelihood samples, with the temperature
            on axis 1, used for computing the evidence.
        block_len (int): The length of each chain block to compute the evidence from. Useful for computing the error-bars.
        repeats (int): The number of repeats to compute the evidence (using the block above).

    Returns:
        tuple: ``(logZ, dlogZ)``:
            Returns an estimate of the
            log-evidence and the error associated with the finite
            number of temperatures at which the posterior has been
            sampled.
    """

    def _single_estimate(betas_sorted, samples):
        # per-rung log ratio of adjacent normalizing constants, averaged
        # over the posterior samples in log space
        nsamples = samples.shape[0]
        dbeta = np.diff(betas_sorted)
        per_rung = logsumexp(dbeta * samples[:, :-1], axis=0) - np.log(nsamples)
        return np.sum(per_rung), per_rung

    # axis 1 of the samples must pair with the temperature ladder
    if len(betas) != logls.shape[1]:
        raise ValueError(
            "Need the log(L).shape[1] to be the same as the number of temperatures."
        )

    # sort temperatures into ascending beta order
    ascending = np.argsort(betas)
    betas = betas[ascending]
    logls = logls[:, ascending, :]

    # collapse everything but the temperature axis: one row per sample
    logls = logls.reshape(-1, betas.shape[0])
    steps = logls.shape[0]

    logZ, _ = _single_estimate(betas, logls)

    # Estimate the evidence uncertainty (Maturana-Russel et. al. (2019))
    logZ_i = np.zeros(repeats)
    try:
        for i in range(repeats):
            # one random draw from each sliding block of length block_len
            picks = [
                np.random.randint(j, j + block_len) for j in range(steps - block_len)
            ]
            logZ_i[i] = _single_estimate(betas, logls[picks, :])[0]
        dlogZ = np.std(logZ_i)
    except ValueError:
        warnings.warn(
            "Warning: Failed to compute evidence uncertainty via Stepping Stone algorithm"
        )
        dlogZ = np.nan

    return logZ, dlogZ
276
+
277
+
278
def psrf(C, ndims, per_walker=False):
    """The Gelman-Rubin convergence diagnostic.

    A general approach to monitoring convergence of MCMC output of multiple
    walkers. The function makes a comparison of within-chain and
    between-chain variances. A large deviation between these two variances
    indicates non-convergence, and the output [Rhat] deviates from unity.

    By default, it combines the MCMC chains for all walkers, and then
    computes the Rhat for the first and last 1/3 parts of the traces. This
    can be tuned with the ``per_walker`` flag.

    Based on
        a. Brooks, SP. and Gelman, A. (1998) General methods for monitoring
           convergence of iterative simulations. Journal of Computational
           and Graphical Statistics, 7, 434-455
        b. Gelman, A and Rubin, DB (1992) Inference from iterative
           simulation using multiple sequences, Statistical Science, 7,
           457-511.

    Args:
        C (np.ndarray): The parameter traces (the MCMC chains). If
            ``per_walker=True`` the leading axis is treated as separate
            walkers; otherwise the array is flattened to ``(-1, ndims)``
            and split into two pseudo-chains.
        ndims (int): The number of parameter dimensions.
        per_walker (bool, optional): Do the test on the combined chains, or
            using each of the walkers separately. (default: False)

    Returns:
        np.ndarray: An estimate of the Gelman-Rubin convergence diagnostic
            ``Rhat`` for each dimension. (Note: despite the original
            docstring, no effective-sample-size value is returned.)

    Code taken from https://joergdietrich.github.io/emcee-convergence.html
    """
    if not per_walker:
        # Split the complete chain into thirds and run the diagnostic on
        # the first and last 1/3 of the samples as two pseudo-chains.
        # (A dead pre-allocation that was immediately overwritten in the
        # original has been removed.)
        C = C.reshape(-1, ndims)
        n = int(np.floor(C[:, 0].shape[0] / 3))
        C = np.array([C[0:n, :], C[-n:, :]])

    # within-chain variance: mean of the per-chain sample variances
    ssq = np.var(C, axis=1, ddof=1)
    W = np.mean(ssq, axis=0)

    # between-chain variance from the spread of the per-chain means
    theta_b = np.mean(C, axis=1)
    theta_bb = np.mean(theta_b, axis=0)
    m = C.shape[0]
    nn = C.shape[1]
    B = nn / (m - 1) * np.sum((theta_bb - theta_b) ** 2, axis=0)

    # pooled variance estimate and the potential scale reduction factor
    var_theta = (nn - 1) / nn * W + 1 / nn * B
    return np.sqrt(var_theta / W)
@@ -0,0 +1,167 @@
1
+ Metadata-Version: 2.3
2
+ Name: eryn
3
+ Version: 1.2.0
4
+ Summary: Eryn: an omni-MCMC sampling package.
5
+ Author: Michael Katz
6
+ Author-email: Michael Katz <mikekatz04@gmail.com>
7
+ Classifier: License :: OSI Approved :: Apache Software License
8
+ Classifier: Natural Language :: English
9
+ Classifier: Programming Language :: C++
10
+ Classifier: Programming Language :: Cython
11
+ Classifier: Programming Language :: Python :: 3 :: Only
12
+ Classifier: Programming Language :: Python :: 3.9
13
+ Classifier: Programming Language :: Python :: 3.10
14
+ Classifier: Programming Language :: Python :: 3.11
15
+ Classifier: Programming Language :: Python :: 3.12
16
+ Classifier: Programming Language :: Python :: 3.13
17
+ Requires-Dist: exceptiongroup ; python_full_version < '3.11'
18
+ Requires-Dist: h5py
19
+ Requires-Dist: jsonschema
20
+ Requires-Dist: matplotlib
21
+ Requires-Dist: numpy
22
+ Requires-Dist: nvidia-ml-py
23
+ Requires-Dist: platformdirs
24
+ Requires-Dist: pydantic
25
+ Requires-Dist: pyyaml
26
+ Requires-Dist: requests
27
+ Requires-Dist: rich
28
+ Requires-Dist: scipy
29
+ Requires-Dist: tqdm
30
+ Requires-Dist: wrapt
31
+ Requires-Dist: ipykernel ; extra == 'doc'
32
+ Requires-Dist: ipython ; extra == 'doc'
33
+ Requires-Dist: ipywidgets ; extra == 'doc'
34
+ Requires-Dist: myst-parser ; extra == 'doc'
35
+ Requires-Dist: nbsphinx ; extra == 'doc'
36
+ Requires-Dist: pypandoc ; extra == 'doc'
37
+ Requires-Dist: sphinx ; extra == 'doc'
38
+ Requires-Dist: sphinx-rtd-theme ; extra == 'doc'
39
+ Requires-Dist: sphinx-tippy ; extra == 'doc'
40
+ Requires-Dist: corner ; extra == 'doc'
41
+ Requires-Dist: matplotlib ; extra == 'testing'
42
+ Requires-Dist: corner ; extra == 'testing'
43
+ Requires-Dist: chainconsumer ; extra == 'testing'
44
+ Requires-Python: >=3.9
45
+ Provides-Extra: doc
46
+ Provides-Extra: testing
47
+ Description-Content-Type: text/markdown
48
+
49
+ # Eryn: a multi-purpose MCMC sampler
50
+
51
+ Eryn is an advanced MCMC sampler. It has the capability to run with parallel tempering, multiple model types, and unknown counts within each model type using Reversible Jump MCMC techniques. Eryn is heavily based on [emcee](https://emcee.readthedocs.io/en/stable/). The `emcee` base structure with the Ensemble Sampler, State objects, proposal setup, and storage backends is carried over into Eryn with small changes to account for the increased complexity. In a simple sense, Eryn is an advanced (and slightly more complicated) version of `emcee`.
52
+
53
+ If you use Eryn in your publication, please cite the paper [arXiv:2303.02164](https://arxiv.org/abs/2303.02164), its [zenodo](https://zenodo.org/record/7705496#.ZAhzukJKjlw), and [emcee](https://emcee.readthedocs.io/en/stable/). The documentation for Eryn can be found here: [mikekatz04.github.io/Eryn](https://mikekatz04.github.io/Eryn). You will find the code on Github: [github.com/mikekatz04/Eryn](https://github.com/mikekatz04/Eryn).
54
+
55
+ ## Getting Started
56
+
57
+ Below is a quick set of instructions to get you started with `eryn`.
58
+
59
+ ```
60
+ pip install eryn
61
+ ```
62
+ To import eryn:
63
+
64
+ ```
65
+ from eryn.ensemble import EnsembleSampler
66
+ ```
67
+
68
+ See [examples notebook](https://github.com/mikekatz04/Eryn/blob/main/examples/Eryn_tutorial.ipynb) for more info. You can also navigate the [Documentation](https://mikekatz04.github.io/Eryn/html/index.html) pages.
69
+
70
+
71
+ ### Prerequisites
72
+
73
+ Eryn has only a few python-based dependencies: `tqdm`, `corner` for plotting, `numpy`, `matplotlib`.
74
+
75
+ ### Installing
76
+
77
+ If you are not planning to develop the code, you can just install the latest version with the pip installation technique given above. Otherwise, you can just clone the repo and run `pip install .` inside of the Eryn directory. To run tests on Eryn during development, you can run the following in the main Eryn directory:
78
+ ```
79
+ python -m unittest discover
80
+ ```
81
+
82
+
83
+ ## Running the Tests
84
+
85
+ In the main directory of the package run in the terminal:
86
+ ```
87
+ python -m unittest discover
88
+ ```
89
+
90
+
91
+ ## Contributing
92
+
93
+ Please read [CONTRIBUTING.md](CONTRIBUTING) for details on our code of conduct, and the process for submitting pull requests to us.
94
+
95
+ ## Versioning
96
+
97
+ We use [SemVer](http://semver.org/) for versioning. For the versions available, see the [tags on this repository](https://github.com/mikekatz04/Eryn/tags).
98
+
99
+ ## Citation
100
+
101
+ When using this package, please cite at minimum the following sources:
102
+
103
+ ```
104
+ @article{Karnesis:2023ras,
105
+ author = "Karnesis, Nikolaos and Katz, Michael L. and Korsakova, Natalia and Gair, Jonathan R. and Stergioulas, Nikolaos",
106
+ title = "{Eryn : A multi-purpose sampler for Bayesian inference}",
107
+ eprint = "2303.02164",
108
+ archivePrefix = "arXiv",
109
+ primaryClass = "astro-ph.IM",
110
+ month = "3",
111
+ year = "2023"
112
+ }
113
+
114
+ @software{michael_katz_2023_7705496,
115
+ author = {Michael Katz and
116
+ Nikolaos Karnesis and
117
+ Natalia Korsakova},
118
+ title = {mikekatz04/Eryn: first full release},
119
+ month = mar,
120
+ year = 2023,
121
+ publisher = {Zenodo},
122
+ version = {v1.0.0},
123
+ doi = {10.5281/zenodo.7705496},
124
+ url = {https://doi.org/10.5281/zenodo.7705496}
125
+ }
126
+
127
+ @ARTICLE{2013PASP..125..306F,
128
+ author = {{Foreman-Mackey}, Daniel and {Hogg}, David W. and {Lang}, Dustin and {Goodman}, Jonathan},
129
+ title = "{emcee: The MCMC Hammer}",
130
+ journal = {\pasp},
131
+ keywords = {Astrophysics - Instrumentation and Methods for Astrophysics, Physics - Computational Physics, Statistics - Computation},
132
+ year = 2013,
133
+ month = mar,
134
+ volume = {125},
135
+ number = {925},
136
+ pages = {306},
137
+ doi = {10.1086/670067},
138
+ archivePrefix = {arXiv},
139
+ eprint = {1202.3665},
140
+ primaryClass = {astro-ph.IM},
141
+ adsurl = {https://ui.adsabs.harvard.edu/abs/2013PASP..125..306F},
142
+ adsnote = {Provided by the SAO/NASA Astrophysics Data System}
143
+ }
144
+
145
+ ```
146
+
147
+ Depending on which proposals are used, you may be required to cite more sources. Please make sure you do this properly.
148
+
149
+ ## Authors
150
+
151
+ * **Michael Katz**
152
+ * Nikos Karnesis
153
+ * Natalia Korsakova
154
+ * Jonathan Gair
155
+
156
+ ### Contributors
157
+
158
+ * Maybe you!
159
+
160
+ ## License
161
+
162
+ This project is licensed under the GNU License - see the [LICENSE.md](LICENSE) file for details.
163
+
164
+ ## Acknowledgments
165
+
166
+ * We wish to thank S. Babak, M. Le Jeune, S. Marsat, T. Littenberg, and N. Cornish for their useful comments and very fruitful discussions.
167
+ * N Stergioulas and N Karnesis acknowledge support from the Gr-PRODEX 2019 funding program (PEA 4000132310).
@@ -0,0 +1,39 @@
1
+ eryn/CMakeLists.txt,sha256=rs-_qMYpJryM_FyvERto4RgQQ_NV4lkYvFzCNU7vvFc,1736
2
+ eryn/__init__.py,sha256=eMxCEUQyqtaUM8zTr6kDCxeuFWpxZsfY41TefWUNHXI,821
3
+ eryn/backends/__init__.py,sha256=yRQszA4WSofDDsSpTsA1V9eNw-pLVO_qalP5wpKjyZQ,380
4
+ eryn/backends/backend.py,sha256=Gcl6qk8UI1QKK74C8Kr9QYASWzuKLJhxPOj9OyiDSFI,46906
5
+ eryn/backends/hdfbackend.py,sha256=njW1KA2Anw9zxpLTYLkpNErNRBgNMA4VKidZXidkh-A,29414
6
+ eryn/ensemble.py,sha256=EPmlGl1-FoXqZ0Xi9iYdhn6INHiaViVFHh_QM_TlYfM,71743
7
+ eryn/git_version.py.in,sha256=dZ5WklaoF4dDsCVqhgw5jwr3kJCc8zjRX_LR90byZOw,139
8
+ eryn/model.py,sha256=5TeWTI6V-Xcuy5C2LI6AmtZZU-EkRSSuA7VojXNALk8,284
9
+ eryn/moves/__init__.py,sha256=9pWsSZSKLt05Ihd46vPASHwotTOHOPk_zEsCm8jWiw8,1081
10
+ eryn/moves/combine.py,sha256=YfIiRqObi11qBbTgqRQ3nMBr6a-ugGGBd1VgPSEosx4,4545
11
+ eryn/moves/delayedrejection.py,sha256=deaPPwNG2nKz-FAvi5NVTsrcoKONhy-LD15FLN0iLpY,7645
12
+ eryn/moves/distgen.py,sha256=jNTxL23KSradICJydeTsUcnE7BqMDTPmzuGh4ydQGkQ,3935
13
+ eryn/moves/distgenrj.py,sha256=szTgY1VYriJ1YYDJEOYT_kkjewk42BoXgQeZc41CO_c,9133
14
+ eryn/moves/gaussian.py,sha256=5No4tN3PFzkx5JugLwX8LboSXBt2DOHU8xegtj5zOGo,6853
15
+ eryn/moves/group.py,sha256=sm1iUvHJwuk_dvtmBTbmhZBbB_Esxl_4f2h0Ofx5p7s,10100
16
+ eryn/moves/groupstretch.py,sha256=tMXeCauYy_AyAhWM5kpgOcoQRdKLJYx2h85Tdfq6WLk,3920
17
+ eryn/moves/mh.py,sha256=63xvBRk6iNLn6EZmGb1W_buLmNw3WS7ch4kSK7yGfeE,6517
18
+ eryn/moves/move.py,sha256=8djVzFeLcpwh7hhu1sNTTFBiVDHrUPWc0TsN_FCPmRs,28952
19
+ eryn/moves/mtdistgen.py,sha256=ZCPY5lUgoyVcmPLl6jl5boBjakp4DzWfObQyo_MmOVA,5347
20
+ eryn/moves/mtdistgenrj.py,sha256=MP1KL796WcFWazys20ZXlTSzSISh61VU1jOArJz-b6U,8102
21
+ eryn/moves/multipletry.py,sha256=KSR6H7YPy5v-qxALt4X3jlGbs9UTLfTSLkTpK2Qcpgk,30156
22
+ eryn/moves/red_blue.py,sha256=UM9PSJi4H1f8oN-1ZCIevvnpt91Xgnlm-KY7LRFAOOU,13267
23
+ eryn/moves/rj.py,sha256=6krjJ5EsvgLZMTMgE9rStjjKtBIW6nw87ywRYbYtROU,15915
24
+ eryn/moves/stretch.py,sha256=auKjeN5elf9fqLR1-oDeR0pF1vdaRnKJEQcpI0mLgVU,8242
25
+ eryn/moves/tempering.py,sha256=e2doT8jVWSuaPpVUKIkWQjRe20T0i98w70wi-dz7buo,23977
26
+ eryn/pbar.py,sha256=uDDn8dMVHLD6EqZyk6vGhkOQwxgFm21Us9dz-nZE4oI,1330
27
+ eryn/prior.py,sha256=x4E5NS4v7Odag7a30OXQ-kJuoU3a6M6JnJuKlWGO6F4,14393
28
+ eryn/state.py,sha256=x4HZNrGhxnR6Ia2JrVskJGDS1Uk3AgQHgxJ4384Hpzs,31456
29
+ eryn/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
30
+ eryn/tests/test_eryn.py,sha256=JTac0NdiBfTa2-l8z0Q6S5oxr-C-UzH0uNTOE61jVFY,39792
31
+ eryn/utils/__init__.py,sha256=HzlQs1wg3J1xdrZjIMO34QHd0ZT58SQFCKEdclj7vpM,250
32
+ eryn/utils/periodic.py,sha256=Q07HKMNeUN8V_rauUjT7fKRwlYOd2AFsa9DekuRYUbk,4135
33
+ eryn/utils/stopping.py,sha256=fX1np10U3B-fpI3dGqEPZfqeYt8dc0x3PQGwrvYbbFU,5095
34
+ eryn/utils/transform.py,sha256=wzOYow7xHjqVOi8ZQDXBeoFj9y53cCtIeLggrQuo_sc,8895
35
+ eryn/utils/updates.py,sha256=U3T9UxPLabJzJuuB9s2OuX3vMD_2P7486SkgaFEkbLw,2137
36
+ eryn/utils/utility.py,sha256=Mxmx-XoLe5tEdzc2QuprpOd3u4Z2aYmR4aDVWRi6Jsk,11151
37
+ eryn-1.2.0.dist-info/WHEEL,sha256=pFCy50wRV2h7SjJ35YOsQUupaV45rMdgpNIvnXbG5bE,79
38
+ eryn-1.2.0.dist-info/METADATA,sha256=8KRio9HsI9zew0k5-keZndsrWuihfm0_az-IgmI6_is,6256
39
+ eryn-1.2.0.dist-info/RECORD,,
@@ -0,0 +1,4 @@
1
+ Wheel-Version: 1.0
2
+ Generator: uv 0.8.19
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any