westpa 2022.10-cp312-cp312-macosx_11_0_arm64.whl → 2022.12-cp312-cp312-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- westpa/_version.py +3 -3
- westpa/cli/core/w_truncate.py +15 -6
- westpa/cli/tools/w_assign.py +4 -4
- westpa/cli/tools/w_fluxanl.py +1 -3
- westpa/cli/tools/w_ntop.py +2 -2
- westpa/cli/tools/w_red.py +7 -2
- westpa/core/binning/_assign.cpython-312-darwin.so +0 -0
- westpa/core/binning/assign.py +11 -5
- westpa/core/binning/mab.py +352 -273
- westpa/core/data_manager.py +3 -3
- westpa/core/h5io.py +2 -2
- westpa/core/kinetics/_kinetics.cpython-312-darwin.so +0 -0
- westpa/core/kinetics/matrates.py +1 -1
- westpa/core/propagators/executable.py +11 -7
- westpa/core/reweight/_reweight.cpython-312-darwin.so +0 -0
- westpa/core/sim_manager.py +9 -4
- westpa/core/states.py +7 -7
- westpa/core/we_driver.py +4 -2
- westpa/fasthist/_fasthist.cpython-312-darwin.so +0 -0
- westpa/mclib/__init__.py +10 -3
- westpa/mclib/_mclib.cpython-312-darwin.so +0 -0
- westpa/oldtools/aframe/mcbs.py +9 -2
- westpa/oldtools/aframe/plotting.py +4 -4
- westpa/oldtools/cmds/w_ttimes.py +4 -1
- westpa/oldtools/stats/edfs.py +1 -1
- westpa/oldtools/stats/mcbs.py +9 -2
- westpa/trajtree/_trajtree.cpython-312-darwin.so +0 -0
- westpa/westext/stringmethod/string_method.py +1 -1
- westpa/westext/weed/ProbAdjustEquil.py +2 -2
- westpa/westext/weed/weed_driver.py +10 -0
- westpa/westext/wess/wess_driver.py +10 -0
- {westpa-2022.10.dist-info → westpa-2022.12.dist-info}/AUTHORS +8 -8
- {westpa-2022.10.dist-info → westpa-2022.12.dist-info}/METADATA +31 -21
- {westpa-2022.10.dist-info → westpa-2022.12.dist-info}/RECORD +38 -39
- {westpa-2022.10.dist-info → westpa-2022.12.dist-info}/WHEEL +2 -1
- westpa/fasthist/__main__.py +0 -110
- {westpa-2022.10.dist-info → westpa-2022.12.dist-info}/LICENSE +0 -0
- {westpa-2022.10.dist-info → westpa-2022.12.dist-info}/entry_points.txt +0 -0
- {westpa-2022.10.dist-info → westpa-2022.12.dist-info}/top_level.txt +0 -0
westpa/core/data_manager.py
CHANGED
@@ -178,7 +178,7 @@ istate_dtype = np.dtype(
         ('basis_state_id', seg_id_dtype), # Which basis state this state was generated from
         ('istate_type', istate_type_dtype), # What type this initial state is (generated or basis)
         ('istate_status', istate_status_dtype), # Whether this initial state is ready to go
-        ('basis_auxref', vstr_dtype),
+        ('basis_auxref', vstr_dtype), # for start states to point back to their original location
     ]
 )

@@ -1549,7 +1549,7 @@ def create_dataset_from_dsopts(group, dsopts, shape=None, dtype=None, data=None,
         # dsopts['file'] = str(dsopts['file']).format(n_iter=n_iter)
         h5_auxfile = h5io.WESTPAH5File(dsopts['file'].format(n_iter=n_iter))
         h5group = group
-        if …
+        if ("iter_" + str(n_iter).zfill(8)) not in h5_auxfile:
             h5_auxfile.create_group("iter_" + str(n_iter).zfill(8))
         group = h5_auxfile[('/' + "iter_" + str(n_iter).zfill(8))]

@@ -1652,7 +1652,7 @@ def create_dataset_from_dsopts(group, dsopts, shape=None, dtype=None, data=None,
     if 'file' in list(dsopts.keys()):
         import h5py

-        if …
+        if dsopts['h5path'] not in h5group:
             h5group[dsopts['h5path']] = h5py.ExternalLink(
                 dsopts['file'].format(n_iter=n_iter), ("/" + "iter_" + str(n_iter).zfill(8) + "/" + dsopts['h5path'])
             )
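The membership checks and ``h5py.ExternalLink`` usage introduced above can be exercised on their own; a minimal sketch with hypothetical file and dataset names::

    import h5py

    # Hypothetical names; the point is the membership checks performed before
    # creating per-iteration groups and external links, as in the updated code.
    n_iter = 1
    iter_group = "iter_" + str(n_iter).zfill(8)

    with h5py.File("aux_pcoords.h5", "a") as aux:
        if iter_group not in aux:
            grp = aux.create_group(iter_group)
            grp.create_dataset("pcoord_aux", data=[0.0, 1.0, 2.0])

    with h5py.File("west_demo.h5", "a") as west:
        grp = west.require_group(iter_group)
        if "pcoord_aux" not in grp:
            # Expose the auxiliary dataset inside the main file without copying it
            grp["pcoord_aux"] = h5py.ExternalLink("aux_pcoords.h5", "/" + iter_group + "/pcoord_aux")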
westpa/core/h5io.py
CHANGED
@@ -344,10 +344,10 @@ def label_axes(h5object, labels, units=None):
     if len(units) and len(units) != len(labels):
         raise ValueError('number of units labels does not match number of axes')

-    h5object.attrs['axis_labels'] = np.array([np.string_(i) for i in labels])
+    h5object.attrs['axis_labels'] = np.array([np.bytes_(i) for i in labels])

     if len(units):
-        h5object.attrs['axis_units'] = np.array([np.string_(i) for i in units])
+        h5object.attrs['axis_units'] = np.array([np.bytes_(i) for i in units])


 NotGiven = object()
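The attribute change above is the NumPy 2.0 rename of ``np.string_`` to ``np.bytes_``; a small sketch of the new spelling::

    import numpy as np
    import h5py

    labels = ['x', 'y', 'z']
    # np.bytes_ is the NumPy 2.x-compatible spelling; np.string_ was an alias
    # removed in NumPy 2.0, which is why the attribute encoding changed.
    encoded = np.array([np.bytes_(label) for label in labels])

    with h5py.File("axis_demo.h5", "w") as f:
        f.attrs['axis_labels'] = encoded
        print(f.attrs['axis_labels'])  # e.g. [b'x' b'y' b'z']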
Binary file
westpa/core/propagators/executable.py
CHANGED
@@ -1,7 +1,6 @@
 import logging
 import os
 import shutil
-import random
 import signal
 import subprocess
 import sys
@@ -12,6 +11,7 @@ import pickle
 from io import BytesIO

 import numpy as np
+from numpy.random import MT19937, Generator

 import westpa
 from westpa.core.extloader import get_object
@@ -228,6 +228,9 @@ class ExecutablePropagator(WESTPropagator):
         self.initial_state_ref_template = config['west', 'data', 'data_refs', 'initial_state']
         store_h5 = config.get(['west', 'data', 'data_refs', 'iteration']) is not None

+        # Create a persistent RNG for each worker
+        self.rng = Generator(MT19937())
+
         # Load additional environment variables for all child processes
         self.addtl_child_environ.update({k: str(v) for k, v in (config['west', 'executable', 'environ'] or {}).items()})

@@ -278,7 +281,7 @@ class ExecutablePropagator(WESTPropagator):
         self.data_info['log'] = {'name': 'seglog', 'loader': seglog_loader, 'enabled': store_h5, 'filename': None, 'dir': False}

         # Grab config from west.executable.datasets, else fallback to west.data.datasets.
-        dataset_configs = config.get(["west", "executable", "datasets"]…
+        dataset_configs = config.get(["west", "executable", "datasets"]) or config.get(['west', 'data', 'datasets'], {})
         for dsinfo in dataset_configs:
             try:
                 dsname = dsinfo['name']
@@ -332,11 +335,11 @@ class ExecutablePropagator(WESTPropagator):
         ``subprocess.Popen()``. Every child process executed by ``exec_child()`` gets these.'''

         return {
-            self.ENV_RAND16: str(…
-            self.ENV_RAND32: str(…
-            self.ENV_RAND64: str(…
-            self.ENV_RAND128: str(…
-            self.ENV_RANDFLOAT: str(…
+            self.ENV_RAND16: str(self.rng.integers(2**16, dtype=np.uint16)),
+            self.ENV_RAND32: str(self.rng.integers(2**32, dtype=np.uint32)),
+            self.ENV_RAND64: str(self.rng.integers(2**64, dtype=np.uint64)),
+            self.ENV_RAND128: str(int(self.rng.integers(2**64, dtype=np.uint64)) + int(self.rng.integers(2**64, dtype=np.uint64))),
+            self.ENV_RANDFLOAT: str(self.rng.random()),
         }

     def exec_child(self, executable, environ=None, stdin=None, stdout=None, stderr=None, cwd=None):
@@ -375,6 +378,7 @@ class ExecutablePropagator(WESTPropagator):
         # Do a subprocess.Popen.wait() to let the Popen instance (and subprocess module) know that
         # we are done with the process, and to get a more friendly return code
         rc = proc.wait()
+
         return (rc, rusage)

     def exec_child_from_child_info(self, child_info, template_args, environ):
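The propagator now draws its per-segment random values from one persistent ``Generator(MT19937())`` instead of the ``random`` module; a sketch of that pattern (the environment-variable names below are illustrative)::

    import numpy as np
    from numpy.random import MT19937, Generator

    # One persistent per-propagator Generator, queried whenever random values
    # are exported to a child-process environment.
    rng = Generator(MT19937())

    child_environ = {
        'RAND16': str(rng.integers(2**16, dtype=np.uint16)),
        'RAND32': str(rng.integers(2**32, dtype=np.uint32)),
        'RAND64': str(rng.integers(2**64, dtype=np.uint64)),
        'RANDFLOAT': str(rng.random()),  # uniform float in [0, 1)
    }
    print(child_environ)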
Binary file
westpa/core/sim_manager.py
CHANGED
@@ -1,7 +1,6 @@
 import logging
 import math
 import operator
-import random
 import time
 from datetime import timedelta
 from pickle import PickleError
@@ -9,6 +8,7 @@ from itertools import zip_longest
 from collections import Counter

 import numpy as np
+from numpy.random import Generator, MT19937

 import westpa
 from .data_manager import weight_dtype
@@ -98,6 +98,9 @@ class WESimManager:
         # Tracking of binning
         self.bin_mapper_hash = None # Hash of bin mapper from most recently-run WE, for use by post-WE analysis plugins

+        # Pseudo Random Number Generator
+        self.rng = Generator(MT19937())
+
     def register_callback(self, hook, function, priority=0):
         '''Registers a callback to execute during the given ``hook`` into the simulation loop. The optional
         priority is used to order when the function is called relative to other registered callbacks.'''
@@ -229,13 +232,15 @@ class WESimManager:

     def get_bstate_pcoords(self, basis_states, label='basis'):
         '''For each of the given ``basis_states``, calculate progress coordinate values
-        as necessary. The HDF5 file is not updated.'''
+        as necessary. The HDF5 file is not updated. The BasisState objects are explicitly
+        copied from the futures in order to retain auxdata/restart files (under BasisState.data)
+        from certain work managers (e.g., the ``processes`` work manager.)'''

         self.rc.pstatus('Calculating progress coordinate values for {} states.'.format(label))
         futures = [self.work_manager.submit(wm_ops.get_pcoord, args=(basis_state,)) for basis_state in basis_states]
         fmap = {future: i for (i, future) in enumerate(futures)}
         for future in self.work_manager.as_completed(futures):
-            basis_states[fmap[future]]…
+            basis_states[fmap[future]] = future.get_result()

     def report_basis_states(self, basis_states, label='basis'):
         pstatus = self.rc.pstatus
@@ -554,7 +559,7 @@ class WESimManager:
         updated_states = []
         for _i in range(n_istates_needed):
             # Select a basis state according to its weight
-            ibstate = np.digitize([…
+            ibstate = np.digitize([self.rng.random()], self.next_iter_bstate_cprobs)
             basis_state = self.next_iter_bstates[ibstate[0]]
             initial_state = self.data_manager.create_initial_states(1, n_iter=self.n_iter + 1)[0]
             initial_state.iter_created = self.n_iter
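The basis-state selection above now uses the simulation manager's own ``Generator`` rather than ``random``; a sketch of the weighted pick via ``np.digitize`` (the weights below are made up)::

    import numpy as np
    from numpy.random import Generator, MT19937

    # Pick a basis-state index with probability proportional to its weight,
    # using cumulative probabilities, as the updated sim_manager code does.
    rng = Generator(MT19937())

    weights = np.array([0.5, 0.3, 0.2])   # hypothetical basis-state probabilities
    cprobs = np.cumsum(weights)           # analogous to next_iter_bstate_cprobs
    ibstate = np.digitize([rng.random()], cprobs)[0]
    print(ibstate)  # 0, 1, or 2, chosen in proportion to the weights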
westpa/core/states.py
CHANGED
@@ -20,7 +20,7 @@ class BasisState:
     '''

     def __init__(self, label, probability, pcoord=None, auxref=None, state_id=None):
-        self.label = str(label, encoding="UTF-8") if …
+        self.label = str(label, encoding="UTF-8") if isinstance(label, bytes) else label
         self.probability = probability
         self.pcoord = np.atleast_1d(pcoord)
         self.auxref = auxref
@@ -28,7 +28,7 @@ class BasisState:
         self.data = {}

     def __repr__(self):
-        return '{} state_id={self.state_id!…
+        return '{} state_id={self.state_id!s} label={self.label!s} prob={self.probability!s} pcoord={self.pcoord!s}>'.format(
             object.__repr__(self)[:-1], self=self
         )

@@ -43,7 +43,7 @@ class BasisState:
         max_auxref_len = max(8, max(len(state.auxref or '') for state in states))
         fmt = (
             '{state.label:<{max_label_len}s} {state.probability:12.7g} {auxref_str:<{max_auxref_len}s}'
-            ' # state_id={state_id_str:s} pcoord={pcoord_str}\n'
+            ' # state_id={state_id_str:s} pcoord={pcoord_str:s}\n'
         )
         fileobj.write(
             '# {:{max_label_len}s} {:>12s} {:{max_auxref_len}s}\n'.format(
@@ -52,7 +52,7 @@ class BasisState:
         )
         for state in states:
             state_id_str = str(state.state_id) if state.state_id is not None else 'None'
-            pcoord_str = str(…
+            pcoord_str = str(state.pcoord.tolist())
             auxref_str = 'None' if state.auxref == b'' or state.auxref == "b''" else str(state.auxref)
             fileobj.write(
                 fmt.format(
@@ -99,7 +99,7 @@ class BasisState:
             try:
                 probability = float(fields[1])
             except ValueError:
-                raise ValueError('invalid probability ({!…
+                raise ValueError('invalid probability ({!s}) {} line {:d}'.format(fields[1], statefile, lineno))

             try:
                 auxref = fields[2].strip()
@@ -201,7 +201,7 @@ class InitialState:
         self.data = {}

     def __repr__(self):
-        return '{} state_id={self.state_id!…
+        return '{} state_id={self.state_id!s} istate_type={self.istate_type!s} basis_state_id={self.basis_state_id!s} iter_created={self.iter_created!s} pcoord={self.pcoord!s}>'.format(
             object.__repr__(self)[:-1], self=self
         )

@@ -263,7 +263,7 @@ class TargetState:
         self.state_id = state_id

     def __repr__(self):
-        return '{} state_id={self.state_id!…
+        return '{} state_id={self.state_id!s} label={self.label!s} pcoord={self.pcoord!s}>'.format(
             object.__repr__(self)[:-1], self=self
         )
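The ``isinstance(label, bytes)`` check added to ``BasisState.__init__`` accepts either bytes (as read back from HDF5) or plain strings; a tiny sketch::

    # Accept either bytes or str labels, mirroring the isinstance check above.
    def normalize_label(label):
        return str(label, encoding="UTF-8") if isinstance(label, bytes) else label

    print(normalize_label(b'bound'), normalize_label('unbound'))  # bound unbound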
westpa/core/we_driver.py
CHANGED
@@ -1,9 +1,9 @@
 import logging
 import math
 import operator
-import random

 import numpy as np
+from numpy.random import Generator, MT19937

 import westpa
 from .segment import Segment
@@ -119,6 +119,8 @@ class WEDriver:

         self.avail_initial_states = None

+        self.rng = Generator(MT19937())
+
         # Make property for subgrouping function.
         self.subgroup_function = _group_walkers_identity
         self.subgroup_function_kwargs = {}
@@ -478,7 +480,7 @@ class WEDriver:
         # sees where this value falls among the (sorted) weights of the segments being merged;
         # this ensures that a walker with (e.g.) twice the weight of its brethren has twice the
         # probability of having its history selected for continuation
-        iparent = np.digitize((…
+        iparent = np.digitize((self.rng.uniform(0, glom.weight),), cumul_weight)[0]
         gparent_seg = segments[iparent]

         # Inherit history from this segment ("gparent" stands for "glom parent", as opposed to historical
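The merge step now uses the driver's ``Generator`` to pick which parent's history survives; a sketch with made-up segment weights::

    import numpy as np
    from numpy.random import Generator, MT19937

    # When several segments are merged, the surviving history is drawn with
    # probability proportional to each segment's weight.
    rng = Generator(MT19937())

    seg_weights = np.array([1e-6, 2e-6, 4e-6])   # hypothetical walker weights
    cumul_weight = np.cumsum(seg_weights)
    glom_weight = cumul_weight[-1]               # total weight of the merged walker

    iparent = np.digitize((rng.uniform(0, glom_weight),), cumul_weight)[0]
    print(iparent)  # heavier segments are proportionally more likely to be chosen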
Binary file
westpa/mclib/__init__.py
CHANGED
@@ -2,10 +2,15 @@
 statistics.'''

 import numpy as np
+from numpy.random import Generator, MT19937

 from ._mclib import mcbs_correltime, get_bssize, mcbs_ci


+def msort(input_array):
+    return np.sort(input_array, axis=0)
+
+
 def mcbs_ci_correl(
     estimator_datasets,
     estimator,
@@ -98,6 +103,8 @@ def mcbs_ci_correl(
         # We don't try and pretend we're doing any error analysis.
         return return_set, return_set, return_set, 0, 1

+    rng = Generator(MT19937())  # RNG
+
    # We need to pre-generate the data; why not do it here? We're already set up for it...
    precalc_kwargs = estimator_kwargs.copy()
    precalc_kwargs['stride'] = 1
@@ -144,10 +151,10 @@ def mcbs_ci_correl(
            n_sets=n_sets,
            args=args,
            kwargs=estimator_kwargs,
-            sort=np.msort,
+            sort=msort,
        ) + (correl_len,)
    else:
-        subsample = subsample or (lambda x: x[…
+        subsample = subsample or (lambda x: x[rng.integers(len(x))])
        # Let's make sure we decimate every array properly...
        decim_list = {}
        for key, dset in estimator_datasets.items():
@@ -175,7 +182,7 @@ def mcbs_ci_correl(
            n_sets=n_sets,
            args=args,
            kwargs=estimator_kwargs,
-            sort=np.msort,
+            sort=msort,
        ) + (correl_len,)

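``np.msort`` was removed in NumPy 2.0; the local ``msort`` helper added above reproduces its behavior (sort along the first axis)::

    import numpy as np

    # Equivalent of the removed np.msort: sort each column along axis 0.
    def msort(input_array):
        return np.sort(input_array, axis=0)

    a = np.array([[3, 1], [1, 2], [2, 0]])
    print(msort(a))  # [[1 0] [2 1] [3 2]] -- each column sorted independently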
Binary file
westpa/oldtools/aframe/mcbs.py
CHANGED
@@ -6,6 +6,7 @@ import logging
 import math

 import numpy as np
+from numpy.random import Generator, MT19937

 import westpa
 from westpa.oldtools.aframe import AnalysisMixin
@@ -13,6 +14,10 @@ from westpa.oldtools.aframe import AnalysisMixin
 log = logging.getLogger(__name__)


+def msort(input_array):
+    return np.sort(input_array, axis=0)
+
+
 class MCBSMixin(AnalysisMixin):
     def __init__(self):
         super().__init__()
@@ -96,12 +101,14 @@ def bootstrap_ci_ll(estimator, data, alpha, n_sets, storage, sort, eargs=(), ekw
     value ``fhat`` of the estimator must be pre-calculated to allocate ``storage``, then its value may be
     passed; otherwise, ``estimator(data,*eargs,**kwargs)`` will be called to calculate it.'''

+    rng = Generator(MT19937())
+
     if fhat is None:
         fhat = estimator(data, *eargs, **ekwargs)
     dlen = len(data)

     for iset in range(n_sets):
-        indices = …
+        indices = rng.integers(dlen, size=(dlen,))
         storage[iset] = estimator(data[indices], *eargs, **ekwargs)

     synth_sorted = sort(storage)
@@ -117,7 +124,7 @@ def bootstrap_ci_ll(estimator, data, alpha, n_sets, storage, sort, eargs=(), ekw
     del fhat, lb, ub, indices


-def bootstrap_ci(estimator, data, alpha, n_sets=None, sort=np.msort, eargs=(), ekwargs={}):
+def bootstrap_ci(estimator, data, alpha, n_sets=None, sort=msort, eargs=(), ekwargs={}):
     '''Perform a Monte Carlo bootstrap of a (1-alpha) confidence interval for the given ``estimator``.
     Returns (fhat, ci_lower, ci_upper), where fhat is the result of ``estimator(data, *eargs, **ekwargs)``,
     and ``ci_lower`` and ``ci_upper`` are the lower and upper bounds of the surrounding confidence
westpa/oldtools/aframe/plotting.py
CHANGED

@@ -66,10 +66,10 @@ else:
     cm_pdr = matplotlib.colors.LinearSegmentedColormap('pdr', _pdr_data, 2048)
     cm_pdr_r = cm_pdr.reversed()

-    matplotlib.…
-    matplotlib.…
-    matplotlib.…
-    matplotlib.…
+    matplotlib.colormaps.register(cmap=cm_pdr, name='pdr')
+    matplotlib.colormaps.register(cmap=cm_pdr_r, name='pdr_r')
+    matplotlib.colormaps.register(cmap=cm_hovmol, name='hovmol')
+    matplotlib.colormaps.register(cmap=cm_hovmol_r, name='hovmol_r')

     del cmap_data
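The change above replaces an older registration call with ``matplotlib.colormaps.register``, the registry API in recent Matplotlib releases; a sketch with a throwaway colormap::

    import matplotlib
    import matplotlib.colors

    # Register a custom colormap through the ColormapRegistry; the name here
    # is illustrative and unrelated to the package's own colormaps.
    cmap = matplotlib.colors.LinearSegmentedColormap.from_list('demo_cmap', ['white', 'black'])
    if 'demo_cmap' not in matplotlib.colormaps:
        matplotlib.colormaps.register(cmap=cmap, name='demo_cmap')

    print(matplotlib.colormaps['demo_cmap'](0.5))  # RGBA value at the midpoint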
westpa/oldtools/cmds/w_ttimes.py
CHANGED
@@ -2,6 +2,7 @@ import argparse
 import logging

 import numpy as np
+from numpy.random import Generator, MT19937

 import westpa

@@ -42,6 +43,8 @@ class WTTimesBase:
         self.fluxes = None
         self.rates = None

+        self.rng = Generator(MT19937())
+
     def add_args(self, parser, upcall=True):
         '''Add arguments to a parser common to all analyses of this type.'''
         if upcall:
@@ -162,7 +165,7 @@ class WTTimesBase:
             end='',
         )
         westpa.rc.pflush()
-        indices = …
+        indices = self.rng.integers(dlen, size=(dlen,))
         # syn_weights = trans_weights[indices]
         # syn_durations = trans_durations[indices]
         # syn_fpts = trans_fpts[indices]
westpa/oldtools/stats/edfs.py
CHANGED
@@ -42,7 +42,7 @@ class EDF:
             raise TypeError('values and weights have different lengths')

         # Sort values
-        sort_indices = numpy.argsort(values)
+        sort_indices = numpy.argsort(values, kind='stable')
         values = values[sort_indices]
         weights = weights[sort_indices]
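``kind='stable'`` keeps equal values in their original order, so the paired weights are reordered deterministically; a sketch::

    import numpy as np

    # A stable argsort preserves the input order of ties, which keeps the
    # value/weight pairing reproducible when the EDF is built.
    values = np.array([0.2, 0.1, 0.2, 0.1])
    weights = np.array([1.0, 2.0, 3.0, 4.0])

    sort_indices = np.argsort(values, kind='stable')
    print(values[sort_indices])   # [0.1 0.1 0.2 0.2]
    print(weights[sort_indices])  # [2. 4. 1. 3.] -- ties keep their input order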
westpa/oldtools/stats/mcbs.py
CHANGED
@@ -5,6 +5,11 @@ Tools for Monte Carlo bootstrap error analysis
 import math

 import numpy as np
+from numpy.random import Generator, MT19937
+
+
+def msort(input_array):
+    return np.sort(input_array, axis=0)


 def add_mcbs_options(parser):
@@ -31,7 +36,7 @@ def get_bssize(alpha):
     return int(10 ** (math.ceil(-math.log10(alpha)) + 1))


-def bootstrap_ci(estimator, data, alpha, n_sets=None, args=(), kwargs={}, sort=np.msort, extended_output=False):
+def bootstrap_ci(estimator, data, alpha, n_sets=None, args=(), kwargs={}, sort=msort, extended_output=False):
     '''Perform a Monte Carlo bootstrap of a (1-alpha) confidence interval for the given ``estimator``.
     Returns (fhat, ci_lower, ci_upper), where fhat is the result of ``estimator(data, *args, **kwargs)``,
     and ``ci_lower`` and ``ci_upper`` are the lower and upper bounds of the surrounding confidence
@@ -69,8 +74,10 @@ def bootstrap_ci(estimator, data, alpha, n_sets=None, args=(), kwargs={}, sort=np.msort, extended_output=False):

     f_synth = np.empty((n_sets,) + estimator_shape, dtype=estimator_dtype)

+    rng = Generator(MT19937())
+
     for i in range(0, n_sets):
-        indices = …
+        indices = rng.integers(dlen, size=(dlen,))
         f_synth[i] = estimator(data[indices], *args, **kwargs)

     f_synth_sorted = sort(f_synth)
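The bootstrap loop above now resamples with a local ``Generator``; a compact, self-contained sketch of the same idea (the function name and index conventions here are illustrative)::

    import numpy as np
    from numpy.random import Generator, MT19937

    # Monte Carlo bootstrap of a (1-alpha) confidence interval using a local
    # Generator instead of the legacy global numpy.random state.
    def bootstrap_ci_sketch(estimator, data, alpha=0.05, n_sets=1000):
        rng = Generator(MT19937())
        dlen = len(data)
        f_synth = np.empty(n_sets)
        for i in range(n_sets):
            indices = rng.integers(dlen, size=(dlen,))   # resample with replacement
            f_synth[i] = estimator(data[indices])
        f_synth.sort()
        lb = f_synth[int(np.floor(n_sets * alpha / 2))]
        ub = f_synth[int(np.ceil(n_sets * (1 - alpha / 2))) - 1]
        return estimator(data), lb, ub

    print(bootstrap_ci_sketch(np.mean, np.linspace(0.0, 1.0, 100)))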
Binary file
westpa/westext/weed/ProbAdjustEquil.py
CHANGED

@@ -62,7 +62,7 @@ def probAdjustEquil(binProb, rates, uncert, threshold=0.0, fullCalcClust=False,
        j = j[nzi]

        vals = ma.vstack((ratios_average.vals[i, j], ratios_average.vals[j, i]))
-        ias = ma.argsort(vals, axis=0, fill_value=np.inf)
+        ias = ma.argsort(vals, axis=0, kind='stable', fill_value=np.inf)

        ordered_ind = np.vstack((i, j))
        flip_ind = np.nonzero(ias[0, :])  # Find pairs in which to select ji rather than ij
@@ -73,7 +73,7 @@
        uncertij = ratios_average.uncert[iind, jind]  # Get the uncert for ij pairs

        count = uncertij.count()  # Count of the unmasked uncertainties
-        ias = ma.argsort(uncertij, fill_value=np.inf)  # Get the indices that would sort uncertij
+        ias = ma.argsort(uncertij, kind='stable', fill_value=np.inf)  # Get the indices that would sort uncertij
        iind = iind[ias[:count]]  # Sort the indices excluding masked/undefined values
        jind = jind[ias[:count]]

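``ma.argsort`` with ``kind='stable'`` and ``fill_value=np.inf`` sorts masked entries to the end while keeping ties in input order; a sketch::

    import numpy as np
    import numpy.ma as ma

    # Masked entries are filled with inf before sorting, so they land last;
    # the stable kind keeps equal unmasked values in their original order.
    vals = ma.array([0.3, 0.1, 0.1, 0.2], mask=[False, False, True, False])
    order = ma.argsort(vals, kind='stable', fill_value=np.inf)
    print(order)  # [1 3 0 2] -- the masked element sorts last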
westpa/westext/weed/weed_driver.py
CHANGED

@@ -95,7 +95,15 @@ class WEEDDriver:

         with self.data_manager.lock:
             weed_global_group = self.data_manager.we_h5file.require_group('weed')
+            reweighting_history_dataset = weed_global_group.require_dataset(
+                'reweighting_history', (1,), maxshape=(None,), dtype=int
+            )
             last_reweighting = int(weed_global_group.attrs.get('last_reweighting', 0))
+            if last_reweighting > n_iter:
+                last_reweighting = n_iter - 1
+                reweighting_history = reweighting_history_dataset[:]
+                reweighting_history = reweighting_history[reweighting_history < n_iter]
+                reweighting_history_dataset.resize((reweighting_history.size), axis=0)

         if n_iter - last_reweighting < self.reweight_period:
             # Not time to reweight yet
@@ -172,6 +180,8 @@
         for bin, newprob in zip(bins, binprobs):
             bin.reweight(newprob)

+        reweighting_history_dataset.resize((reweighting_history_dataset.shape[0] + 1), axis=0)
+        reweighting_history_dataset[-1] = n_iter
         weed_global_group.attrs['last_reweighting'] = n_iter

         assert (
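The new ``reweighting_history`` bookkeeping relies on a resizable HDF5 dataset; a sketch of the append pattern with throwaway file and group names::

    import h5py

    # A resizable dataset recording the iteration at which each reweighting
    # occurred; the file and group names here are illustrative.
    n_iter = 5
    with h5py.File("weed_demo.h5", "w") as f:
        group = f.require_group('weed')
        history = group.require_dataset('reweighting_history', (1,), maxshape=(None,), dtype=int)

        # After deciding to reweight at iteration n_iter, append it to the history:
        history.resize(history.shape[0] + 1, axis=0)
        history[-1] = n_iter
        print(history[:])  # [0 5] -- the first slot is the dataset's initial element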
westpa/westext/wess/wess_driver.py
CHANGED

@@ -112,7 +112,15 @@ class WESSDriver:

         with self.data_manager.lock:
             wess_global_group = self.data_manager.we_h5file.require_group('wess')
+            reweighting_history_dataset = wess_global_group.require_dataset(
+                'reweighting_history', (1,), maxshape=(None,), dtype=int
+            )
             last_reweighting = int(wess_global_group.attrs.get('last_reweighting', 0))
+            if last_reweighting > n_iter:
+                last_reweighting = n_iter - 1
+                reweighting_history = reweighting_history_dataset[:]
+                reweighting_history = reweighting_history[reweighting_history < n_iter]
+                reweighting_history_dataset.resize((reweighting_history.size), axis=0)

         if n_iter - last_reweighting < self.reweight_period:
             # Not time to reweight yet
@@ -197,6 +205,8 @@
             if len(bin):
                 bin.reweight(newprob)

+        reweighting_history_dataset.resize((reweighting_history_dataset.shape[0] + 1), axis=0)
+        reweighting_history_dataset[-1] = n_iter
         wess_global_group.attrs['last_reweighting'] = n_iter

         assert (
{westpa-2022.10.dist-info → westpa-2022.12.dist-info}/AUTHORS
CHANGED

@@ -4,18 +4,18 @@ in collaboration with Daniel Zuckerman (zuckermd@ohsu.edu)
 The original version of WESTPA was written by Matthew Zwier (matthew.zwier@drake.edu)
 as part of his Ph.D. dissertation with Lillian Chong.

-Other contributors are the following (…
+Other core contributors are the following (in alphabetical order)

 Joshua Adelman
-…
-…
-…
-…
-A. J. Pratt
-Nicholas Rego
+Anthony Bogetti
+Jeremy Leung
+AJ Pratt
+John Russo
 Ali Saglam
-…
+Jeff Thompson
 Kim Wong
+Darian Yang
+She Zhang

 The work manager interface is derived from the ``concurrent.futures`` module
 of Python 3.2 by Brian Quinlan, (C) 2011 the Python Software Foundation.
{westpa-2022.10.dist-info → westpa-2022.12.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.2
 Name: westpa
-Version: 2022.10
+Version: 2022.12
 Summary: WESTPA is a package for constructing and running stochastic simulations using the "weighted ensemble" approach of Huber and Kim (1996).
 Home-page: http://github.com/westpa/westpa
 License: MIT
@@ -12,13 +12,14 @@ Classifier: Operating System :: POSIX
 Classifier: Programming Language :: Python
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Cython
-Requires-Python: >=3.…
+Requires-Python: >=3.9
 License-File: LICENSE
 License-File: AUTHORS
-Requires-Dist: numpy…
-Requires-Dist: …
-Requires-Dist: …
-Requires-Dist: …
+Requires-Dist: numpy<3,>=1.25.0; python_version >= "3.10"
+Requires-Dist: numpy<2,>=1.25.0; python_version < "3.10"
+Requires-Dist: scipy>=0.19.1
+Requires-Dist: h5py>=2.10
+Requires-Dist: mdtraj>=1.9.5
 Requires-Dist: pyyaml
 Requires-Dist: pyzmq
 Requires-Dist: matplotlib
@@ -27,19 +28,28 @@ Requires-Dist: ipykernel
 Requires-Dist: tqdm
 Requires-Dist: pandas
 Requires-Dist: tables
-Provides-Extra: dev
-Requires-Dist: pytest ; extra == 'dev'
-Requires-Dist: pytest-cov ; extra == 'dev'
-Requires-Dist: pytest-rerunfailures ; extra == 'dev'
-Requires-Dist: pytest-timeout ; extra == 'dev'
-Requires-Dist: pre-commit ; extra == 'dev'
-Provides-Extra: mpi
-Requires-Dist: mpi4py ; extra == 'mpi'
 Provides-Extra: tests
-Requires-Dist: pytest…
-Requires-Dist: pytest-cov…
-Requires-Dist: pytest-rerunfailures…
-Requires-Dist: pytest-timeout…
+Requires-Dist: pytest; extra == "tests"
+Requires-Dist: pytest-cov; extra == "tests"
+Requires-Dist: pytest-rerunfailures; extra == "tests"
+Requires-Dist: pytest-timeout; extra == "tests"
+Provides-Extra: mpi
+Requires-Dist: mpi4py; extra == "mpi"
+Provides-Extra: dev
+Requires-Dist: pytest; extra == "dev"
+Requires-Dist: pytest-cov; extra == "dev"
+Requires-Dist: pytest-rerunfailures; extra == "dev"
+Requires-Dist: pytest-timeout; extra == "dev"
+Requires-Dist: pre-commit; extra == "dev"
+Dynamic: classifier
+Dynamic: description
+Dynamic: home-page
+Dynamic: keywords
+Dynamic: license
+Dynamic: provides-extra
+Dynamic: requires-dist
+Dynamic: requires-python
+Dynamic: summary

 ===============
 WESTPA 2.0
@@ -105,7 +115,7 @@ WESTPA is free software, licensed under the terms of the MIT License. See the fi
 Requirements
 ------------

-WESTPA is written in Python and requires version 3.…
+WESTPA is written in Python and requires version 3.9 or later. WESTPA also requires a number of Python scientific software packages.
 The simplest way to meet these requirements is to download the
 Anaconda Python distribution from www.anaconda.com (free for all users).

@@ -137,7 +147,7 @@ See the install instructions on our `wiki`_ for more detailed information.

 To install from source (**not recommended**), start by downloading the corresponding tar.gz file from the `releases page`_. After downloading the file, unpack the file and install WESTPA by executing the following::

-    tar xvzf westpa-2022.…
+    tar xvzf westpa-2022.11.tar.gz
     cd westpa
     python -m pip install -e .