westpa 2022.12__cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of westpa might be problematic.
- westpa/__init__.py +14 -0
- westpa/_version.py +21 -0
- westpa/analysis/__init__.py +5 -0
- westpa/analysis/core.py +746 -0
- westpa/analysis/statistics.py +27 -0
- westpa/analysis/trajectories.py +360 -0
- westpa/cli/__init__.py +0 -0
- westpa/cli/core/__init__.py +0 -0
- westpa/cli/core/w_fork.py +152 -0
- westpa/cli/core/w_init.py +230 -0
- westpa/cli/core/w_run.py +77 -0
- westpa/cli/core/w_states.py +212 -0
- westpa/cli/core/w_succ.py +99 -0
- westpa/cli/core/w_truncate.py +68 -0
- westpa/cli/tools/__init__.py +0 -0
- westpa/cli/tools/ploterr.py +506 -0
- westpa/cli/tools/plothist.py +706 -0
- westpa/cli/tools/w_assign.py +596 -0
- westpa/cli/tools/w_bins.py +166 -0
- westpa/cli/tools/w_crawl.py +119 -0
- westpa/cli/tools/w_direct.py +547 -0
- westpa/cli/tools/w_dumpsegs.py +94 -0
- westpa/cli/tools/w_eddist.py +506 -0
- westpa/cli/tools/w_fluxanl.py +376 -0
- westpa/cli/tools/w_ipa.py +833 -0
- westpa/cli/tools/w_kinavg.py +127 -0
- westpa/cli/tools/w_kinetics.py +96 -0
- westpa/cli/tools/w_multi_west.py +414 -0
- westpa/cli/tools/w_ntop.py +213 -0
- westpa/cli/tools/w_pdist.py +515 -0
- westpa/cli/tools/w_postanalysis_matrix.py +82 -0
- westpa/cli/tools/w_postanalysis_reweight.py +53 -0
- westpa/cli/tools/w_red.py +491 -0
- westpa/cli/tools/w_reweight.py +780 -0
- westpa/cli/tools/w_select.py +226 -0
- westpa/cli/tools/w_stateprobs.py +111 -0
- westpa/cli/tools/w_trace.py +599 -0
- westpa/core/__init__.py +0 -0
- westpa/core/_rc.py +673 -0
- westpa/core/binning/__init__.py +55 -0
- westpa/core/binning/_assign.cpython-313-x86_64-linux-gnu.so +0 -0
- westpa/core/binning/assign.py +455 -0
- westpa/core/binning/binless.py +96 -0
- westpa/core/binning/binless_driver.py +54 -0
- westpa/core/binning/binless_manager.py +190 -0
- westpa/core/binning/bins.py +47 -0
- westpa/core/binning/mab.py +506 -0
- westpa/core/binning/mab_driver.py +54 -0
- westpa/core/binning/mab_manager.py +198 -0
- westpa/core/data_manager.py +1694 -0
- westpa/core/extloader.py +74 -0
- westpa/core/h5io.py +995 -0
- westpa/core/kinetics/__init__.py +24 -0
- westpa/core/kinetics/_kinetics.cpython-313-x86_64-linux-gnu.so +0 -0
- westpa/core/kinetics/events.py +147 -0
- westpa/core/kinetics/matrates.py +156 -0
- westpa/core/kinetics/rate_averaging.py +266 -0
- westpa/core/progress.py +218 -0
- westpa/core/propagators/__init__.py +54 -0
- westpa/core/propagators/executable.py +719 -0
- westpa/core/reweight/__init__.py +14 -0
- westpa/core/reweight/_reweight.cpython-313-x86_64-linux-gnu.so +0 -0
- westpa/core/reweight/matrix.py +126 -0
- westpa/core/segment.py +119 -0
- westpa/core/sim_manager.py +835 -0
- westpa/core/states.py +359 -0
- westpa/core/systems.py +93 -0
- westpa/core/textio.py +74 -0
- westpa/core/trajectory.py +330 -0
- westpa/core/we_driver.py +910 -0
- westpa/core/wm_ops.py +43 -0
- westpa/core/yamlcfg.py +391 -0
- westpa/fasthist/__init__.py +34 -0
- westpa/fasthist/_fasthist.cpython-313-x86_64-linux-gnu.so +0 -0
- westpa/mclib/__init__.py +271 -0
- westpa/mclib/__main__.py +28 -0
- westpa/mclib/_mclib.cpython-313-x86_64-linux-gnu.so +0 -0
- westpa/oldtools/__init__.py +4 -0
- westpa/oldtools/aframe/__init__.py +35 -0
- westpa/oldtools/aframe/atool.py +75 -0
- westpa/oldtools/aframe/base_mixin.py +26 -0
- westpa/oldtools/aframe/binning.py +178 -0
- westpa/oldtools/aframe/data_reader.py +560 -0
- westpa/oldtools/aframe/iter_range.py +200 -0
- westpa/oldtools/aframe/kinetics.py +117 -0
- westpa/oldtools/aframe/mcbs.py +153 -0
- westpa/oldtools/aframe/output.py +39 -0
- westpa/oldtools/aframe/plotting.py +90 -0
- westpa/oldtools/aframe/trajwalker.py +126 -0
- westpa/oldtools/aframe/transitions.py +469 -0
- westpa/oldtools/cmds/__init__.py +0 -0
- westpa/oldtools/cmds/w_ttimes.py +361 -0
- westpa/oldtools/files.py +34 -0
- westpa/oldtools/miscfn.py +23 -0
- westpa/oldtools/stats/__init__.py +4 -0
- westpa/oldtools/stats/accumulator.py +35 -0
- westpa/oldtools/stats/edfs.py +129 -0
- westpa/oldtools/stats/mcbs.py +96 -0
- westpa/tools/__init__.py +33 -0
- westpa/tools/binning.py +472 -0
- westpa/tools/core.py +340 -0
- westpa/tools/data_reader.py +159 -0
- westpa/tools/dtypes.py +31 -0
- westpa/tools/iter_range.py +198 -0
- westpa/tools/kinetics_tool.py +340 -0
- westpa/tools/plot.py +283 -0
- westpa/tools/progress.py +17 -0
- westpa/tools/selected_segs.py +154 -0
- westpa/tools/wipi.py +751 -0
- westpa/trajtree/__init__.py +4 -0
- westpa/trajtree/_trajtree.cpython-313-x86_64-linux-gnu.so +0 -0
- westpa/trajtree/trajtree.py +117 -0
- westpa/westext/__init__.py +0 -0
- westpa/westext/adaptvoronoi/__init__.py +3 -0
- westpa/westext/adaptvoronoi/adaptVor_driver.py +214 -0
- westpa/westext/hamsm_restarting/__init__.py +3 -0
- westpa/westext/hamsm_restarting/example_overrides.py +35 -0
- westpa/westext/hamsm_restarting/restart_driver.py +1165 -0
- westpa/westext/stringmethod/__init__.py +11 -0
- westpa/westext/stringmethod/fourier_fitting.py +69 -0
- westpa/westext/stringmethod/string_driver.py +253 -0
- westpa/westext/stringmethod/string_method.py +306 -0
- westpa/westext/weed/BinCluster.py +180 -0
- westpa/westext/weed/ProbAdjustEquil.py +100 -0
- westpa/westext/weed/UncertMath.py +247 -0
- westpa/westext/weed/__init__.py +10 -0
- westpa/westext/weed/weed_driver.py +192 -0
- westpa/westext/wess/ProbAdjust.py +101 -0
- westpa/westext/wess/__init__.py +6 -0
- westpa/westext/wess/wess_driver.py +217 -0
- westpa/work_managers/__init__.py +57 -0
- westpa/work_managers/core.py +396 -0
- westpa/work_managers/environment.py +134 -0
- westpa/work_managers/mpi.py +318 -0
- westpa/work_managers/processes.py +187 -0
- westpa/work_managers/serial.py +28 -0
- westpa/work_managers/threads.py +79 -0
- westpa/work_managers/zeromq/__init__.py +20 -0
- westpa/work_managers/zeromq/core.py +641 -0
- westpa/work_managers/zeromq/node.py +131 -0
- westpa/work_managers/zeromq/work_manager.py +526 -0
- westpa/work_managers/zeromq/worker.py +320 -0
- westpa-2022.12.dist-info/AUTHORS +22 -0
- westpa-2022.12.dist-info/LICENSE +21 -0
- westpa-2022.12.dist-info/METADATA +193 -0
- westpa-2022.12.dist-info/RECORD +149 -0
- westpa-2022.12.dist-info/WHEEL +6 -0
- westpa-2022.12.dist-info/entry_points.txt +29 -0
- westpa-2022.12.dist-info/top_level.txt +1 -0
westpa/mclib/__init__.py
ADDED
@@ -0,0 +1,271 @@
'''A package for performing Monte Carlo bootstrap estimates of
statistics.'''

import numpy as np
from numpy.random import Generator, MT19937

from ._mclib import mcbs_correltime, get_bssize, mcbs_ci


def msort(input_array):
    return np.sort(input_array, axis=0)


def mcbs_ci_correl(
    estimator_datasets,
    estimator,
    alpha,
    n_sets=None,
    args=None,
    autocorrel_alpha=None,
    autocorrel_n_sets=None,
    subsample=None,
    do_correl=True,
    mcbs_enable=None,
    estimator_kwargs={},
):
    '''Perform a Monte Carlo bootstrap estimate for the (1-``alpha``) confidence interval
    on the given ``dataset`` with the given ``estimator``. This routine is appropriate
    for time-correlated data, using the method described in Huber & Kim, "Weighted-ensemble
    Brownian dynamics simulations for protein association reactions" (1996),
    doi:10.1016/S0006-3495(96)79552-8 to determine a statistically significant correlation time
    and then reducing the dataset by a factor of that correlation time before running a "classic"
    Monte Carlo bootstrap.

    Returns ``(estimate, ci_lb, ci_ub, correl_time)`` where ``estimate`` is the application of the
    given ``estimator`` to the input ``dataset``, ``ci_lb`` and ``ci_ub`` are the
    lower and upper limits, respectively, of the (1-``alpha``) confidence interval on
    ``estimate``, and ``correl_time`` is the correlation time of the dataset, significant to
    (1-``autocorrel_alpha``).

    ``estimator`` is called as ``estimator(dataset, *args, **kwargs)``. Common estimators include:
      * np.mean -- calculate the confidence interval on the mean of ``dataset``
      * np.median -- calculate a confidence interval on the median of ``dataset``
      * np.std -- calculate a confidence interval on the standard deviation of ``dataset``.

    ``n_sets`` is the number of synthetic data sets to generate using the given ``estimator``,
    which will be chosen using `get_bssize()`_ if ``n_sets`` is not given.

    ``autocorrel_alpha`` (which defaults to ``alpha``) can be used to adjust the significance
    level of the autocorrelation calculation. Note that too high a significance level (too low an
    alpha) for evaluating the significance of autocorrelation values can result in a failure to
    detect correlation if the autocorrelation function is noisy.

    The given ``subsample`` function is used, if provided, to subsample the dataset prior to running
    the full Monte Carlo bootstrap. If none is provided, then a random entry from each correlated
    block is used as the value for that block. Other reasonable choices include ``np.mean``,
    ``np.median``, ``(lambda x: x[0])`` or ``(lambda x: x[-1])``. In particular, using
    ``subsample=np.mean`` will converge to the block averaged mean and standard error,
    while accounting for any non-normality in the distribution of the mean.
    '''

    if alpha > 0.5:
        raise ValueError('alpha ({}) > 0.5'.format(alpha))

    autocorrel_alpha = alpha if not autocorrel_alpha else autocorrel_alpha

    # We're now passing in dataset as a dict, so we need to enforce that for compatibility with older tools.
    # This just takes our dataset and puts it into a dict, as it's likely that we're using
    # mean or median as our estimators, which take "a" as argument input.
    if not isinstance(estimator_datasets, dict):
        # Enforcing the data structure.
        pre_calculated = estimator_datasets
        estimator_datasets = {'a': estimator_datasets}
        # This also probably means our estimator isn't going to handle kwargs, so we'll watch out for that later in testing.
        # We may have to replace the 'simple' estimator with a slightly more complex lambda function which simply ditches extra arguments.
    for key, dset in estimator_datasets.items():
        estimator_datasets[key] = np.asanyarray(dset)
        dlen = dset.shape[0]

    # Why do we have 'estimator_datasets'?
    # Estimators may require many different sets of data to properly function; while we can send this in via the kwargs,
    # we may wish to decimate only a certain subset (due to the block bootstrapping) of the input parameters.
    # Therefore, 'estimator_datasets' should consist of datasets that must be sliced/decimated with the subsampling function.
    # Some estimators (such as the reweighting) may not be able to be decimated in a straightforward manner with a subsample function,
    # as we cannot pre-estimate the quantity without introducing error or bias. In those cases, we may wish to pass on all the data,
    # but ensure that our estimator only includes certain iterations (and only in a certain way).

    n_sets = n_sets or get_bssize(alpha)
    autocorrel_n_sets = autocorrel_n_sets or get_bssize(autocorrel_alpha)

    if mcbs_enable is False:
        # While it's odd to support NOT doing the bootstrap in a library specifically designed for bootstrapping,
        # supporting this functionality here makes writing the code a lot easier, as we can just pass in a flag.
        # Specifically, this is for situations in which error is not desired (that is, only a reasonable mean is desired).
        # It's often useful when doing a quick analysis.
        estimator_datasets.update(estimator_kwargs)
        try:
            estimator_datasets.update({'stride': 1})
        except Exception:
            pass

        return_set = estimator(**estimator_datasets)
        # We don't try and pretend we're doing any error analysis.
        return return_set, return_set, return_set, 0, 1

    rng = Generator(MT19937())  # RNG

    # We need to pre-generate the data; why not do it here? We're already set up for it...
    precalc_kwargs = estimator_kwargs.copy()
    precalc_kwargs['stride'] = 1
    pre_calculated = []
    for block in range(1, dlen + 1):
        for key, dset in estimator_datasets.items():
            precalc_kwargs[key] = dset[0:block]
        pre_calculated.append(estimator(**precalc_kwargs))
    # We need to get rid of any NaNs.
    pre_calculated = np.asanyarray(pre_calculated)
    pre_calculated = pre_calculated[np.isfinite(pre_calculated)]
    # If this happens, we have a huge NaN problem. That is, our estimator is failing to return meaningful
    # numbers. We should catch this when it happens, and so raise an exception, here.
    # This is almost certainly due to estimator failure. Double check that calculation.
    if pre_calculated.shape == (0,):
        raise NameError("Looks like the estimator failed. This is likely a programming issue, and should be reported.")
    # If pre-calculated is not None, we'll use that instead of dataset.
    # We can also assume that it's a 1 dimensional set with nothing needed, so 'key' should work.
    if do_correl is True:
        correl_len = mcbs_correltime(pre_calculated, autocorrel_alpha, autocorrel_n_sets)
    else:
        correl_len = 0
    if correl_len == len(pre_calculated):
        # too correlated for meaningful calculations
        estimator_datasets.update(estimator_kwargs)
        try:
            estimator_datasets.update({'stride': 1})
        except Exception:
            pass

        return estimator(**estimator_datasets), pre_calculated.min(), pre_calculated.max(), (np.std(pre_calculated)), correl_len

    # else, do a blocked bootstrap
    stride = correl_len + 1

    if stride == 1:
        # Some estimators may require the stride, so we pass it in.
        estimator_kwargs['stride'] = stride
        return mcbs_ci(
            dataset=estimator_datasets,
            estimator=estimator,
            alpha=alpha,
            dlen=dlen,
            n_sets=n_sets,
            args=args,
            kwargs=estimator_kwargs,
            sort=msort,
        ) + (correl_len,)
    else:
        subsample = subsample or (lambda x: x[rng.integers(len(x))])
        # Let's make sure we decimate every array properly...
        decim_list = {}
        for key, dset in estimator_datasets.items():
            dset_shape = list(dset.shape)
            n_slices = dset_shape[0] // stride
            dset_shape[0] = n_slices
            decim_set = np.empty((dset_shape), dtype=dset.dtype)
            for iout, istart in enumerate(range(0, dset.shape[0] - stride + 1, stride)):
                sl = dset[istart : istart + stride]
                # We assume time is the 0th axis.
                # Okay, so non-optimal. Population requires the axis subsampling to be done just so...
                try:
                    decim_set[iout] = subsample(sl, axis=0)
                except Exception:
                    decim_set[iout] = subsample(sl)
            decim_list[key] = decim_set
            dlen = dset_shape[0]
            estimator_kwargs['stride'] = stride

        return mcbs_ci(
            dataset=decim_list,
            estimator=estimator,
            alpha=alpha,
            dlen=dlen,
            n_sets=n_sets,
            args=args,
            kwargs=estimator_kwargs,
            sort=msort,
        ) + (correl_len,)


# These are blocks designed to evaluate simple information sets.
# Whether they should go here or in westtools is somewhat up for debate.
# Currently, nothing actually uses them, so there's that.


def _1D_simple_eval_block(
    iblock,
    start,
    stop,
    nstates,
    data_input,
    name,
    mcbs_alpha,
    mcbs_nsets,
    mcbs_acalpha,
    do_correl,
    mcbs_enable,
    subsample=np.mean,
    **extra
):
    # This is actually appropriate for anything with a directly measured, 1D dataset, i.e.,
    # fluxes, color populations, and state populations.
    results = []
    for istate in range(nstates):
        # Not sure if we need a jstate for these estimators, but we'll see.
        # kwargs = {'istate': istate, 'jstate': 'B'}
        estimator_datasets = {'dataset': data_input['dataset'][:, istate]}
        ci_res = mcbs_ci_correl(
            estimator_datasets,
            estimator=(lambda stride, dataset: np.mean(dataset)),
            alpha=mcbs_alpha,
            n_sets=mcbs_nsets,
            autocorrel_alpha=mcbs_acalpha,
            subsample=subsample,
            do_correl=do_correl,
            mcbs_enable=mcbs_enable,
        )

        results.append((name, iblock, istate, (start, stop) + ci_res))

    return results


def _2D_simple_eval_block(
    iblock,
    start,
    stop,
    nstates,
    data_input,
    name,
    mcbs_alpha,
    mcbs_nsets,
    mcbs_acalpha,
    do_correl,
    mcbs_enable,
    subsample=np.mean,
    **extra
):
    # This is really just a simple 2D block for less complex datasets, but there it is.
    # It's probably limited in this use case to conditional_fluxes, but anything that's an i-to-j process that is directly measured
    # is suitable for use with this.
    results = []
    for istate in range(nstates):
        for jstate in range(nstates):
            if istate == jstate:
                continue
            # kwargs = {'istate': istate, 'jstate': jstate}
            # dataset = {'dataset': cond_fluxes[:, istate, jstate]}
            estimator_datasets = {'dataset': data_input['dataset'][:, istate, jstate]}
            ci_res = mcbs_ci_correl(
                estimator_datasets,
                estimator=(lambda stride, dataset: np.mean(dataset)),
                alpha=mcbs_alpha,
                n_sets=mcbs_nsets,
                autocorrel_alpha=mcbs_acalpha,
                subsample=subsample,
                do_correl=do_correl,
                mcbs_enable=mcbs_enable,
            )

            results.append((name, iblock, istate, jstate, (start, stop) + ci_res))

    return results
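A minimal usage sketch of mcbs_ci_correl may help here; it follows the wrapped-estimator pattern of _1D_simple_eval_block above (estimators receive the dataset dict plus a 'stride' keyword, so a bare np.mean will not work directly). The synthetic AR(1) series and all variable names are illustrative assumptions, not part of the package:

import numpy as np
from westpa.mclib import mcbs_ci_correl

# Hypothetical time-correlated series (an AR(1) process), purely for illustration.
rng = np.random.default_rng(42)
data = np.empty(2000)
data[0] = 0.0
for i in range(1, len(data)):
    data[i] = 0.9 * data[i - 1] + rng.normal()

# Wrap np.mean in a lambda that discards the extra 'stride' argument,
# mirroring the helper blocks above.
result = mcbs_ci_correl(
    {'dataset': data},
    estimator=(lambda stride, dataset: np.mean(dataset)),
    alpha=0.05,          # 95% confidence interval
    subsample=np.mean,   # block-average each correlated block
)
estimate, ci_lb, ci_ub = result[0], result[1], result[2]
correl_time = result[-1]  # the correlation time is appended as the last element
print(estimate, ci_lb, ci_ub, correl_time)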
westpa/mclib/__main__.py
ADDED
@@ -0,0 +1,28 @@
if __name__ == '__main__':
    from . import autocorrel_elem
    import numpy
    from scipy.signal import correlate

    n = 16
    x = numpy.linspace(0, n * numpy.pi, 16 * n + 1)
    a = numpy.cos(x) + numpy.exp(-((x / 2.0) ** 2)) + numpy.exp(-(x / 4.0))
    pa = numpy.zeros((10000 * len(a),), numpy.float64)
    pa[: len(a)] = a

    print('<a> =', a.mean())
    print('<a^2> =', ((a - a.mean()) ** 2).sum())
    print('scipy.signal.correlate:')
    acf0 = correlate(a, a)
    acf0 = acf0[-len(a) :]
    acf0 /= acf0.max()
    print(acf0[: len(acf0) // 4])  # integer division: slice indices must be ints in Python 3

    # print 'scipy.signal.correlate (-mean):'
    # acf0 = correlate(a-a.mean(),a-a.mean())
    # acf0 = acf0[-len(a):]
    # acf0 /= acf0.max()
    # print acf0[:len(acf0)/4]

    print('this module:')
    acf = numpy.array([autocorrel_elem(pa, k) for k in range(len(a))])
    print(acf[: len(acf) // 4])  # integer division, as above
westpa/mclib/_mclib.cpython-313-x86_64-linux-gnu.so
ADDED
Binary file
westpa/oldtools/aframe/__init__.py
ADDED
@@ -0,0 +1,35 @@
"""WEST Analysis framework -- an unholy mess of classes exploiting each other"""

from . import atool
from .atool import WESTAnalysisTool
from .base_mixin import ArgumentError, AnalysisMixin
from .binning import BinningMixin
from .data_reader import WESTDataReaderMixin, ExtDataReaderMixin, BFDataManager
from .iter_range import IterRangeMixin
from .kinetics import KineticsAnalysisMixin
from .mcbs import MCBSMixin
from .output import CommonOutputMixin
from .plotting import PlottingMixin
from .trajwalker import TrajWalker
from .transitions import TransitionAnalysisMixin, TransitionEventAccumulator, BFTransitionAnalysisMixin


__all__ = [
    'atool',
    'AnalysisMixin',
    'ArgumentError',
    'WESTAnalysisTool',
    'IterRangeMixin',
    'WESTDataReaderMixin',
    'ExtDataReaderMixin',
    'BFDataManager',
    'BinningMixin',
    'MCBSMixin',
    'TrajWalker',
    'TransitionAnalysisMixin',
    'TransitionEventAccumulator',
    'BFTransitionAnalysisMixin',
    'KineticsAnalysisMixin',
    'CommonOutputMixin',
    'PlottingMixin',
]
westpa/oldtools/aframe/atool.py
ADDED
@@ -0,0 +1,75 @@
import logging


import h5py

log = logging.getLogger(__name__)


class WESTAnalysisTool:
    def __init__(self):
        super().__init__()
        # Whether a west.cfg is required to run a program based on this tool
        self.config_required = False

        # Analysis HDF5 filename and object
        self.anal_h5name = None
        self.anal_h5file = None

        # Whether this is being used in a brute force analysis
        self.bf_mode = False

        # A way to override some arguments on a per-mixin basis without having to subclass
        # (messy, but it doesn't seem crucial enough so far to make it cleaner)
        self.include_args = {}

    def add_args(self, parser, upcall=True):
        '''Add arguments to a parser common to all analyses of this type.'''
        if upcall:
            try:
                upfunc = super().add_args
            except AttributeError:
                pass
            else:
                upfunc(parser)

        group = parser.add_argument_group('general analysis options')
        group.add_argument(
            '-A',
            '--analysis-file',
            dest='anal_h5name',
            metavar='H5FILE',
            default='analysis.h5',
            help='Store intermediate and final results in H5FILE (default: %(default)s).',
        )

    def process_args(self, args, upcall=True):
        self.anal_h5name = args.anal_h5name

        if upcall:
            try:
                upfunc = super().process_args
            except AttributeError:
                pass
            else:
                upfunc(args)

    def open_analysis_backing(self):
        if self.anal_h5file is None:
            # Mode 'a' (read/write, create if missing) matches the old h5py default;
            # h5py >= 3 defaults to read-only, which would break require_group() below.
            self.anal_h5file = h5py.File(self.anal_h5name, 'a')

    def close_analysis_backing(self):
        try:
            self.anal_h5file.close()
            self.anal_h5file = None
        except AttributeError:
            pass

    def require_analysis_group(self, groupname, replace=False):
        self.open_analysis_backing()
        if replace:
            try:
                del self.anal_h5file[groupname]
            except KeyError:
                pass
        return self.anal_h5file.require_group(groupname)
westpa/oldtools/aframe/base_mixin.py
ADDED
@@ -0,0 +1,26 @@
class ArgumentError(RuntimeError):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)


class AnalysisMixin:
    def __init__(self):
        super().__init__()

    def add_args(self, parser, upcall=True):
        if upcall:
            try:
                upfunc = super().add_args
            except AttributeError:
                pass
            else:
                upfunc(parser)

    def process_args(self, args, upcall=True):
        if upcall:
            try:
                upfunc = super().process_args
            except AttributeError:
                pass
            else:
                upfunc(args)
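The upcall pattern in WESTAnalysisTool and AnalysisMixin lets each class in a cooperative multiple-inheritance chain forward add_args/process_args up the MRO, stopping silently once no further base defines them. A composition sketch (MyMixin and MyTool are hypothetical classes, for illustration only):

import argparse

from westpa.oldtools.aframe import WESTAnalysisTool, AnalysisMixin


class MyMixin(AnalysisMixin):
    def add_args(self, parser, upcall=True):
        # Let bases register their arguments first, then add our own.
        if upcall:
            try:
                upfunc = super().add_args
            except AttributeError:
                pass
            else:
                upfunc(parser)
        parser.add_argument('--my-option', default='x')


class MyTool(MyMixin, WESTAnalysisTool):
    # MRO: MyTool -> MyMixin -> AnalysisMixin -> WESTAnalysisTool
    pass


parser = argparse.ArgumentParser()
tool = MyTool()
tool.add_args(parser)  # WESTAnalysisTool contributes -A/--analysis-file; MyMixin adds --my-option
tool.process_args(parser.parse_args([]))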
westpa/oldtools/aframe/binning.py
ADDED
@@ -0,0 +1,178 @@
import logging

import numpy as np

import westpa
from westpa.oldtools.aframe import AnalysisMixin

log = logging.getLogger(__name__)


class BinningMixin(AnalysisMixin):
    '''A mixin for performing binning on WEST data.'''

    def __init__(self):
        super().__init__()

        self.mapper = None
        self.n_bins = None

        self.discard_bin_assignments = False
        self.binning_h5gname = 'binning'
        self.binning_h5group = None
        self.mapper_hash = None

    def add_args(self, parser, upcall=True):
        if upcall:
            try:
                upfunc = super().add_args
            except AttributeError:
                pass
            else:
                upfunc(parser)

        group = parser.add_argument_group('binning options')
        egroup = group.add_mutually_exclusive_group()
        egroup.add_argument(
            '--binexpr',
            '--binbounds',
            dest='binexpr',
            help='''Construct rectilinear bins from BINEXPR. This must be a list of lists of bin boundaries
            (one list of bin boundaries for each dimension of the progress coordinate), formatted as a Python
            expression. E.g. "[[0,1,2,4,inf], [-inf,0,inf]]".''',
        )
        group.add_argument(
            '--discard-bin-assignments',
            dest='discard_bin_assignments',
            action='store_true',
            help='''Discard any existing bin assignments stored in the analysis HDF5 file.''',
        )

    def process_args(self, args, upcall=True):
        if args.binexpr:
            westpa.rc.pstatus("Constructing rectilinear bin boundaries from the following expression: '{}'".format(args.binexpr))
            self.mapper = self.mapper_from_expr(args.binexpr)
        else:
            westpa.rc.pstatus('Loading bin boundaries from WEST system')
            system = westpa.rc.get_system_driver()
            self.mapper = system.bin_mapper

        self.n_bins = self.mapper.nbins
        _pdat, self.mapper_hash = self.mapper.pickle_and_hash()
        westpa.rc.pstatus('  {:d} bins'.format(self.n_bins))
        westpa.rc.pstatus('  identity hash {}'.format(self.mapper_hash))

        self.discard_bin_assignments = bool(args.discard_bin_assignments)

        if upcall:
            try:
                upfunc = super().process_args
            except AttributeError:
                pass
            else:
                upfunc(args)

    def mapper_from_expr(self, expr):
        from westpa.core.binning import RectilinearBinMapper

        namespace = {'numpy': np, 'np': np, 'inf': float('inf')}

        try:
            return RectilinearBinMapper(eval(expr, namespace))
        except TypeError as e:
            if 'has no len' in str(e):
                raise ValueError('invalid bin boundary specification; you probably forgot to make a list of lists')
            raise  # re-raise any other TypeError rather than silently returning None

    def write_bin_labels(self, dest, header='# bin labels:\n', format='# bin {bin_index:{max_iwidth}d} -- {label!s}\n'):
        '''Print labels for all bins in ``self.mapper`` to ``dest``. If provided, ``header``
        is printed before any labels. The ``format`` string specifies how bin labels are to be printed. Valid entries are:
          * ``bin_index`` -- the zero-based index of the bin
          * ``label`` -- the label, as obtained by ``bin.label``
          * ``max_iwidth`` -- the maximum width (in characters) of the bin index, for pretty alignment
        '''
        dest.write(header or '')
        max_iwidth = len(str(self.mapper.nbins - 1))
        for ibin, label in enumerate(self.mapper.labels):
            dest.write(format.format(bin_index=ibin, label=label, max_iwidth=max_iwidth))

    def require_binning_group(self):
        if self.binning_h5group is None:
            self.binning_h5group = self.anal_h5file.require_group(self.binning_h5gname)
        return self.binning_h5group

    def delete_binning_group(self):
        self.binning_h5group = None
        del self.anal_h5file[self.binning_h5gname]

    def record_data_binhash(self, h5object):
        '''Record the identity hash for self.mapper as an attribute on the given HDF5 object (group or dataset)'''
        h5object.attrs['binhash'] = self.mapper_hash

    def check_data_binhash(self, h5object):
        '''Check whether the recorded bin identity hash on the given HDF5 object matches the identity hash for self.mapper'''
        return h5object.attrs.get('binhash') == self.mapper_hash

    def assign_to_bins(self):
        '''Assign WEST segment data to bins. Requires the DataReader mixin to be in the inheritance tree'''
        self.require_binning_group()

        n_iters = self.last_iter - self.first_iter + 1
        max_n_segs = self.max_iter_segs_in_range(self.first_iter, self.last_iter)
        pcoord_len = self.get_pcoord_len(self.first_iter)

        assignments = np.zeros((n_iters, max_n_segs, pcoord_len), np.min_scalar_type(self.n_bins))
        populations = np.zeros((n_iters, pcoord_len, self.n_bins), np.float64)

        westpa.rc.pstatus('Assigning to bins...')

        for iiter, n_iter in enumerate(range(self.first_iter, self.last_iter + 1)):
            westpa.rc.pstatus('\r  Iteration {:d}'.format(n_iter), end='')
            seg_index = self.get_seg_index(n_iter)
            pcoords = self.get_iter_group(n_iter)['pcoord'][...]
            weights = seg_index['weight']

            for seg_id in range(len(seg_index)):
                assignments[iiter, seg_id, :] = self.mapper.assign(pcoords[seg_id, :, :])

            for it in range(pcoord_len):
                populations[iiter, it, :] = np.bincount(assignments[iiter, : len(seg_index), it], weights, minlength=self.n_bins)

            westpa.rc.pflush()
            del pcoords, weights, seg_index

        assignments_ds = self.binning_h5group.create_dataset('bin_assignments', data=assignments, compression='gzip')
        populations_ds = self.binning_h5group.create_dataset('bin_populations', data=populations, compression='gzip')

        for h5object in (self.binning_h5group, assignments_ds, populations_ds):
            self.record_data_iter_range(h5object)
            self.record_data_iter_step(h5object, 1)
            self.record_data_binhash(h5object)

        westpa.rc.pstatus()

    def require_bin_assignments(self):
        self.require_binning_group()
        do_assign = False
        if self.discard_bin_assignments:
            westpa.rc.pstatus('Discarding existing bin assignments.')
            do_assign = True
        elif 'bin_assignments' not in self.binning_h5group:
            do_assign = True
        elif not self.check_data_iter_range_least(self.binning_h5group):
            westpa.rc.pstatus('Existing bin assignments are for incompatible first/last iterations; deleting assignments.')
            do_assign = True
        elif not self.check_data_binhash(self.binning_h5group):
            westpa.rc.pstatus('Bin definitions have changed; deleting existing bin assignments.')
            do_assign = True

        if do_assign:
            self.delete_binning_group()
            self.assign_to_bins()
        else:
            westpa.rc.pstatus('Using existing bin assignments.')

    def get_bin_assignments(self, first_iter=None, last_iter=None):
        return self.slice_per_iter_data(self.binning_h5group['bin_assignments'], first_iter, last_iter)

    def get_bin_populations(self, first_iter=None, last_iter=None):
        return self.slice_per_iter_data(self.binning_h5group['bin_populations'], first_iter, last_iter)
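To make the --binexpr machinery above concrete: mapper_from_expr evaluates the expression in a namespace exposing numpy, np, and inf, and hands the resulting list of boundary lists to RectilinearBinMapper. A short sketch of the equivalent direct construction (the coordinate values are made up; mapper.nbins and mapper.assign are the same attributes used in assign_to_bins above):

import numpy as np

from westpa.core.binning import RectilinearBinMapper

# One boundary list per progress-coordinate dimension, mirroring the
# --binexpr example "[[0,1,2,4,inf], [-inf,0,inf]]".
inf = float('inf')
mapper = RectilinearBinMapper([[0, 1, 2, 4, inf], [-inf, 0, inf]])
print(mapper.nbins)  # 4 bins x 2 bins = 8 bins

coords = np.array([[0.5, -1.0], [3.0, 2.5]])  # shape (n_points, n_dims)
print(mapper.assign(coords))  # flat bin index for each point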