westpa 2022.12__cp313-cp313-macosx_10_13_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of westpa might be problematic.
- westpa/__init__.py +14 -0
- westpa/_version.py +21 -0
- westpa/analysis/__init__.py +5 -0
- westpa/analysis/core.py +746 -0
- westpa/analysis/statistics.py +27 -0
- westpa/analysis/trajectories.py +360 -0
- westpa/cli/__init__.py +0 -0
- westpa/cli/core/__init__.py +0 -0
- westpa/cli/core/w_fork.py +152 -0
- westpa/cli/core/w_init.py +230 -0
- westpa/cli/core/w_run.py +77 -0
- westpa/cli/core/w_states.py +212 -0
- westpa/cli/core/w_succ.py +99 -0
- westpa/cli/core/w_truncate.py +68 -0
- westpa/cli/tools/__init__.py +0 -0
- westpa/cli/tools/ploterr.py +506 -0
- westpa/cli/tools/plothist.py +706 -0
- westpa/cli/tools/w_assign.py +596 -0
- westpa/cli/tools/w_bins.py +166 -0
- westpa/cli/tools/w_crawl.py +119 -0
- westpa/cli/tools/w_direct.py +547 -0
- westpa/cli/tools/w_dumpsegs.py +94 -0
- westpa/cli/tools/w_eddist.py +506 -0
- westpa/cli/tools/w_fluxanl.py +376 -0
- westpa/cli/tools/w_ipa.py +833 -0
- westpa/cli/tools/w_kinavg.py +127 -0
- westpa/cli/tools/w_kinetics.py +96 -0
- westpa/cli/tools/w_multi_west.py +414 -0
- westpa/cli/tools/w_ntop.py +213 -0
- westpa/cli/tools/w_pdist.py +515 -0
- westpa/cli/tools/w_postanalysis_matrix.py +82 -0
- westpa/cli/tools/w_postanalysis_reweight.py +53 -0
- westpa/cli/tools/w_red.py +491 -0
- westpa/cli/tools/w_reweight.py +780 -0
- westpa/cli/tools/w_select.py +226 -0
- westpa/cli/tools/w_stateprobs.py +111 -0
- westpa/cli/tools/w_trace.py +599 -0
- westpa/core/__init__.py +0 -0
- westpa/core/_rc.py +673 -0
- westpa/core/binning/__init__.py +55 -0
- westpa/core/binning/_assign.cpython-313-darwin.so +0 -0
- westpa/core/binning/assign.py +455 -0
- westpa/core/binning/binless.py +96 -0
- westpa/core/binning/binless_driver.py +54 -0
- westpa/core/binning/binless_manager.py +190 -0
- westpa/core/binning/bins.py +47 -0
- westpa/core/binning/mab.py +506 -0
- westpa/core/binning/mab_driver.py +54 -0
- westpa/core/binning/mab_manager.py +198 -0
- westpa/core/data_manager.py +1694 -0
- westpa/core/extloader.py +74 -0
- westpa/core/h5io.py +995 -0
- westpa/core/kinetics/__init__.py +24 -0
- westpa/core/kinetics/_kinetics.cpython-313-darwin.so +0 -0
- westpa/core/kinetics/events.py +147 -0
- westpa/core/kinetics/matrates.py +156 -0
- westpa/core/kinetics/rate_averaging.py +266 -0
- westpa/core/progress.py +218 -0
- westpa/core/propagators/__init__.py +54 -0
- westpa/core/propagators/executable.py +719 -0
- westpa/core/reweight/__init__.py +14 -0
- westpa/core/reweight/_reweight.cpython-313-darwin.so +0 -0
- westpa/core/reweight/matrix.py +126 -0
- westpa/core/segment.py +119 -0
- westpa/core/sim_manager.py +835 -0
- westpa/core/states.py +359 -0
- westpa/core/systems.py +93 -0
- westpa/core/textio.py +74 -0
- westpa/core/trajectory.py +330 -0
- westpa/core/we_driver.py +910 -0
- westpa/core/wm_ops.py +43 -0
- westpa/core/yamlcfg.py +391 -0
- westpa/fasthist/__init__.py +34 -0
- westpa/fasthist/_fasthist.cpython-313-darwin.so +0 -0
- westpa/mclib/__init__.py +271 -0
- westpa/mclib/__main__.py +28 -0
- westpa/mclib/_mclib.cpython-313-darwin.so +0 -0
- westpa/oldtools/__init__.py +4 -0
- westpa/oldtools/aframe/__init__.py +35 -0
- westpa/oldtools/aframe/atool.py +75 -0
- westpa/oldtools/aframe/base_mixin.py +26 -0
- westpa/oldtools/aframe/binning.py +178 -0
- westpa/oldtools/aframe/data_reader.py +560 -0
- westpa/oldtools/aframe/iter_range.py +200 -0
- westpa/oldtools/aframe/kinetics.py +117 -0
- westpa/oldtools/aframe/mcbs.py +153 -0
- westpa/oldtools/aframe/output.py +39 -0
- westpa/oldtools/aframe/plotting.py +90 -0
- westpa/oldtools/aframe/trajwalker.py +126 -0
- westpa/oldtools/aframe/transitions.py +469 -0
- westpa/oldtools/cmds/__init__.py +0 -0
- westpa/oldtools/cmds/w_ttimes.py +361 -0
- westpa/oldtools/files.py +34 -0
- westpa/oldtools/miscfn.py +23 -0
- westpa/oldtools/stats/__init__.py +4 -0
- westpa/oldtools/stats/accumulator.py +35 -0
- westpa/oldtools/stats/edfs.py +129 -0
- westpa/oldtools/stats/mcbs.py +96 -0
- westpa/tools/__init__.py +33 -0
- westpa/tools/binning.py +472 -0
- westpa/tools/core.py +340 -0
- westpa/tools/data_reader.py +159 -0
- westpa/tools/dtypes.py +31 -0
- westpa/tools/iter_range.py +198 -0
- westpa/tools/kinetics_tool.py +340 -0
- westpa/tools/plot.py +283 -0
- westpa/tools/progress.py +17 -0
- westpa/tools/selected_segs.py +154 -0
- westpa/tools/wipi.py +751 -0
- westpa/trajtree/__init__.py +4 -0
- westpa/trajtree/_trajtree.cpython-313-darwin.so +0 -0
- westpa/trajtree/trajtree.py +117 -0
- westpa/westext/__init__.py +0 -0
- westpa/westext/adaptvoronoi/__init__.py +3 -0
- westpa/westext/adaptvoronoi/adaptVor_driver.py +214 -0
- westpa/westext/hamsm_restarting/__init__.py +3 -0
- westpa/westext/hamsm_restarting/example_overrides.py +35 -0
- westpa/westext/hamsm_restarting/restart_driver.py +1165 -0
- westpa/westext/stringmethod/__init__.py +11 -0
- westpa/westext/stringmethod/fourier_fitting.py +69 -0
- westpa/westext/stringmethod/string_driver.py +253 -0
- westpa/westext/stringmethod/string_method.py +306 -0
- westpa/westext/weed/BinCluster.py +180 -0
- westpa/westext/weed/ProbAdjustEquil.py +100 -0
- westpa/westext/weed/UncertMath.py +247 -0
- westpa/westext/weed/__init__.py +10 -0
- westpa/westext/weed/weed_driver.py +192 -0
- westpa/westext/wess/ProbAdjust.py +101 -0
- westpa/westext/wess/__init__.py +6 -0
- westpa/westext/wess/wess_driver.py +217 -0
- westpa/work_managers/__init__.py +57 -0
- westpa/work_managers/core.py +396 -0
- westpa/work_managers/environment.py +134 -0
- westpa/work_managers/mpi.py +318 -0
- westpa/work_managers/processes.py +187 -0
- westpa/work_managers/serial.py +28 -0
- westpa/work_managers/threads.py +79 -0
- westpa/work_managers/zeromq/__init__.py +20 -0
- westpa/work_managers/zeromq/core.py +641 -0
- westpa/work_managers/zeromq/node.py +131 -0
- westpa/work_managers/zeromq/work_manager.py +526 -0
- westpa/work_managers/zeromq/worker.py +320 -0
- westpa-2022.12.dist-info/AUTHORS +22 -0
- westpa-2022.12.dist-info/LICENSE +21 -0
- westpa-2022.12.dist-info/METADATA +193 -0
- westpa-2022.12.dist-info/RECORD +149 -0
- westpa-2022.12.dist-info/WHEEL +6 -0
- westpa-2022.12.dist-info/entry_points.txt +29 -0
- westpa-2022.12.dist-info/top_level.txt +1 -0
westpa/cli/tools/w_ipa.py

@@ -0,0 +1,833 @@

import base64
import codecs
import hashlib
import os
import warnings

import numpy as np

import westpa
from westpa.core import h5io
from westpa.cli.tools import w_assign, w_direct, w_reweight

from westpa.tools import WESTParallelTool, WESTDataReader, ProgressIndicatorComponent, Plotter

from westpa.tools import WIPIDataset, __get_data_for_iteration__, WIPIScheme


warnings.filterwarnings('ignore', category=DeprecationWarning)
warnings.filterwarnings('ignore', category=RuntimeWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=ImportWarning)
warnings.filterwarnings('ignore')


class WIPI(WESTParallelTool):
    '''
    Welcome to w_ipa (WESTPA Interactive Python Analysis)!
    From here, you can run traces, look at weights, progress coordinates, etc.
    This is considered a 'stateful' tool; that is, the data you are pulling is always pulled
    from the current analysis scheme and iteration.
    By default, the first analysis scheme in west.cfg is used, and you are set at iteration 1.

    ALL PROPERTIES ARE ACCESSED VIA w or west
    To see the current iteration, try:

        w.iteration
        OR
        west.iteration

    to set it, simply plug in a new value.

        w.iteration = 100

    To change/list the current analysis schemes:

        w.list_schemes
        w.scheme = OUTPUT FROM w.list_schemes

    To see the states and bins defined in the current analysis scheme:

        w.states
        w.bin_labels

    All information about the current iteration is available in an object called 'current':

        w.current
        walkers, summary, states, seg_id, weights, parents, kinavg, pcoord, bins, populations, and auxdata, if it exists.

    In addition, the function w.trace(seg_id) will run a trace over a seg_id in the current iteration and return a dictionary
    containing all pertinent information about that seg_id's history. It's best to store this, as the trace can be expensive.

    Run help on any function or property for more information!

    Happy analyzing!

    '''

    def __init__(self):
        super().__init__()
        self.data_reader = WESTDataReader()
        self.wm_env.default_work_manager = self.wm_env.default_parallel_work_manager
        self.progress = ProgressIndicatorComponent()

        self._iter = 1
        self.config_required = True
        self.version = "1.0B"
        # Set to matplotlib if you want that. But why would you?
        # Well, whatever, we'll just set it to that for now.
        self.interface = 'matplotlib'
        self._scheme = None
        global iteration

    def add_args(self, parser):
        self.progress.add_args(parser)
        self.data_reader.add_args(parser)
        rgroup = parser.add_argument_group('runtime options')
        rgroup.add_argument(
            '--analysis-only',
            '-ao',
            dest='analysis_mode',
            action='store_true',
            help='''Use this flag to run the analysis and return to the terminal.''',
        )
        rgroup.add_argument(
            '--reanalyze',
            '-ra',
            dest='reanalyze',
            action='store_true',
            help='''Use this flag to delete the existing files and reanalyze.''',
        )
        rgroup.add_argument(
            '--ignore-hash', '-ih', dest='ignore_hash', action='store_true', help='''Ignore hash and don't regenerate files.'''
        )
        rgroup.add_argument(
            '--debug', '-d', dest='debug_mode', action='store_true', help='''Debug output largely intended for development.'''
        )
        rgroup.add_argument('--terminal', '-t', dest='plotting', action='store_true', help='''Plot output in terminal.''')
        # There is almost certainly a better way to handle this, but we'll sort that later.
        import argparse

        rgroup.add_argument('--f', '-f', dest='extra', default='blah', help=argparse.SUPPRESS)

        parser.set_defaults(compression=True)

    def process_args(self, args):
        self.progress.process_args(args)
        self.data_reader.process_args(args)
        with self.data_reader:
            self.niters = self.data_reader.current_iteration - 1
        self.__config = westpa.rc.config
        self.__settings = self.__config['west']['analysis']
        for ischeme, scheme in enumerate(self.__settings['analysis_schemes']):
            if (
                self.__settings['analysis_schemes'][scheme]['enabled'] is True
                or self.__settings['analysis_schemes'][scheme]['enabled'] is None
            ):
                self.scheme = scheme
        self.data_args = args
        self.analysis_mode = args.analysis_mode
        self.reanalyze = args.reanalyze
        self.ignore_hash = args.ignore_hash
        self.debug_mode = args.debug_mode
        if args.plotting:
            self.interface = 'text'

    def hash_args(self, args, extra=None, path=None):
        '''Create unique hash stamp to determine if arguments/file is different from before.'''
        '''Combine with iteration to know whether or not file needs updating.'''
        # Why are we not loading this functionality into the individual tools?
        # While it may certainly be useful to store arguments (and we may well do that),
        # it's rather complex and nasty to deal with pickling and hashing arguments through
        # the various namespaces.
        # In addition, it's unlikely that the functionality is desired at the individual tool level,
        # since we'll always just rewrite a file when we call the function.
        # return hashlib.md5(pickle.dumps([args, extra])).hexdigest()
        # We don't care about the path, so we'll remove it.
        # Probably a better way to do this, but who cares.
        cargs = list(args)
        for iarg, arg in enumerate(cargs):
            if path in arg:
                cargs[iarg] = arg.replace(path, '').replace('/', '')
            if arg == '--disable-averages':
                cargs.remove('--disable-averages')
        to_hash = cargs + [extra]
        # print(args)
        # print(to_hash)
        # print(str(to_hash).encode('base64'))
        if self.debug_mode:
            for iarg, arg in enumerate(to_hash):
                if isinstance(arg, list):
                    for il, l in enumerate(arg):
                        print('arg {num:02d} -- {arg:<20}'.format(num=il + iarg, arg=h5io.tostr(l)))
                else:
                    print('arg {num:02d} -- {arg:<20}'.format(num=iarg, arg=h5io.tostr(arg)))
            # print('args: {}'.format(to_hash))
        # This SHOULD produce the same output, maybe? That would be nice, anyway.
        # But we'll need to test it more.
        return hashlib.md5(base64.b64encode(str(to_hash).encode())).hexdigest()

    def stamp_hash(self, h5file_name, new_hash):
        '''Loads a file, stamps it, and returns the opened file in read only'''
        h5file = h5io.WESTPAH5File(h5file_name, 'r+')
        h5file.attrs['arg_hash'] = new_hash
        h5file.close()
        h5file = h5io.WESTPAH5File(h5file_name, 'r')
        return h5file

    def analysis_structure(self):
        '''
        Run automatically on startup. Parses through the configuration file, and loads up all the data files from the different
        analysis schematics. If they don't exist, it creates them automatically by hooking in to existing analysis routines
        and going from there.

        It does this by calling in the make_parser_and_process function for w_{assign,reweight,direct} using a custom built list
        of args. The user can specify everything in the configuration file that would have been specified on the command line.

        For instance, were one to call w_direct as follows:

            w_direct --evolution cumulative --step-iter 1 --disable-correl

        the west.cfg would look as follows:

        west:
          analysis:
            w_direct:
              evolution: cumulative
              step_iter: 1
              extra: ['disable-correl']

        Alternatively, if one wishes to use the same options for both w_direct and w_reweight, the key 'w_direct' can be replaced
        with 'kinetics'.
        '''
        # Make sure everything exists.
        try:
            os.mkdir(self.__settings['directory'])
        except Exception:
            pass
        # Now, check to see whether they exist, and then load them.
        self.__analysis_schemes__ = {}
        # We really need to implement some sort of default behavior if an analysis scheme isn't set.
        # Right now, we just crash. That isn't really graceful.
        for scheme in self.__settings['analysis_schemes']:
            if self.__settings['analysis_schemes'][scheme]['enabled']:
                if self.work_manager.running is False:
                    self.work_manager.startup()
                path = os.path.join(os.getcwd(), self.__settings['directory'], scheme)
                # if 'postanalysis' in self.__settings['analysis_schemes'][scheme] and 'postanalysis' in self.__settings['postanalysis']:
                # Should clean this up. But it uses the default global setting if a by-scheme one isn't set.
                if 'postanalysis' in self.__settings:
                    if 'postanalysis' in self.__settings['analysis_schemes'][scheme]:
                        pass
                    else:
                        self.__settings['analysis_schemes'][scheme]['postanalysis'] = self.__settings['postanalysis']
                try:
                    os.mkdir(path)
                except Exception:
                    pass
                self.__analysis_schemes__[scheme] = {}
                try:
                    if (
                        self.__settings['analysis_schemes'][scheme]['postanalysis'] is True
                        or self.__settings['postanalysis'] is True
                    ):
                        analysis_files = ['assign', 'direct', 'reweight']
                    else:
                        analysis_files = ['assign', 'direct']
                except Exception:
                    analysis_files = ['assign', 'direct']
                    self.__settings['analysis_schemes'][scheme]['postanalysis'] = False
                reanalyze_kinetics = False
                assign_hash = None
                for name in analysis_files:
                    arg_hash = None
                    if self.reanalyze is True:
                        reanalyze_kinetics = True
                        try:
                            os.remove(os.path.join(path, '{}.h5'.format(name)))
                        except Exception:
                            pass
                    else:
                        try:
                            # Try to load the hash. If we fail to load the hash or the file, we need to reload.
                            # if self.reanalyze == True:
                            #     raise ValueError('Reanalyze set to true.')
                            self.__analysis_schemes__[scheme][name] = h5io.WESTPAH5File(
                                os.path.join(path, '{}.h5'.format(name)), 'r'
                            )
                            arg_hash = self.__analysis_schemes__[scheme][name].attrs['arg_hash']
                            if name == 'assign':
                                assign_hash = arg_hash
                        except Exception:
                            pass
                            # We shouldn't rely on this.
                            # self.reanalyze = True
                    if True:
                        if name == 'assign':
                            assign = w_assign.WAssign()

                            w_assign_config = {'output': os.path.join(path, '{}.h5'.format(name))}
                            try:
                                w_assign_config.update(self.__settings['w_assign'])
                            except Exception:
                                pass
                            try:
                                w_assign_config.update(self.__settings['analysis_schemes'][scheme]['w_assign'])
                            except Exception:
                                pass
                            args = []
                            for key, value in w_assign_config.items():
                                if key != 'extra':
                                    args.append(str('--') + str(key).replace('_', '-'))
                                    args.append(str(value))
                            # This is for stuff like disabling correlation analysis, etc.
                            if 'extra' in list(w_assign_config.keys()):
                                # We're sorting to ensure that the order doesn't matter.
                                for value in sorted(w_assign_config['extra']):
                                    args.append(str('--') + str(value).replace('_', '-'))
                            # We're just calling the built in function.
                            # This is a lot cleaner than what we had in before, and far more workable.
                            args.append('--config-from-file')
                            args.append('--scheme-name')
                            args.append('{}'.format(scheme))
                            # Why are we calling this if we're not sure we're remaking the file?
                            # We need to load up the bin mapper and states and see if they're the same.
                            assign.make_parser_and_process(args=args)
                            import pickle

                            # new_hash = self.hash_args(args=args, path=path, extra=[self.niters, pickle.dumps(assign.binning.mapper), assign.states])
                            # We need to encode it properly to ensure that some OS specific thing doesn't kill us. Same goes for the args, ultimately.
                            # Mostly, we just need to ensure that we're consistent.
                            new_hash = self.hash_args(
                                args=args,
                                path=path,
                                extra=[
                                    int(self.niters),
                                    codecs.encode(pickle.dumps(assign.binning.mapper), "base64"),
                                    base64.b64encode(str(assign.states).encode()),
                                ],
                            )
                            # Let's check the hash. If the hash is the same, we don't need to reload.
                            if self.debug_mode is True:
                                print('{:<10}: old hash, new hash -- {}, {}'.format(name, arg_hash, new_hash))
                            if self.ignore_hash is False and (arg_hash != new_hash or self.reanalyze is True):
                                # If the hashes are different, or we need to reanalyze, delete the file.
                                try:
                                    os.remove(os.path.join(path, '{}.h5'.format(name)))
                                except Exception:
                                    pass
                                print('Reanalyzing file {}.h5 for scheme {}.'.format(name, scheme))
                                # reanalyze_kinetics = True
                                # We want to use the work manager we have here. Otherwise, just let the tool sort out what it needs, honestly.
                                assign.work_manager = self.work_manager

                                assign.go()
                                assign.data_reader.close()

                                # Stamp w/ hash, then reload as read only.
                                self.__analysis_schemes__[scheme][name] = self.stamp_hash(
                                    os.path.join(path, '{}.h5'.format(name)), new_hash
                                )
                            del assign
                            # Update the assignment hash.
                            assign_hash = new_hash

                        # Since these are all contained within one tool, now, we want it to just... load everything.
                        if name == 'direct' or name == 'reweight':
                            if name == 'direct':
                                analysis = w_direct.WDirect()
                            if name == 'reweight':
                                analysis = w_reweight.WReweight()

                            analysis_config = {
                                'assignments': os.path.join(path, '{}.h5'.format('assign')),
                                'output': os.path.join(path, '{}.h5'.format(name)),
                                'kinetics': os.path.join(path, '{}.h5'.format(name)),
                            }

                            # Pull from general analysis options, then general SPECIFIC options for each analysis,
                            # then general options for that analysis scheme, then specific options for the analysis type in the scheme.

                            try:
                                analysis_config.update(self.__settings['kinetics'])
                            except Exception:
                                pass
                            try:
                                analysis_config.update(self.__settings['w_{}'.format(name)])
                            except Exception:
                                pass
                            try:
                                analysis_config.update(self.__settings['analysis_schemes'][scheme]['kinetics'])
                            except Exception:
                                pass
                            try:
                                analysis_config.update(self.__settings['analysis_schemes'][scheme]['w_{}'.format(name)])
                            except Exception:
                                pass

                            # We're pulling in a default set of arguments, then updating them with arguments from the west.cfg file, if appropriate, after setting the appropriate command
                            # Then, we call the magic function 'make_parser_and_process' with the arguments we've pulled in.
                            # The tool has no real idea it's being called outside of its actual function, and we're good to go.
                            args = ['all']
                            for key, value in analysis_config.items():
                                if key != 'extra':
                                    args.append(str('--') + str(key).replace('_', '-'))
                                    args.append(str(value))
                            # This is for stuff like disabling correlation analysis, etc.
                            if 'extra' in list(analysis_config.keys()):
                                for value in sorted(analysis_config['extra']):
                                    args.append(str('--') + str(value).replace('_', '-'))
                            # We want to not display the averages, so...
                            args.append('--disable-averages')
                            new_hash = self.hash_args(args=args, path=path, extra=[int(self.niters), assign_hash])
                            # if arg_hash != new_hash or self.reanalyze == True or reanalyze_kinetics == True:
                            if self.debug_mode is True:
                                print('{:<10}: old hash, new hash -- {}, {}'.format(name, arg_hash, new_hash))
                            if self.ignore_hash is False and (arg_hash != new_hash or reanalyze_kinetics is True):
                                try:
                                    os.remove(os.path.join(path, '{}.h5'.format(name)))
                                except Exception:
                                    pass
                                print('Reanalyzing file {}.h5 for scheme {}.'.format(name, scheme))
                                analysis.make_parser_and_process(args=args)
                                # We want to hook into the existing work manager.
                                analysis.work_manager = self.work_manager

                                analysis.go()

                                # Open!
                                self.__analysis_schemes__[scheme][name] = self.stamp_hash(
                                    os.path.join(path, '{}.h5'.format(name)), new_hash
                                )
                            del analysis

        # Make sure this doesn't get too far out, here. We need to keep it alive as long as we're actually analyzing things.
        # self.work_manager.shutdown()
        print("")
        print("Complete!")

    @property
    def assign(self):
        return self.__analysis_schemes__[str(self.scheme)]['assign']

    @property
    def direct(self):
        """
        The output from w_kinavg.py from the current scheme.
        """
        return self.__analysis_schemes__[str(self.scheme)]['direct']

    @property
    def state_labels(self):
        print("State labels and definitions!")
        for istate, state in enumerate(self.assign['state_labels']):
            print('{}: {}'.format(istate, state))
        print('{}: {}'.format(istate + 1, 'Unknown'))

    @property
    def bin_labels(self):
        print("Bin definitions! ")
        for istate, state in enumerate(self.assign['bin_labels']):
            print('{}: {}'.format(istate, state))

    @property
    def west(self):
        return self.data_reader.data_manager.we_h5file

    @property
    def reweight(self):
        if self.__settings['analysis_schemes'][str(self.scheme)]['postanalysis'] is True:
            return self.__analysis_schemes__[str(self.scheme)]['reweight']
        else:
            value = "This sort of analysis has not been enabled."
            current = {
                'bin_prob_evolution': value,
                'color_prob_evolution': value,
                'conditional_flux_evolution': value,
                'rate_evolution': value,
                'state_labels': value,
                'state_prob_evolution': value,
            }
            current.update({'bin_populations': value, 'iterations': value})
            return current

    @property
    def scheme(self):
        '''
        Returns and sets what scheme is currently in use.
        To see what schemes are available, run:

            w.list_schemes

        '''
        # Let's do this a few different ways.
        # We want to return things about the DIFFERENT schemes, if possible.
        if self._scheme is None:
            self._scheme = WIPIScheme(
                scheme=self.__analysis_schemes__, name=self._schemename, parent=self, settings=self.__settings
            )

        # This just ensures that when we call it, it's clean.
        self._scheme.name = None
        return self._scheme

    @scheme.setter
    def scheme(self, scheme):
        self._future = None
        self._current = None
        self._past = None
        if scheme in self.__settings['analysis_schemes']:
            pass
        else:
            for ischeme, schemename in enumerate(self.__settings['analysis_schemes']):
                if ischeme == scheme:
                    scheme = schemename
        if (
            self.__settings['analysis_schemes'][scheme]['enabled'] is True
            or self.__settings['analysis_schemes'][scheme]['enabled'] is None
        ):
            self._schemename = scheme
        else:
            print("Scheme cannot be changed to scheme: {}; it is not enabled!".format(scheme))

    @property
    def list_schemes(self):
        '''
        Lists what schemes are configured in west.cfg file.
        Schemes should be structured as follows, in west.cfg:

        west:
          system:
            analysis:
              directory: analysis
              analysis_schemes:
                scheme.1:
                  enabled: True
                  states:
                    - label: unbound
                      coords: [[7.0]]
                    - label: bound
                      coords: [[2.7]]
                  bins:
                    - type: RectilinearBinMapper
                      boundaries: [[0.0, 2.80, 7, 10000]]
        '''
        # print("The following schemes are available:")
        # print("")
        # for ischeme, scheme in enumerate(self.__settings['analysis_schemes']):
        #     print('{}. Scheme: {}'.format(ischeme, scheme))
        # print("")
        # print("Set via name, or via the index listed.")
        # print("")
        # print("Current scheme: {}".format(self.scheme))
        self._scheme.list_schemes

    @property
    def iteration(self):
        '''
        Returns/sets the current iteration.
        '''
        # print("The current iteration is {}".format(self._iter))
        return self._iter

    @iteration.setter
    def iteration(self, value):
        print("Setting iteration to iter {}.".format(value))
        if value <= 0:
            print("Iteration must begin at 1.")
            value = 1
        if value > self.niters:
            print("Cannot go beyond {} iterations!".format(self.niters))
            print("Setting to {}".format(self.niters))
            value = self.niters
        # We want to trigger a rebuild on our current/past/future bits.
        # The scheme should automatically reset to the proper iteration, but
        # future needs to be manually triggered.
        self._iter = value
        self._future = None
        return self._iter

    @property
    def current(self):
        '''
        The current iteration. See help for __get_data_for_iteration__
        '''
        return self.scheme[self.scheme.scheme].current

    @property
    def past(self):
        '''
        The previous iteration. See help for __get_data_for_iteration__
        '''
        return self.scheme[self.scheme.scheme].past

    def trace(self, seg_id):
        '''
        Runs a trace on a seg_id within the current iteration, all the way back to the beginning,
        returning a dictionary containing all interesting information:

            seg_id, pcoord, states, bins, weights, iteration, auxdata (optional)

        sorted in chronological order.


        Call with a seg_id.
        '''
        if seg_id >= self.current.walkers:
            print("Walker seg_id # {} is beyond the max count of {} walkers.".format(seg_id, self.current.walkers))
            return 1
        pi = self.progress.indicator
        with pi:
            pi.new_operation('Tracing scheme:iter:seg_id {}:{}:{}'.format(self.scheme, self.iteration, seg_id), self.iteration)
            current = {'seg_id': [], 'pcoord': [], 'states': [], 'weights': [], 'iteration': [], 'bins': []}
            keys = []
            try:
                current['auxdata'] = {}
                for key in list(self.current['auxdata'].keys()):
                    current['auxdata'][key] = []
                    key = []
            except Exception:
                pass
            for iter in reversed(list(range(1, self.iteration + 1))):
                iter_group = self.data_reader.get_iter_group(iter)
                current['pcoord'].append(iter_group['pcoord'][seg_id, :, :])
                current['states'].append(self.assign['trajlabels'][iter - 1, seg_id, :])
                current['bins'].append(self.assign['assignments'][iter - 1, seg_id, :])
                current['seg_id'].append(seg_id)
                current['weights'].append(iter_group['seg_index']['weight'][seg_id])
                current['iteration'].append(iter)
                try:
                    for key in keys:
                        current['auxdata'][key].append(iter_group['auxdata'][key][seg_id])
                except Exception:
                    pass
                seg_id = iter_group['seg_index']['parent_id'][seg_id]
                if seg_id < 0:
                    # Necessary for steady state simulations. This means they started in that iteration.
                    break
                pi.progress += 1
        current['seg_id'] = list(reversed(current['seg_id']))
        current['iteration'] = list(reversed(current['iteration']))
        current['states'] = np.concatenate(np.array(list(reversed(current['states']))))
        current['bins'] = np.concatenate(np.array(list(reversed(current['bins']))))
        current['weights'] = np.array(list(reversed(current['weights'])))
        current['pcoord'] = np.concatenate(np.array(list(reversed(current['pcoord']))))
        try:
            for key in keys():
                current['auxdata'][key] = np.concatenate(np.array(list(reversed(current['auxdata'][key]))))
        except Exception:
            pass
        current['state_labels'] = self.assign['state_labels']
        for i in ['pcoord', 'states', 'bins', 'weights']:
            current[i] = WIPIDataset(raw=current[i], key=i)
            if i == 'weights':
                current[i].plotter = Plotter(
                    np.log10(current[i].raw), str('log10 of ' + str(i)), iteration=current[i].raw.shape[0], interface=self.interface
                )
            else:
                current[i].plotter = Plotter(current[i].raw, i, iteration=current[i].raw.shape[0], interface=self.interface)
            current[i].plot = current[i].plotter.plot
        return WIPIDataset(raw=current, key=seg_id)

    @property
    def future(self, value=None):
        '''
        Similar to current/past, but keyed differently and returns different datasets.
        See help for Future.
        '''
        if self._future is None:
            self._future = self.Future(raw=self.__get_children__(), key=None)
            self._future.iteration = self.iteration + 1
        return self._future

    class Future(WIPIDataset):
        # This isn't a real fancy one.
        def __getitem__(self, value):
            if isinstance(value, str):
                print(list(self.__dict__.keys()))
                try:
                    return self.__dict__['raw'][value]
                except Exception:
                    print('{} is not a valid data structure.'.format(value))
            elif isinstance(value, int) or isinstance(value, np.int64):
                # Otherwise, we assume they're trying to index for a seg_id.
                # if value < self.parent.walkers:
                current = {}
                current['pcoord'] = self.__dict__['raw']['pcoord'][value]
                current['states'] = self.__dict__['raw']['states'][value]
                current['bins'] = self.__dict__['raw']['bins'][value]
                current['parents'] = self.__dict__['raw']['parents'][value]
                current['seg_id'] = self.__dict__['raw']['seg_id'][value]
                current['weights'] = self.__dict__['raw']['weights'][value]
                try:
                    current['auxdata'] = {}
                    for key in list(self.__dict__['raw']['auxdata'].keys()):
                        current['auxdata'][key] = self.__dict__['raw']['auxdata'][key][value]
                except Exception:
                    pass
                current = WIPIDataset(current, 'Segment {} in Iter {}'.format(value, self.iteration))
                return current

    def __get_children__(self):
        '''
        Returns all information about the children of a given walker in the current iteration.
        Used to generate and create the future object, if necessary.
        '''

        if self.iteration == self.niters:
            print("Currently at iteration {}, which is the max. There are no children!".format(self.iteration))
            return 0
        iter_data = __get_data_for_iteration__(value=self.iteration + 1, parent=self)
        future = {
            'weights': [],
            'pcoord': [],
            'parents': [],
            'summary': iter_data['summary'],
            'seg_id': [],
            'walkers': iter_data['walkers'],
            'states': [],
            'bins': [],
        }
        for seg_id in range(0, self.current.walkers):
            children = np.where(iter_data['parents'] == seg_id)[0]
            if len(children) == 0:
                error = "No children for seg_id {}.".format(seg_id)
                future['weights'].append(error)
                future['pcoord'].append(error)
                future['parents'].append(error)
                future['seg_id'].append(error)
                future['states'].append(error)
                future['bins'].append(error)
            else:
                # Now, we're gonna put them in the thing.
                value = self.iteration + 1
                future['weights'].append(iter_data['weights'][children])
                future['pcoord'].append(iter_data['pcoord'][...][children, :, :])
                try:
                    aux_data = iter_data['auxdata'][...][children, :, :]
                    try:
                        future['aux_data'].append(aux_data)
                    except Exception:
                        future['aux_data'] = aux_data
                except Exception:
                    pass
                future['parents'].append(iter_data['parents'][children])
                future['seg_id'].append(iter_data['seg_id'][children])
                future['states'].append(self.assign['trajlabels'][value - 1, children, :])
                future['bins'].append(self.assign['assignments'][value - 1, children, :])
        return future

    def go(self):
        '''
        Function automatically called by main() when launched via the command line interface.
        Generally, call main, not this function.
        '''
        w = self

        print("")
        print("Welcome to w_ipa (WESTPA Interactive Python Analysis) v. {}!".format(w.version))
        print("Run w.introduction for a more thorough introduction, or w.help to see a list of options.")
        print("Running analysis & loading files.")
        self.data_reader.open()
        self.analysis_structure()
        # Seems to be consistent with other tools, such as w_assign. For setting the iterations.
        self.data_reader.open()
        self.niters = self.data_reader.current_iteration - 1
        self.iteration = self.niters
        try:
            print('Your current scheme, system and iteration are : {}, {}, {}'.format(w.scheme, os.getcwd(), w.iteration))
        except Exception:
            pass

    @property
    def introduction(self):
        '''
        Just spits out an introduction, in case someone doesn't call help.
        '''
        help_string = '''
        Call as a dictionary item or a .attribute:

        w.past, w.current, w.future:

            {current}

        Raw schemes can be accessed as follows:

            w.scheme.{scheme_keys}

        and contain mostly the same datasets associated with w.

        The following give raw access to the h5 files associated with the current scheme

        w.west
        w.assign
        w.direct
        w.reweight

        OTHER:

        {w}

        '''.format(
            current=self.__format_keys__(self.current.__dir__(), split=' ', offset=12),
            scheme_keys=self.__format_keys__(list(self._scheme.raw.keys())),
            w=self.__format_keys__(self.__dir__(), offset=8, max_length=0, split='', prepend='w.'),
        )
        print(help_string)

    # Just a little function to be used with the introduction.
    def __format_keys__(self, keys, split='/', offset=0, max_length=80, prepend=''):
        rtn = ''
        run_length = 0
        for key in keys:
            rtn += prepend + str(key) + split
            run_length += len(str(key))
            if run_length >= max_length:
                run_length = offset
                rtn += '\n' + ' ' * offset
        if rtn[-1] == split:
            return rtn[:-1]
        else:
            return rtn

    @property
    def help(self):
        '''Just a minor function to call help on itself. Only in here to really help someone get help.'''
        help(self)

    def _repr_pretty_(self, p, cycle):
        self.introduction
        return " "

    def __dir__(self):
        return_list = ['past', 'current', 'future']
        # For the moment, don't expose direct, reweight, or assign, as these are scheme dependent files.
        # They do exist, and always link to the current scheme, however.
        return_list += ['iteration', 'niters', 'scheme', 'list_schemes', 'bin_labels', 'state_labels', 'west', 'trace']
        return sorted(set(return_list))


def entry_point():
    west = WIPI()
    w = west
    # We're gonna print some defaults.
    w.main()
    if w.analysis_mode is False:
        from IPython import embed
        import IPython

        # We're using this to set magic commands.
        # Mostly, we're using it to allow tab completion of objects stored in dictionaries.
        try:
            # Worked on MacOS. Probably just an older version.
            c = IPython.Config()
        except Exception:
            # Seems to be necessary on Linux, and likely on newer installs.
            c = IPython.terminal.ipapp.load_default_config()
        c.IPCompleter.greedy = True
        embed(banner1='', exit_msg='Leaving w_ipa... goodbye.', config=c)
    print("")


if __name__ == '__main__':
    entry_point()