westpa 2022.13__cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (162)
  1. westpa/__init__.py +14 -0
  2. westpa/_version.py +21 -0
  3. westpa/analysis/__init__.py +5 -0
  4. westpa/analysis/core.py +749 -0
  5. westpa/analysis/statistics.py +27 -0
  6. westpa/analysis/trajectories.py +369 -0
  7. westpa/cli/__init__.py +0 -0
  8. westpa/cli/core/__init__.py +0 -0
  9. westpa/cli/core/w_fork.py +152 -0
  10. westpa/cli/core/w_init.py +230 -0
  11. westpa/cli/core/w_run.py +77 -0
  12. westpa/cli/core/w_states.py +212 -0
  13. westpa/cli/core/w_succ.py +99 -0
  14. westpa/cli/core/w_truncate.py +68 -0
  15. westpa/cli/tools/__init__.py +0 -0
  16. westpa/cli/tools/ploterr.py +506 -0
  17. westpa/cli/tools/plothist.py +706 -0
  18. westpa/cli/tools/w_assign.py +597 -0
  19. westpa/cli/tools/w_bins.py +166 -0
  20. westpa/cli/tools/w_crawl.py +119 -0
  21. westpa/cli/tools/w_direct.py +557 -0
  22. westpa/cli/tools/w_dumpsegs.py +94 -0
  23. westpa/cli/tools/w_eddist.py +506 -0
  24. westpa/cli/tools/w_fluxanl.py +376 -0
  25. westpa/cli/tools/w_ipa.py +832 -0
  26. westpa/cli/tools/w_kinavg.py +127 -0
  27. westpa/cli/tools/w_kinetics.py +96 -0
  28. westpa/cli/tools/w_multi_west.py +414 -0
  29. westpa/cli/tools/w_ntop.py +213 -0
  30. westpa/cli/tools/w_pdist.py +515 -0
  31. westpa/cli/tools/w_postanalysis_matrix.py +82 -0
  32. westpa/cli/tools/w_postanalysis_reweight.py +53 -0
  33. westpa/cli/tools/w_red.py +491 -0
  34. westpa/cli/tools/w_reweight.py +780 -0
  35. westpa/cli/tools/w_select.py +226 -0
  36. westpa/cli/tools/w_stateprobs.py +111 -0
  37. westpa/cli/tools/w_timings.py +113 -0
  38. westpa/cli/tools/w_trace.py +599 -0
  39. westpa/core/__init__.py +0 -0
  40. westpa/core/_rc.py +673 -0
  41. westpa/core/binning/__init__.py +55 -0
  42. westpa/core/binning/_assign.c +36018 -0
  43. westpa/core/binning/_assign.cpython-312-aarch64-linux-gnu.so +0 -0
  44. westpa/core/binning/_assign.pyx +370 -0
  45. westpa/core/binning/assign.py +454 -0
  46. westpa/core/binning/binless.py +96 -0
  47. westpa/core/binning/binless_driver.py +54 -0
  48. westpa/core/binning/binless_manager.py +189 -0
  49. westpa/core/binning/bins.py +47 -0
  50. westpa/core/binning/mab.py +506 -0
  51. westpa/core/binning/mab_driver.py +54 -0
  52. westpa/core/binning/mab_manager.py +197 -0
  53. westpa/core/data_manager.py +1761 -0
  54. westpa/core/extloader.py +74 -0
  55. westpa/core/h5io.py +1079 -0
  56. westpa/core/kinetics/__init__.py +24 -0
  57. westpa/core/kinetics/_kinetics.c +45174 -0
  58. westpa/core/kinetics/_kinetics.cpython-312-aarch64-linux-gnu.so +0 -0
  59. westpa/core/kinetics/_kinetics.pyx +815 -0
  60. westpa/core/kinetics/events.py +147 -0
  61. westpa/core/kinetics/matrates.py +156 -0
  62. westpa/core/kinetics/rate_averaging.py +266 -0
  63. westpa/core/progress.py +218 -0
  64. westpa/core/propagators/__init__.py +54 -0
  65. westpa/core/propagators/executable.py +592 -0
  66. westpa/core/propagators/loaders.py +196 -0
  67. westpa/core/reweight/__init__.py +14 -0
  68. westpa/core/reweight/_reweight.c +36899 -0
  69. westpa/core/reweight/_reweight.cpython-312-aarch64-linux-gnu.so +0 -0
  70. westpa/core/reweight/_reweight.pyx +439 -0
  71. westpa/core/reweight/matrix.py +126 -0
  72. westpa/core/segment.py +119 -0
  73. westpa/core/sim_manager.py +839 -0
  74. westpa/core/states.py +359 -0
  75. westpa/core/systems.py +93 -0
  76. westpa/core/textio.py +74 -0
  77. westpa/core/trajectory.py +603 -0
  78. westpa/core/we_driver.py +910 -0
  79. westpa/core/wm_ops.py +43 -0
  80. westpa/core/yamlcfg.py +298 -0
  81. westpa/fasthist/__init__.py +34 -0
  82. westpa/fasthist/_fasthist.c +38755 -0
  83. westpa/fasthist/_fasthist.cpython-312-aarch64-linux-gnu.so +0 -0
  84. westpa/fasthist/_fasthist.pyx +222 -0
  85. westpa/mclib/__init__.py +271 -0
  86. westpa/mclib/__main__.py +28 -0
  87. westpa/mclib/_mclib.c +34610 -0
  88. westpa/mclib/_mclib.cpython-312-aarch64-linux-gnu.so +0 -0
  89. westpa/mclib/_mclib.pyx +226 -0
  90. westpa/oldtools/__init__.py +4 -0
  91. westpa/oldtools/aframe/__init__.py +35 -0
  92. westpa/oldtools/aframe/atool.py +75 -0
  93. westpa/oldtools/aframe/base_mixin.py +26 -0
  94. westpa/oldtools/aframe/binning.py +178 -0
  95. westpa/oldtools/aframe/data_reader.py +560 -0
  96. westpa/oldtools/aframe/iter_range.py +200 -0
  97. westpa/oldtools/aframe/kinetics.py +117 -0
  98. westpa/oldtools/aframe/mcbs.py +153 -0
  99. westpa/oldtools/aframe/output.py +39 -0
  100. westpa/oldtools/aframe/plotting.py +88 -0
  101. westpa/oldtools/aframe/trajwalker.py +126 -0
  102. westpa/oldtools/aframe/transitions.py +469 -0
  103. westpa/oldtools/cmds/__init__.py +0 -0
  104. westpa/oldtools/cmds/w_ttimes.py +361 -0
  105. westpa/oldtools/files.py +34 -0
  106. westpa/oldtools/miscfn.py +23 -0
  107. westpa/oldtools/stats/__init__.py +4 -0
  108. westpa/oldtools/stats/accumulator.py +35 -0
  109. westpa/oldtools/stats/edfs.py +129 -0
  110. westpa/oldtools/stats/mcbs.py +96 -0
  111. westpa/tools/__init__.py +33 -0
  112. westpa/tools/binning.py +472 -0
  113. westpa/tools/core.py +340 -0
  114. westpa/tools/data_reader.py +159 -0
  115. westpa/tools/dtypes.py +31 -0
  116. westpa/tools/iter_range.py +198 -0
  117. westpa/tools/kinetics_tool.py +343 -0
  118. westpa/tools/plot.py +283 -0
  119. westpa/tools/progress.py +17 -0
  120. westpa/tools/selected_segs.py +154 -0
  121. westpa/tools/wipi.py +751 -0
  122. westpa/trajtree/__init__.py +4 -0
  123. westpa/trajtree/_trajtree.c +17829 -0
  124. westpa/trajtree/_trajtree.cpython-312-aarch64-linux-gnu.so +0 -0
  125. westpa/trajtree/_trajtree.pyx +130 -0
  126. westpa/trajtree/trajtree.py +117 -0
  127. westpa/westext/__init__.py +0 -0
  128. westpa/westext/adaptvoronoi/__init__.py +3 -0
  129. westpa/westext/adaptvoronoi/adaptVor_driver.py +214 -0
  130. westpa/westext/hamsm_restarting/__init__.py +3 -0
  131. westpa/westext/hamsm_restarting/example_overrides.py +35 -0
  132. westpa/westext/hamsm_restarting/restart_driver.py +1165 -0
  133. westpa/westext/stringmethod/__init__.py +11 -0
  134. westpa/westext/stringmethod/fourier_fitting.py +69 -0
  135. westpa/westext/stringmethod/string_driver.py +253 -0
  136. westpa/westext/stringmethod/string_method.py +306 -0
  137. westpa/westext/weed/BinCluster.py +180 -0
  138. westpa/westext/weed/ProbAdjustEquil.py +100 -0
  139. westpa/westext/weed/UncertMath.py +247 -0
  140. westpa/westext/weed/__init__.py +10 -0
  141. westpa/westext/weed/weed_driver.py +192 -0
  142. westpa/westext/wess/ProbAdjust.py +101 -0
  143. westpa/westext/wess/__init__.py +6 -0
  144. westpa/westext/wess/wess_driver.py +217 -0
  145. westpa/work_managers/__init__.py +57 -0
  146. westpa/work_managers/core.py +396 -0
  147. westpa/work_managers/environment.py +134 -0
  148. westpa/work_managers/mpi.py +318 -0
  149. westpa/work_managers/processes.py +201 -0
  150. westpa/work_managers/serial.py +28 -0
  151. westpa/work_managers/threads.py +79 -0
  152. westpa/work_managers/zeromq/__init__.py +20 -0
  153. westpa/work_managers/zeromq/core.py +635 -0
  154. westpa/work_managers/zeromq/node.py +131 -0
  155. westpa/work_managers/zeromq/work_manager.py +526 -0
  156. westpa/work_managers/zeromq/worker.py +320 -0
  157. westpa-2022.13.dist-info/METADATA +179 -0
  158. westpa-2022.13.dist-info/RECORD +162 -0
  159. westpa-2022.13.dist-info/WHEEL +7 -0
  160. westpa-2022.13.dist-info/entry_points.txt +30 -0
  161. westpa-2022.13.dist-info/licenses/LICENSE +21 -0
  162. westpa-2022.13.dist-info/top_level.txt +1 -0
westpa/core/kinetics/_kinetics.pyx
@@ -0,0 +1,815 @@
+ import warnings
+
+ import cython
+ import numpy as np
+ cimport numpy as np
+
+ ctypedef np.uint16_t index_t
+ ctypedef np.float64_t weight_t
+ ctypedef np.uint8_t bool_t
+ ctypedef np.int64_t seg_id_t
+ ctypedef np.uintp_t uint_t # 32 bits on 32-bit systems, 64 bits on 64-bit systems
+
+ cdef double NAN = np.nan
+
+ weight_dtype = np.float64
+ index_dtype = np.uint16
+ bool_dtype = np.bool_
+
+ from westpa.core.binning.assign import UNKNOWN_INDEX as _UNKNOWN_INDEX
+ cdef index_t UNKNOWN_INDEX = _UNKNOWN_INDEX
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ cpdef flux_assign(np.ndarray[weight_t, ndim=1] weights,
+                   np.ndarray[index_t, ndim=1] init_assignments,
+                   np.ndarray[index_t, ndim=1] final_assignments,
+                   np.ndarray[weight_t, ndim=2] flux_matrix):
+     cdef:
+         Py_ssize_t m,n
+         index_t i, j
+     n = len(weights)
+     for m from 0 <= m < n:
+         i = init_assignments[m]
+         j = final_assignments[m]
+         flux_matrix[i,j] += weights[m]
+     return
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ cpdef pop_assign(np.ndarray[weight_t, ndim=1] weights,
+                  np.ndarray[index_t, ndim=1] assignments,
+                  np.ndarray[weight_t, ndim=1] populations):
+     cdef:
+         Py_ssize_t m,n
+         index_t i,
+     n = len(weights)
+     for m from 0 <= m < n:
+         i = assignments[m]
+         populations[i] += weights[m]
+     return
+
+ @cython.cdivision(True)
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ cpdef calc_rates(weight_t[:,::1] fluxes,
+                  weight_t[::1] populations,
+                  weight_t[:,::1] rates,
+                  bool_t[:,::1] mask):
+     '''Calculate a rate matrix from flux and population matrices. A matrix of the same
+     shape as ``fluxes`` is also filled in, to be used as a mask for rate-matrix entries
+     whose initial-state population is zero.'''
+
+     cdef:
+         Py_ssize_t narrays, nbins
+         index_t iarray, i, j
+
+     nbins = fluxes.shape[0]
+
+     with nogil:
+         for i in range(nbins):
+             if populations[i] == 0.0:
+                 for j in range(nbins):
+                     mask[i,j] = 1
+                     rates[i,j] = 0.0
+             else:
+                 for j in range(nbins):
+                     mask[i,j] = 0
+                     rates[i,j] = fluxes[i,j] / populations[i]
+
+
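As a rough sketch of how calc_rates might be driven from Python once this extension is compiled (the import path, array sizes, and values below are assumptions made for illustration, not taken from the package):

    import numpy as np
    from westpa.core.kinetics._kinetics import calc_rates  # assumed import path

    nbins = 4
    fluxes = np.random.rand(nbins, nbins)             # probability flux between bins
    populations = np.array([0.5, 0.3, 0.0, 0.2])      # one deliberately empty bin
    rates = np.empty((nbins, nbins), dtype=np.float64)
    mask = np.empty((nbins, nbins), dtype=np.uint8)

    calc_rates(fluxes, populations, rates, mask)
    # rows whose population is zero come back zeroed, with mask set to 1
    masked_rates = np.ma.masked_array(rates, mask=mask.astype(bool))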
+ @cython.cdivision(True)
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ cpdef weight_t calculate_labeled_fluxes_alllags(Py_ssize_t nstates,
+                                                 weights,
+                                                 parent_ids,
+                                                 micro_assignments,
+                                                 traj_assignments,
+                                                 weight_t[:,:,:,:] fluxes) except 0.0:
+     cdef:
+         Py_ssize_t niters = len(weights), nsegs, npts
+         weight_t twindow = 0.0
+         long lastiter, firstiter, iiter, windowlen
+         long seg_id, current_id, parent_id
+         index_t ibin, ilabel, fbin, flabel
+         weight_t weight
+         weight_t[:] lweights
+
+         index_t[:,:] lmicro
+         index_t[:,:] ltraj
+
+     # We need to trace backward in each window, so we go from end to beginning
+
+     for lastiter in range(niters-1,-1,-1):
+         for windowlen in range(1,niters+1):
+             firstiter = lastiter-windowlen+1
+             if firstiter < 0: continue
+
+             # we loop over all trajectories that are alive as of the last iteration
+             # in the averaging window
+             lweights = weights[lastiter]
+             lmicro = micro_assignments[lastiter]
+             ltraj = traj_assignments[lastiter]
+             nsegs = lmicro.shape[0]
+             npts = lmicro.shape[1]
+
+             for seg_id in range(nsegs):
+                 weight = lweights[seg_id]
+                 fbin = lmicro[seg_id,npts-1]
+                 flabel = ltraj[seg_id,npts-1]
+
+                 # trace upwards in history to firstiter
+                 iiter = lastiter
+                 current_id = seg_id
+                 parent_id = parent_ids[iiter][seg_id]
+                 while iiter > firstiter and parent_id >= 0:
+                     iiter -= 1
+                     current_id = parent_id
+                     parent_id = parent_ids[iiter][current_id]
+
+                 ibin = micro_assignments[iiter][current_id][0]
+                 ilabel = traj_assignments[iiter][current_id][0]
+
+                 if ilabel >= nstates or flabel >= nstates:
+                     raise ValueError('invalid state index (ilabel={},flabel={})'.format(ilabel,flabel))
+
+                 fluxes[ilabel,flabel,ibin,fbin] += weight
+                 twindow += weight*windowlen
+     return twindow
+
+ @cython.cdivision(True)
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ cpdef weight_t calculate_labeled_fluxes(Py_ssize_t nstates,
+                                         weights,
+                                         parent_ids,
+                                         micro_assignments,
+                                         traj_assignments,
+                                         weight_t[:,:,:,:] fluxes) except 0.0:
+     cdef:
+         Py_ssize_t niters = len(weights), nsegs, npts
+         weight_t twindow = 0.0
+         long lastiter, firstiter, iiter, windowlen
+         long seg_id, current_id, parent_id
+         index_t ibin, ilabel, fbin, flabel
+         weight_t weight
+         weight_t[:] lweights
+
+         index_t[:,:] lmicro
+         index_t[:,:] ltraj
+
+     # we loop over all trajectories that are alive as of the last iteration
+     # in the averaging window
+
+     lastiter = niters-1
+     windowlen = niters
+
+     lweights = weights[lastiter]
+     lmicro = micro_assignments[lastiter]
+     ltraj = traj_assignments[lastiter]
+     nsegs = lmicro.shape[0]
+     npts = lmicro.shape[1]
+
+     for seg_id in range(nsegs):
+         weight = lweights[seg_id]
+         fbin = lmicro[seg_id,npts-1]
+         flabel = ltraj[seg_id,npts-1]
+
+         # trace upwards in history to firstiter
+         iiter = lastiter
+         current_id = seg_id
+         parent_id = parent_ids[iiter][seg_id]
+         while iiter > 0 and parent_id >= 0:
+             iiter -= 1
+             current_id = parent_id
+             parent_id = parent_ids[iiter][current_id]
+
+         assert iiter == 0 or parent_id < 0
+         assert 0 <= iiter < niters
+         assert current_id >= 0
+
+         ibin = micro_assignments[iiter][current_id][0]
+         ilabel = traj_assignments[iiter][current_id][0]
+
+         #if ilabel >= nstates or flabel >= nstates:
+         #    raise ValueError('invalid state index (ilabel={},flabel={})'.format(ilabel,flabel))
+         if ilabel < nstates and flabel < nstates:
+             fluxes[ilabel,flabel,ibin,fbin] += weight
+         twindow += weight*windowlen
+     return twindow
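A minimal, self-contained call of calculate_labeled_fluxes with toy data may help show the expected input layout (one weight array, one parent-id array, and one (nsegs, npts) assignment array per iteration); every size and value here is invented, and the import path is an assumption:

    import numpy as np
    from westpa.core.kinetics._kinetics import calculate_labeled_fluxes  # assumed path

    nstates, nbins, niters, nsegs, npts = 2, 3, 2, 4, 5
    rng = np.random.default_rng(1)

    weights = [np.full(nsegs, 1.0 / nsegs) for _ in range(niters)]
    parent_ids = [np.full(nsegs, -1, dtype=np.int64),       # first iteration: no parents
                  np.arange(nsegs, dtype=np.int64)]
    micro = [rng.integers(0, nbins, size=(nsegs, npts)).astype(np.uint16) for _ in range(niters)]
    traj = [rng.integers(0, nstates, size=(nsegs, npts)).astype(np.uint16) for _ in range(niters)]

    fluxes = np.zeros((nstates, nstates, nbins, nbins), dtype=np.float64)
    twindow = calculate_labeled_fluxes(nstates, weights, parent_ids, micro, traj, fluxes)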
+
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ cpdef object nested_to_flat_matrix(weight_t[:,:,:,:] input):
+     '''Convert nested flux/rate matrix into a flat supermatrix.'''
+
+     cdef:
+         Py_ssize_t nstates = input.shape[0], nbins=input.shape[3], istate, ibin, jstate, jbin
+         weight_t[:,:] _output
+
+     output = np.empty((nstates*nbins,nstates*nbins), weight_dtype)
+     _output = output
+
+     for istate in range(nstates):
+         for jstate in range(nstates):
+             for ibin in range(nbins):
+                 for jbin in range(nbins):
+                     #_output[istate*nbins+ibin, jstate*nbins+jbin] = input[istate, jstate, ibin, jbin]
+                     _output[ibin*nstates+istate,jbin*nstates+jstate] = input[istate,jstate,ibin,jbin]
+
+     return output
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ cpdef object nested_to_flat_vector(weight_t[:,:] input):
+     '''Convert nested labeled population vector into a flat vector.'''
+
+     cdef:
+         Py_ssize_t nstates = input.shape[0], nbins=input.shape[1], istate, ibin
+         weight_t[:] _output
+
+     output = np.empty((nstates*nbins,), weight_dtype)
+     _output = output
+
+     for istate in range(nstates):
+         for ibin in range(nbins):
+             #_output[istate*nbins+ibin] = input[istate, ibin]
+             _output[ibin*nstates+istate] = input[istate,ibin]
+
+     return output
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ cpdef object flat_to_nested_matrix(Py_ssize_t nstates, Py_ssize_t nbins, weight_t[:,:] input):
+     '''Convert flat supermatrix into nested matrix.'''
+
+     cdef:
+         Py_ssize_t istate, jstate, ibin, jbin
+         weight_t[:,:,:,:] _output
+
+     if input.shape[0] != nstates*nbins or input.shape[1] != nstates*nbins:
+         # since input.shape is a C vector rather than a tuple, we can't print
+         # it easily
+         raise TypeError('input has incorrect shape for {} states and {} bins'.format(nstates, nbins))
+
+     output = np.empty((nstates, nstates, nbins, nbins), weight_dtype)
+     _output = output
+
+     for istate in range(nstates):
+         for jstate in range(nstates):
+             for ibin in range(nbins):
+                 for jbin in range(nbins):
+                     #_output[istate,jstate,ibin,jbin] = input[istate*nbins+ibin, jstate*nbins+jbin]
+                     _output[istate,jstate,ibin,jbin] = input[ibin*nstates+istate,jbin*nstates+jstate]
+     return output
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ cpdef object flat_to_nested_vector(Py_ssize_t nstates, Py_ssize_t nbins, weight_t[:] input):
+     '''Convert flat "supervector" into nested vector.'''
+
+     cdef:
+         Py_ssize_t istate, ibin
+         weight_t[:,:] _output
+
+     if input.shape[0] != nstates*nbins:
+         raise TypeError('input has incorrect shape for {} states and {} bins'.format(nstates, nbins))
+
+     output = np.empty((nstates, nbins), weight_dtype)
+     _output = output
+
+     for istate in xrange(nstates):
+         for ibin in xrange(nbins):
+             #_output[istate,ibin] = input[istate*nbins+ibin]
+             _output[istate,ibin] = input[ibin*nstates+istate]
+
+     return output
+
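The commented-out assignments above preserve an older state-major layout; the active code flattens bin-major with the state index varying fastest, i.e. flat index ibin*nstates + istate. A small round-trip check of that convention (the import path is assumed):

    import numpy as np
    from westpa.core.kinetics._kinetics import nested_to_flat_vector, flat_to_nested_vector

    nstates, nbins = 2, 3
    nested = np.arange(nstates * nbins, dtype=np.float64).reshape(nstates, nbins)
    flat = nested_to_flat_vector(nested)

    # element (istate=0, ibin=1) lives at flat index 1*nstates + 0
    assert flat[1 * nstates + 0] == nested[0, 1]
    assert np.array_equal(flat_to_nested_vector(nstates, nbins, flat), nested)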
+ @cython.boundscheck(True)
+ @cython.wraparound(False)
+ @cython.cdivision(True)
+ cpdef _reduce_labeled_rate_matrix_to_macro(Py_ssize_t nstates, Py_ssize_t nbins, weight_t[:,:] rates, weight_t[:] pops):
+     '''Reduce a labeled microstate rate matrix into a macrostate rate matrix. This is
+     for internal use, where the rates/pops vectors have been blocked by state.'''
+
+     cdef:
+         Py_ssize_t istate, jstate, ibin, jbin
+         weight_t[:,:] _macro_rates
+         weight_t sspop, rate_elem, traj_ens_pop
+
+     macro_rates = np.zeros((nstates, nstates), np.float64)
+     _macro_rates = macro_rates
+
+     for istate in xrange(nstates):
+         for jstate in xrange(nstates):
+             for ibin in xrange(nbins):
+                 for jbin in xrange(nbins):
+                     sspop = pops[ibin*nstates+istate]
+                     rate_elem = rates[ibin*nstates+istate,jbin*nstates+jstate]
+                     _macro_rates[istate,jstate] += sspop*rate_elem
+
+     # Normalize by total population in each trajectory ensemble
+     for istate in xrange(nstates):
+         #traj_ens_pop = pops[istate].sum()
+         traj_ens_pop = 0
+         for ibin in xrange(nbins):
+             traj_ens_pop += pops[ibin*nstates+istate]
+
+         for jstate in xrange(nstates):
+             _macro_rates[istate, jstate] /= traj_ens_pop
+
+     return macro_rates
+
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ @cython.cdivision(True)
+ cpdef labeled_flux_to_rate(weight_t[:,:,:,:] labeled_fluxes, weight_t[:,:] labeled_pops, object output=None):
+     '''Convert a labeled flux matrix and corresponding labeled bin populations to
+     a labeled rate matrix.'''
+
+     cdef:
+         Py_ssize_t istate, jstate, ibin, jbin, nstates, nbins
+         weight_t[:,:,:,:] _rates
+
+     nstates = labeled_fluxes.shape[0]
+     nbins = labeled_fluxes.shape[2]
+
+     if output is None:
+         output = np.empty_like(labeled_fluxes)
+     _rates = output
+
+     with nogil:
+         for istate in xrange(nstates):
+             for jstate in xrange(nstates):
+                 for ibin in xrange(nbins):
+                     for jbin in xrange(nbins):
+                         if labeled_pops[istate,ibin] == 0.0:
+                             if labeled_fluxes[istate,jstate,ibin,jbin] > 0.0:
+                                 with gil:
+                                     #raise ValueError('flux matrix entry nonzero but population zero')
+                                     warnings.warn('flux matrix entry nonzero but population zero')
+
+                             _rates[istate,jstate,ibin,jbin] = 0.0
+                         else:
+                             _rates[istate,jstate,ibin,jbin] = labeled_fluxes[istate,jstate,ibin,jbin] / labeled_pops[istate,ibin]
+     return output
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ @cython.cdivision(True)
+ cpdef sequence_macro_flux_to_rate(weight_t[:] dataset, weight_t[:,:] pops, Py_ssize_t istate, Py_ssize_t jstate, bint pairwise=True, stride=None):
+     '''Convert a sequence of macrostate fluxes and corresponding list of trajectory ensemble populations
+     to a sequence of rate matrices.
+
+     If the optional ``pairwise`` is true (the default), then rates are normalized according to the
+     relative probability of the initial state among the pair of states (initial, final); this is
+     probably what you want, as these rates will then depend only on the definitions of the states
+     involved (and never the remaining states). Otherwise (``pairwise`` is false), the rates are
+     normalized according to the probability of the initial state among *all* other states.'''
+
+     cdef:
+         Py_ssize_t iiter, nstates, itersum
+         weight_t[:] _rates, _fluxsum, _pairsum, _psum
+
+     rates = np.zeros((dataset.shape[0]), dtype=weight_dtype)
+     fluxsum = np.zeros((dataset.shape[0]), dtype=weight_dtype)
+     psum = np.zeros((dataset.shape[0]), dtype=weight_dtype)
+     pairsum = np.zeros((dataset.shape[0]), dtype=weight_dtype)
+     _fluxsum = fluxsum
+     _pairsum = pairsum
+     _psum = psum
+     _rates = rates
+
+     # We want to modify this to be the SUM of fluxes up till this point, divided by the SUM of the population till then.
+     with nogil:
+         for iiter in xrange(dataset.shape[0]):
+             if iiter == 0:
+                 # We need to catch if we haven't entered the istate or jstate yet.
+                 # Otherwise, we introduce a NaN into the calculation that we cannot recover from.
+                 if pairwise and (pops[0,istate] + pops[0,jstate]) != 0.0:
+                     _psum[0] = pops[0, istate] / (pops[0,istate] + pops[0,jstate])
+                 else:
+                     _psum[0] = pops[0, istate]
+                 _fluxsum[0] = dataset[0]
+             else:
+                 if pairwise and (pops[iiter,istate] + pops[iiter,jstate]) != 0.0:
+                     _psum[iiter] = (pops[iiter, istate] / (pops[iiter,istate] + pops[iiter,jstate])) + _psum[iiter-1]
+                 else:
+                     _psum[iiter] = pops[iiter,istate] + _psum[iiter-1]
+                 _fluxsum[iiter] = dataset[iiter] + _fluxsum[iiter-1]
+             if _psum[iiter] > 0 and _fluxsum[iiter] > 0:
+                 _rates[iiter] = _fluxsum[iiter] / _psum[iiter]
+             else:
+                 _rates[iiter] = 0.0
+
+     return rates[iiter]
+
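For readers who prefer plain NumPy, the loop above (in its default pairwise=True form) amounts to the cumulative estimate sketched below; the helper name and arguments are invented for illustration, and the final element is what the function actually returns:

    import numpy as np

    def cumulative_pairwise_rate(fluxes_ij, pops, istate, jstate):
        # pops: (niters, nstates) trajectory-ensemble populations; fluxes_ij: (niters,)
        pair = pops[:, istate] + pops[:, jstate]
        safe_pair = np.where(pair == 0.0, 1.0, pair)
        frac = np.where(pair != 0.0, pops[:, istate] / safe_pair, pops[:, istate])
        fluxsum = np.cumsum(fluxes_ij)
        psum = np.cumsum(frac)
        safe_psum = np.where(psum == 0.0, 1.0, psum)
        rates = np.where((psum > 0) & (fluxsum > 0), fluxsum / safe_psum, 0.0)
        return rates[-1]   # mirrors the `return rates[iiter]` above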
+ """
+ In the following ``state`` is a 5-tuple of the following arrays of doubles:
+     last_time[nsegs]
+     last_entries[nsegs,nstates]
+     last_exits[nsegs,nstates]
+     last_exits_td[nsegs,nstates]
+     last_completions[nsegs,nstates,nstates]
+
+
+ It is intended to be opaque to the calling routines.
+ """
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ cpdef _fast_transition_state_copy(Py_ssize_t iiter,
+                                   Py_ssize_t nstates,
+                                   seg_id_t[:] parent_ids,
+                                   object last_state):
+     cdef:
+         bint has_last_state = 0
+         Py_ssize_t nsegs, seg_id, parent_id
+         double[:] _last_time, _prev_last_time
+         double[:,:] _last_entries, _last_exits, _prev_last_entries, _prev_last_exits, _last_exits_td, _prev_last_exits_td
+         double[:,:,:] _last_completions, _prev_last_completions
+
+
+     nsegs = parent_ids.shape[0]
+
+     last_time = np.empty((nsegs,), np.double)
+     # Use nstates + 1 to account for possible unknown states
+     last_entries = np.empty((nsegs,nstates+1), np.double)
+     last_exits = np.empty((nsegs,nstates+1), np.double)
+     last_exits_td = np.empty((nsegs,nstates+1), np.double)
+     last_completions = np.empty((nsegs,nstates+1,nstates+1), np.double)
+
+     _last_time = last_time
+     _last_entries = last_entries
+     _last_exits = last_exits
+     _last_exits_td = last_exits_td
+     _last_completions = last_completions
+
+     has_last_state = (last_state is not None)
+
+     if has_last_state:
+         _prev_last_time = last_state[0]
+         _prev_last_entries = last_state[1]
+         _prev_last_exits = last_state[2]
+         _prev_last_exits_td = last_state[3]
+         _prev_last_completions = last_state[4]
+
+     for seg_id in xrange(nsegs):
+         parent_id = parent_ids[seg_id]
+
+         if not has_last_state or parent_id < 0:
+             _last_time[seg_id] = 0.0
+             _last_entries[seg_id,:] = 0.0
+             _last_exits[seg_id,:] = 0.0
+             _last_exits_td[seg_id,:] = 0.0
+             _last_completions[seg_id,:,:] = 0.0
+         else:
+             _last_time[seg_id] = _prev_last_time[parent_id]
+             _last_entries[seg_id,:] = _prev_last_entries[parent_id,:]
+             _last_exits[seg_id,:] = _prev_last_exits[parent_id,:]
+             _last_exits_td[seg_id,:] = _prev_last_exits_td[parent_id,:]
+             _last_completions[seg_id,:,:] = _prev_last_completions[parent_id,:,:]
+
+     return (last_time, last_entries, last_exits, last_exits_td, last_completions)
+
+
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ cpdef find_macrostate_transitions(Py_ssize_t nstates,
+                                   weight_t[:] weights,
+                                   index_t[:,:] label_assignments,
+                                   index_t[:,:] state_assignments,
+                                   double dt,
+                                   object state,
+                                   weight_t[:,:] macro_fluxes,
+                                   uint_t[:,:] macro_counts,
+                                   weight_t[:] target_fluxes,
+                                   uint_t[:] target_counts,
+                                   object durations):
+     cdef:
+         Py_ssize_t nsegs, npts, seg_id, ipt
+         double itime, tm, t_ed
+         double[:] _last_time
+         double[:,:] _last_entries, _last_exits, _last_exits_td
+         double[:,:,:] _last_completions
+         index_t flabel, ilabel, iistate, slabel
+         weight_t _weight
+     """
+     A Cython function that tracks how long macrostate transitions take. Requires the simulation
+     to have already been binned and placed into macrostates, as appropriate. Called by functions such as
+     w_kinetics to generate kinetics information in the 'per-tau' format.
+
+     Parameters
+     ----------
+
+     weights : weight_t
+         Segment weights, typically read from the main .h5 file of the simulation by the data reader
+         of the calling function.
+     label_assignments : index_t
+         Macrostate label assignments, compatible with those output by w_assign. Bins are marked as
+         states (or ignored) in a previous step. Should be in the form of a 'tag', or 'color'; in this
+         dataset, once a walker has been marked with a macrostate, it does not lose the macrostate
+         assignment, even upon leaving the appropriately defined state bin, until it enters another state bin.
+     state_assignments : index_t
+         Macrostate label assignments, but without any 'color' tagging.
+     dt : double
+         The time spacing between successive data points within an iteration.
+     state : object
+         The output of _fast_transition_state_copy.
+     macro_fluxes : weight_t
+         An array that contains state to state fluxes.
+     macro_counts : uint_t
+         An array that contains the observed number of state to state fluxes.
+     target_fluxes : weight_t
+         An array that contains the fluxes into the target state from any state.
+     target_counts : uint_t
+         An array that contains the observed number of fluxes into the target state from any state.
+     durations : list_like (object)
+         A list containing the calculated duration information, including the iistate, flabel (state to state),
+         event duration, the weight of all walkers involved, and all seg_ids.
+
+     """
+
+
+     nsegs = label_assignments.shape[0]
+     npts = label_assignments.shape[1]
+
+     _last_time = state[0]
+     _last_entries = state[1]
+     _last_exits = state[2]
+     _last_exits_td = state[3]
+     _last_completions = state[4]
+
+     for seg_id in xrange(nsegs):
+         itime = _last_time[seg_id]
+         _weight = weights[seg_id]
+
+         # transitions never occur between the (overlapping) end point of previous iteration and beginning of
+         # current iteration, so it suffices to start looking at timepoint 1 (and backwards to timepoint 0)
+         for ipt in range(1,npts):
+             tm = itime + ipt*dt
+             flabel = label_assignments[seg_id,ipt]
+             ilabel = label_assignments[seg_id,ipt-1]
+             slabel = state_assignments[seg_id,ipt]
+
+             # if we have left our state transition barrier...
+             if flabel == slabel:
+                 _last_exits_td[seg_id,flabel] = tm
+
+             if flabel != ilabel:
+                 target_fluxes[flabel] += _weight
+                 target_counts[flabel] += 1
+                 _last_exits[seg_id,ilabel] = tm
+                 _last_entries[seg_id,flabel] = tm
+
+                 for iistate in xrange(nstates):
+                     # if we have more recently returned to iistate than arrived at flabel from iistate,
+                     # we note a new completed transition from iistate to flabel
+                     # equality applies only for 0, which means we're counting an arrival from the
+                     # state where the trajectory started
+                     if _last_exits[seg_id, iistate] > 0 and _last_entries[seg_id,iistate] >= _last_completions[seg_id,iistate,flabel]:
+                         macro_fluxes[iistate,flabel] += _weight
+                         macro_counts[iistate,flabel] += 1
+                         _last_completions[seg_id,iistate,flabel] = tm
+
+                         # omit circular transitions (for now) because it causes the transition
+                         # list to explode
+                         if iistate != flabel:
+                             t_ed = tm - _last_exits_td[seg_id,iistate]
+                             durations.append((iistate,flabel,t_ed,_weight, seg_id))
+         _last_time[seg_id] = tm
+
+
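A toy, self-contained driver showing how _fast_transition_state_copy and find_macrostate_transitions chain across iterations; the sizes, random assignments, preallocated accumulators, and the import path are invented stand-ins, not the actual w_kinetics code:

    import numpy as np
    from westpa.core.kinetics._kinetics import (_fast_transition_state_copy,
                                                find_macrostate_transitions)

    nstates, nsegs, npts, dt = 2, 5, 3, 1.0
    rng = np.random.default_rng(0)

    macro_fluxes = np.zeros((nstates + 1, nstates + 1), dtype=np.float64)
    macro_counts = np.zeros((nstates + 1, nstates + 1), dtype=np.uintp)
    target_fluxes = np.zeros(nstates + 1, dtype=np.float64)
    target_counts = np.zeros(nstates + 1, dtype=np.uintp)
    durations = []
    state = None                                    # the opaque 5-tuple described above

    for n_iter in range(4):                         # pretend WE iterations
        parent_ids = (np.arange(nsegs, dtype=np.int64) if n_iter
                      else np.full(nsegs, -1, dtype=np.int64))
        weights = np.full(nsegs, 1.0 / nsegs)
        labels = rng.integers(0, nstates, size=(nsegs, npts)).astype(np.uint16)
        state = _fast_transition_state_copy(n_iter, nstates, parent_ids, state)
        find_macrostate_transitions(nstates, weights, labels, labels, dt, state,
                                    macro_fluxes, macro_counts,
                                    target_fluxes, target_counts, durations)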
+ cdef class StreamingStats2D:
+     '''Calculate mean and variance of a series of two-dimensional arrays of shape (nbins, nbins)
+     using an online algorithm. The statistics are accumulated along what would be axis=0 if the
+     input arrays were stacked vertically.
+
+     This code has been adapted from:
+     http://www.johndcook.com/skewness_kurtosis.html'''
+
+     cdef weight_t[:,::1] _M1
+     cdef weight_t[:,::1] _M2
+     cdef uint_t[:,::1] _n
+     cdef Py_ssize_t _sz0, _sz1
+
+     def __init__(self, tuple shape):
+
+         assert len(shape) == 2
+
+         self._n = np.zeros(shape, dtype=np.uint)
+         self._M1 = np.zeros(shape, dtype=weight_dtype)
+         self._M2 = np.zeros(shape, dtype=weight_dtype)
+         self._sz0, self._sz1 = shape
+
+     @cython.boundscheck(False)
+     @cython.wraparound(False)
+     @cython.cdivision(True)
+     def update(self, weight_t[:,::1] x, bool_t[:,::1] mask):
+         '''Update the running set of statistics with a new observation.
+
+         Parameters
+         ----------
+         x : 2d ndarray
+             values from a single observation
+         mask : 2d ndarray
+             A uint8 array to exclude entries from the accumulated statistics.
+         '''
+
+         cdef:
+             index_t i, j
+             int n1
+             double delta, delta_n, term1
+
+         assert x.shape[0] == mask.shape[0] == self._sz0
+         assert x.shape[1] == mask.shape[1] == self._sz1
+
+         with nogil:
+             for i in range(self._sz0):
+                 for j in range(self._sz1):
+                     if not mask[i,j]:
+                         n1 = self._n[i,j]
+                         self._n[i,j] += 1
+                         delta = x[i,j] - self._M1[i,j]
+                         delta_n = delta / self._n[i,j]
+                         term1 = delta * delta_n * n1
+                         self._M1[i,j] += delta_n
+                         self._M2[i,j] += term1
+
+     @cython.boundscheck(False)
+     @cython.wraparound(False)
+     @cython.cdivision(True)
+     def __add__(StreamingStats2D self, StreamingStats2D other):
+         cdef:
+             index_t i, j
+             int n1
+             double delta, delta2
+             StreamingStats2D combined
+
+         combined = StreamingStats2D((self._sz0, self._sz1))
+
+         for i in range(self._sz0):
+             for j in range(self._sz1):
+                 combined._n[i,j] = self._n[i,j] + other._n[i,j]
+                 delta = other._M1[i,j] - self._M1[i,j]
+                 delta2 = delta * delta
+                 combined._M1[i,j] = (other._n[i,j]*other._M1[i,j] + self._n[i,j]*self._M1[i,j]) / combined._n[i,j]
+                 combined._M2[i,j] = other._M2[i,j] + self._M2[i,j] + (delta2 * self._n[i,j] * other._n[i,j]) / combined._n[i,j]
+
+         return combined
+
+
+     def __iadd__(StreamingStats2D self, StreamingStats2D other):
+         combined = self + other
+         self = combined
+         return self
+
+
+     property mean:
+         def __get__(self):
+             tmp = np.asarray(self._M1)
+             return np.nan_to_num(tmp)
+
+     property var:
+         def __get__(self):
+             tmp_m = np.asarray(self._M2)
+             tmp_n = np.asarray(self._n)
+             return np.nan_to_num(tmp_m / tmp_n)
+
+     property n:
+         def __get__(self):
+             return np.asarray(self._n)
+
+         def __set__(self, val):
+             self._n = val[:]
+
+     property M1:
+         def __get__(self):
+             return np.asarray(self._M1)
+
+         def __set__(self, val):
+             self._M1 = val[:]
+
+     property M2:
+         def __get__(self):
+             return np.asarray(self._M2)
+
+         def __set__(self, val):
+             self._M2 = val[:]
+
+
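A quick sanity check of the online mean/variance against NumPy, illustrative only (the import path is assumed, and no entries are masked here):

    import numpy as np
    from westpa.core.kinetics._kinetics import StreamingStats2D

    nbins = 3
    samples = np.random.rand(10, nbins, nbins)
    no_mask = np.zeros((nbins, nbins), dtype=np.uint8)

    stats = StreamingStats2D((nbins, nbins))
    for frame in samples:
        stats.update(frame, no_mask)

    assert np.allclose(stats.mean, samples.mean(axis=0))
    assert np.allclose(stats.var, samples.var(axis=0))   # M2/n is the population variance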
+ cdef class StreamingStats1D:
+     '''Calculate mean and variance of a series of one-dimensional arrays of shape (nbins,)
+     using an online algorithm. The statistics are accumulated along what would be axis=0 if the
+     input arrays were stacked vertically.
+
+     This code has been adapted from:
+     http://www.johndcook.com/skewness_kurtosis.html'''
+
+     cdef weight_t[::1] _M1
+     cdef weight_t[::1] _M2
+     cdef uint_t[::1] _n
+     cdef Py_ssize_t _sz0
+
+     def __init__(self, int nbins):
+
+         self._n = np.zeros((nbins,), dtype=np.uint)
+         self._M1 = np.zeros((nbins,), dtype=weight_dtype)
+         self._M2 = np.zeros((nbins,), dtype=weight_dtype)
+         self._sz0 = nbins
+
+     @cython.boundscheck(False)
+     @cython.wraparound(False)
+     @cython.cdivision(True)
+     def update(self, weight_t[::1] x, bool_t[::1] mask):
+         '''Update the running set of statistics with a new observation.
+
+         Parameters
+         ----------
+         x : 1d ndarray
+             values from a single observation
+         mask : 1d ndarray
+             A uint8 array to exclude entries from the accumulated statistics.
+         '''
+
+         cdef:
+             index_t i
+             int n1
+             double delta, delta_n, term1
+
+         assert x.shape[0] == mask.shape[0] == self._sz0
+
+         with nogil:
+             for i in range(self._sz0):
+                 if not mask[i]:
+                     n1 = self._n[i]
+                     self._n[i] += 1
+                     delta = x[i] - self._M1[i]
+                     delta_n = delta / self._n[i]
+                     term1 = delta * delta_n * n1
+                     self._M1[i] += delta_n
+                     self._M2[i] += term1
+
+     @cython.boundscheck(False)
+     @cython.wraparound(False)
+     @cython.cdivision(True)
+     def __add__(StreamingStats1D self, StreamingStats1D other):
+         cdef:
+             index_t i
+             int n1
+             double delta, delta2
+             StreamingStats1D combined
+
+         combined = StreamingStats1D(self._sz0)
+
+         for i in range(self._sz0):
+             combined._n[i] = self._n[i] + other._n[i]
+             delta = other._M1[i] - self._M1[i]
+             delta2 = delta * delta
+             combined._M1[i] = (other._n[i]*other._M1[i] + self._n[i]*self._M1[i]) / combined._n[i]
+             combined._M2[i] = other._M2[i] + self._M2[i] + (delta2 * self._n[i] * other._n[i]) / combined._n[i]
+
+         return combined
+
+
+     def __iadd__(StreamingStats1D self, StreamingStats1D other):
+         combined = self + other
+         self = combined
+         return self
+
+
+     property mean:
+         def __get__(self):
+             tmp = np.asarray(self._M1)
+             return np.nan_to_num(tmp)
+
+     property var:
+         def __get__(self):
+             tmp_m = np.asarray(self._M2)
+             tmp_n = np.asarray(self._n)
+             return np.nan_to_num(tmp_m / tmp_n)
+
+     property n:
+         def __get__(self):
+             return np.asarray(self._n)
+
+         def __set__(self, val):
+             self._n = val[:]
+
+     property M1:
+         def __get__(self):
+             return np.asarray(self._M1)
+
+         def __set__(self, val):
+             self._M1 = val[:]
+
+     property M2:
+         def __get__(self):
+             return np.asarray(self._M2)
+
+         def __set__(self, val):
+             self._M2 = val[:]