westpa-2022.10-cp312-cp312-macosx_10_9_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- westpa/__init__.py +14 -0
- westpa/_version.py +21 -0
- westpa/analysis/__init__.py +5 -0
- westpa/analysis/core.py +746 -0
- westpa/analysis/statistics.py +27 -0
- westpa/analysis/trajectories.py +360 -0
- westpa/cli/__init__.py +0 -0
- westpa/cli/core/__init__.py +0 -0
- westpa/cli/core/w_fork.py +152 -0
- westpa/cli/core/w_init.py +230 -0
- westpa/cli/core/w_run.py +77 -0
- westpa/cli/core/w_states.py +212 -0
- westpa/cli/core/w_succ.py +99 -0
- westpa/cli/core/w_truncate.py +59 -0
- westpa/cli/tools/__init__.py +0 -0
- westpa/cli/tools/ploterr.py +506 -0
- westpa/cli/tools/plothist.py +706 -0
- westpa/cli/tools/w_assign.py +596 -0
- westpa/cli/tools/w_bins.py +166 -0
- westpa/cli/tools/w_crawl.py +119 -0
- westpa/cli/tools/w_direct.py +547 -0
- westpa/cli/tools/w_dumpsegs.py +94 -0
- westpa/cli/tools/w_eddist.py +506 -0
- westpa/cli/tools/w_fluxanl.py +378 -0
- westpa/cli/tools/w_ipa.py +833 -0
- westpa/cli/tools/w_kinavg.py +127 -0
- westpa/cli/tools/w_kinetics.py +96 -0
- westpa/cli/tools/w_multi_west.py +414 -0
- westpa/cli/tools/w_ntop.py +213 -0
- westpa/cli/tools/w_pdist.py +515 -0
- westpa/cli/tools/w_postanalysis_matrix.py +82 -0
- westpa/cli/tools/w_postanalysis_reweight.py +53 -0
- westpa/cli/tools/w_red.py +486 -0
- westpa/cli/tools/w_reweight.py +780 -0
- westpa/cli/tools/w_select.py +226 -0
- westpa/cli/tools/w_stateprobs.py +111 -0
- westpa/cli/tools/w_trace.py +599 -0
- westpa/core/__init__.py +0 -0
- westpa/core/_rc.py +673 -0
- westpa/core/binning/__init__.py +55 -0
- westpa/core/binning/_assign.cpython-312-darwin.so +0 -0
- westpa/core/binning/assign.py +449 -0
- westpa/core/binning/binless.py +96 -0
- westpa/core/binning/binless_driver.py +54 -0
- westpa/core/binning/binless_manager.py +190 -0
- westpa/core/binning/bins.py +47 -0
- westpa/core/binning/mab.py +427 -0
- westpa/core/binning/mab_driver.py +54 -0
- westpa/core/binning/mab_manager.py +198 -0
- westpa/core/data_manager.py +1694 -0
- westpa/core/extloader.py +74 -0
- westpa/core/h5io.py +995 -0
- westpa/core/kinetics/__init__.py +24 -0
- westpa/core/kinetics/_kinetics.cpython-312-darwin.so +0 -0
- westpa/core/kinetics/events.py +147 -0
- westpa/core/kinetics/matrates.py +156 -0
- westpa/core/kinetics/rate_averaging.py +266 -0
- westpa/core/progress.py +218 -0
- westpa/core/propagators/__init__.py +54 -0
- westpa/core/propagators/executable.py +715 -0
- westpa/core/reweight/__init__.py +14 -0
- westpa/core/reweight/_reweight.cpython-312-darwin.so +0 -0
- westpa/core/reweight/matrix.py +126 -0
- westpa/core/segment.py +119 -0
- westpa/core/sim_manager.py +830 -0
- westpa/core/states.py +359 -0
- westpa/core/systems.py +93 -0
- westpa/core/textio.py +74 -0
- westpa/core/trajectory.py +330 -0
- westpa/core/we_driver.py +908 -0
- westpa/core/wm_ops.py +43 -0
- westpa/core/yamlcfg.py +391 -0
- westpa/fasthist/__init__.py +34 -0
- westpa/fasthist/__main__.py +110 -0
- westpa/fasthist/_fasthist.cpython-312-darwin.so +0 -0
- westpa/mclib/__init__.py +264 -0
- westpa/mclib/__main__.py +28 -0
- westpa/mclib/_mclib.cpython-312-darwin.so +0 -0
- westpa/oldtools/__init__.py +4 -0
- westpa/oldtools/aframe/__init__.py +35 -0
- westpa/oldtools/aframe/atool.py +75 -0
- westpa/oldtools/aframe/base_mixin.py +26 -0
- westpa/oldtools/aframe/binning.py +178 -0
- westpa/oldtools/aframe/data_reader.py +560 -0
- westpa/oldtools/aframe/iter_range.py +200 -0
- westpa/oldtools/aframe/kinetics.py +117 -0
- westpa/oldtools/aframe/mcbs.py +146 -0
- westpa/oldtools/aframe/output.py +39 -0
- westpa/oldtools/aframe/plotting.py +90 -0
- westpa/oldtools/aframe/trajwalker.py +126 -0
- westpa/oldtools/aframe/transitions.py +469 -0
- westpa/oldtools/cmds/__init__.py +0 -0
- westpa/oldtools/cmds/w_ttimes.py +358 -0
- westpa/oldtools/files.py +34 -0
- westpa/oldtools/miscfn.py +23 -0
- westpa/oldtools/stats/__init__.py +4 -0
- westpa/oldtools/stats/accumulator.py +35 -0
- westpa/oldtools/stats/edfs.py +129 -0
- westpa/oldtools/stats/mcbs.py +89 -0
- westpa/tools/__init__.py +33 -0
- westpa/tools/binning.py +472 -0
- westpa/tools/core.py +340 -0
- westpa/tools/data_reader.py +159 -0
- westpa/tools/dtypes.py +31 -0
- westpa/tools/iter_range.py +198 -0
- westpa/tools/kinetics_tool.py +340 -0
- westpa/tools/plot.py +283 -0
- westpa/tools/progress.py +17 -0
- westpa/tools/selected_segs.py +154 -0
- westpa/tools/wipi.py +751 -0
- westpa/trajtree/__init__.py +4 -0
- westpa/trajtree/_trajtree.cpython-312-darwin.so +0 -0
- westpa/trajtree/trajtree.py +117 -0
- westpa/westext/__init__.py +0 -0
- westpa/westext/adaptvoronoi/__init__.py +3 -0
- westpa/westext/adaptvoronoi/adaptVor_driver.py +214 -0
- westpa/westext/hamsm_restarting/__init__.py +3 -0
- westpa/westext/hamsm_restarting/example_overrides.py +35 -0
- westpa/westext/hamsm_restarting/restart_driver.py +1165 -0
- westpa/westext/stringmethod/__init__.py +11 -0
- westpa/westext/stringmethod/fourier_fitting.py +69 -0
- westpa/westext/stringmethod/string_driver.py +253 -0
- westpa/westext/stringmethod/string_method.py +306 -0
- westpa/westext/weed/BinCluster.py +180 -0
- westpa/westext/weed/ProbAdjustEquil.py +100 -0
- westpa/westext/weed/UncertMath.py +247 -0
- westpa/westext/weed/__init__.py +10 -0
- westpa/westext/weed/weed_driver.py +182 -0
- westpa/westext/wess/ProbAdjust.py +101 -0
- westpa/westext/wess/__init__.py +6 -0
- westpa/westext/wess/wess_driver.py +207 -0
- westpa/work_managers/__init__.py +57 -0
- westpa/work_managers/core.py +396 -0
- westpa/work_managers/environment.py +134 -0
- westpa/work_managers/mpi.py +318 -0
- westpa/work_managers/processes.py +187 -0
- westpa/work_managers/serial.py +28 -0
- westpa/work_managers/threads.py +79 -0
- westpa/work_managers/zeromq/__init__.py +20 -0
- westpa/work_managers/zeromq/core.py +641 -0
- westpa/work_managers/zeromq/node.py +131 -0
- westpa/work_managers/zeromq/work_manager.py +526 -0
- westpa/work_managers/zeromq/worker.py +320 -0
- westpa-2022.10.dist-info/AUTHORS +22 -0
- westpa-2022.10.dist-info/LICENSE +21 -0
- westpa-2022.10.dist-info/METADATA +183 -0
- westpa-2022.10.dist-info/RECORD +150 -0
- westpa-2022.10.dist-info/WHEEL +5 -0
- westpa-2022.10.dist-info/entry_points.txt +29 -0
- westpa-2022.10.dist-info/top_level.txt +1 -0

westpa/westext/weed/BinCluster.py
@@ -0,0 +1,180 @@
+import numpy as np
+
+
+from . import UncertMath
+
+
+class ClusterList:
+    def __init__(self, ratios, nbins):
+        super().__init__()
+        self.nbins = nbins
+        self.ratios = ratios
+
+        # Create an array to hold bin assignments and initial set all to -1 == not clustered
+        self.bin_assign = np.empty((nbins,), dtype=int)
+        self.bin_assign.fill(-1)
+
+        # Initialize an ucert container to hold per bin information; initially mask all elements
+        # note: masking is implicit since rates set to 0
+        dum_data = np.zeros((nbins,))
+        self.bin_data = UncertMath.UncertContainer(dum_data.copy(), dum_data.copy(), dum_data.copy())
+
+        self.cluster_id = 0  # ID of newest cluster
+        self.cluster_contents = {}  # Dictionary containing sets of bin ids with keys = cluster ids
+
+    def join(self, pairs):
+        """Join clusters given a tuple (i,j) of bin pairs"""
+
+        for i, j in zip(*pairs):
+            # Both bins not joined
+            if self.bin_assign[i] == -1 and self.bin_assign[j] == -1:
+                # Create new cluster
+                self.bin_assign[i] = self.cluster_id
+                self.bin_assign[j] = self.cluster_id
+
+                self.cluster_contents[self.cluster_id] = {i, j}
+                self.cluster_id += 1
+
+                rij = self.ratios[i, j]
+                denom = rij + 1.0
+                self.bin_data[i] = rij / denom  # relative probability for bin i
+                self.bin_data[j] = denom.recip()  # relative probability for bin j
+
+            # Only one bin previously assigned to a cluster
+            elif self.bin_assign[i] == -1 or self.bin_assign[j] == -1:
+                if self.bin_assign[i] == -1:
+                    idum, jdum = i, j
+                else:
+                    idum, jdum = j, i
+
+                jclust = self.bin_assign[jdum]
+                jclust_mid = np.where(self.bin_assign == jclust)[0]  # index of bins in jclust
+
+                rik = self.ratios[idum, jclust_mid]
+                pk = self.bin_data[jclust_mid]
+                piTmp = rik * pk  # estimate for p_idum / P_cluster based on 'path' through bin k
+                # Note that here P_cluster is value before addition of bin idum
+                piTmp_avg = piTmp.weighted_average(axis=0)
+
+                # now, compute relative prob of each bin in *new* cluster (including bin idum)
+                denom = piTmp_avg + 1.0
+                self.bin_data[idum] = piTmp_avg / denom
+
+                # Update bins already in cluster
+                self.bin_data[jclust_mid] = self.bin_data[jclust_mid] / denom
+
+                # Move bin idum into cluster jclust
+                self.bin_assign[idum] = jclust
+                self.cluster_contents[jclust].update({idum})
+
+            # Both bins previously assigned to different cluster; Join clusters
+            elif not self.bin_assign[i] == self.bin_assign[j]:
+                iclust = self.bin_assign[i]
+                jclust = self.bin_assign[j]
+
+                iclust_mid = np.where(self.bin_assign == iclust)[0]  # indx of bins in cluster i
+                jclust_mid = np.where(self.bin_assign == jclust)[0]  # indx of bins in cluster j
+
+                niclust = iclust_mid.size
+                njclust = jclust_mid.size
+
+                dum_data = np.zeros((niclust * njclust,))
+                ij_cluster_ratio = UncertMath.UncertContainer(dum_data.copy(), dum_data.copy(), dum_data.copy())
+
+                for count, im in enumerate(iclust_mid):
+                    rij = self.ratios[im, jclust_mid]
+                    pi = self.bin_data[im]
+                    pj = self.bin_data[jclust_mid]
+
+                    ij_cluster_ratio[count * njclust : (count + 1) * njclust] = rij * pj / pi
+
+                ij_cluster_ratio = ij_cluster_ratio.weighted_average(axis=0)
+
+                idenom = ij_cluster_ratio.recip() + 1.0
+                jdenom = ij_cluster_ratio + 1.0
+
+                self.bin_data[iclust_mid] = self.bin_data[iclust_mid] / idenom
+                self.bin_data[jclust_mid] = self.bin_data[jclust_mid] / jdenom
+
+                # Join all bins in cluster j into cluster iclust
+                self.bin_assign[jclust_mid] = iclust
+
+                # Move contents of jclust into iclust
+                self.cluster_contents[iclust].update(self.cluster_contents[jclust])
+
+                # Clear contents of jclust
+                self.cluster_contents[jclust].clear()
+
+                if len(self.cluster_contents[iclust]) == self.nbins:
+                    break
+
+    def join_simple(self, pairs):
+        """Join clusters using direct ratios given a tuple (i,j) of bin pairs"""
+
+        for i, j in zip(*pairs):
+            # Both bins not joined
+            if self.bin_assign[i] == -1 and self.bin_assign[j] == -1:
+                # Create new cluster
+                self.bin_assign[i] = self.cluster_id
+                self.bin_assign[j] = self.cluster_id
+
+                self.cluster_contents[self.cluster_id] = {i, j}
+                self.cluster_id += 1
+
+                rij = self.ratios[i, j]
+                denom = rij + 1.0
+                self.bin_data[i] = rij / denom  # relative probability for bin i
+                self.bin_data[j] = denom.recip()  # relative probability for bin j
+
+            # Only one bin previously assigned to a cluster
+            elif self.bin_assign[i] == -1 or self.bin_assign[j] == -1:
+                if self.bin_assign[i] == -1:
+                    idum, jdum = i, j
+                else:
+                    idum, jdum = j, i
+
+                jclust = self.bin_assign[jdum]
+                rik = self.ratios[idum, jdum]
+                pk = self.bin_data[jdum]
+                piTmp = rik * pk  # estimate for p_idum / P_cluster based on 'path' through bin k
+                # Note that here P_cluster is value before addition of bin idum
+                # now, compute relative prob of each bin in *new* cluster (including bin idum)
+                denom = piTmp + 1.0
+                self.bin_data[idum] = piTmp / denom
+
+                # Update bins already in cluster
+                jclust_mid = np.where(self.bin_assign == jclust)  # index of bins in jclust
+                self.bin_data[jclust_mid] = self.bin_data[jclust_mid] / denom
+
+                # Move bin idum into cluster jclust
+                self.bin_assign[idum] = jclust
+                self.cluster_contents[jclust].update({idum})
+
+            # Both bins previously assigned to different cluster; Join clusters
+            elif not self.bin_assign[i] == self.bin_assign[j]:
+                iclust = self.bin_assign[i]
+                jclust = self.bin_assign[j]
+                rij = self.ratios[i, j]
+                pi = self.bin_data[i]
+                pj = self.bin_data[j]
+                ij_cluster_ratio = rij * pj / pi
+                idenom = ij_cluster_ratio.recip() + 1.0
+                jdenom = ij_cluster_ratio + 1.0
+
+                iclust_mid = np.where(self.bin_assign == iclust)
+                self.bin_data[iclust_mid] = self.bin_data[iclust_mid] / idenom
+
+                jclust_mid = np.where(self.bin_assign == jclust)
+                self.bin_data[jclust_mid] = self.bin_data[jclust_mid] / jdenom
+
+                # Join all bins in cluster j into cluster iclust
+                self.bin_assign[jclust_mid] = iclust
+
+                # Move contents of jclust into iclust
+                self.cluster_contents[iclust].update(self.cluster_contents[jclust])
+
+                # Clear contents of jclust
+                self.cluster_contents[jclust].clear()
+
+                if len(self.cluster_contents[iclust]) == self.nbins:
+                    break
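
For orientation, here is a minimal usage sketch of ClusterList (not part of the released diff), assuming the wheel above is installed. The ratios argument is an UncertMath.UncertContainer of pairwise probability-ratio estimates p_i / p_j; the matrix values, the +/-10% bounds, and the bin pairs below are invented for illustration.

import numpy as np
from westpa.westext.weed import BinCluster, UncertMath

# Hypothetical 3x3 matrix of probability-ratio estimates p_i / p_j,
# with +/-10% bounds standing in for real uncertainties.
vals = np.array([[1.0, 2.0, 4.0],
                 [0.5, 1.0, 2.0],
                 [0.25, 0.5, 1.0]])
ratios = UncertMath.UncertContainer(vals.copy(), 0.9 * vals, 1.1 * vals)

clusters = BinCluster.ClusterList(ratios, 3)
# Pairs are passed as parallel index arrays: join bins (0, 1), then (0, 2).
clusters.join_simple((np.array([0, 0]), np.array([1, 2])))

print(clusters.bin_assign)     # cluster id assigned to each bin
print(clusters.bin_data.vals)  # relative probability of each bin within its cluster

In practice this class is driven by probAdjustEquil (next file), which builds the ratio matrix from rate estimates and feeds in uncertainty-sorted bin pairs.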

westpa/westext/weed/ProbAdjustEquil.py
@@ -0,0 +1,100 @@
+import numpy as np
+import numpy.ma as ma
+
+from . import UncertMath
+from . import BinCluster
+
+
+def probAdjustEquil(binProb, rates, uncert, threshold=0.0, fullCalcClust=False, fullCalcBins=False):
+    """This function adjusts bin pops in binProb using rates and uncert matrices
+    fullCalcBins --> True for weighted avg, False for simple calc
+    fullCalcClust --> True for weighted avg, False for simple calc
+    threshold --> minimum weight (relative to max) for another value to be averaged
+                  only matters if fullCalcBins == True (or later perhaps if fullCalcClust == True)
+    """
+
+    # Check that rate matrix is square
+    Ni, Nj = rates.shape
+    if Ni != Nj:
+        print('\nWARNING: Not a square matrix!\n')
+
+    zi = np.where(binProb == 0.0)[0]  # indices of bins with zero probability
+
+    rates_uncert = UncertMath.UncertContainer(rates, rates - uncert, rates + uncert)
+
+    # STEP 1a: Create matrix of ratios of probabilities based on DIRECT estimates
+    # that is, ij element is p_i / p_j = k_ji / k_ij
+
+    ratios_direct = rates_uncert.transpose() / rates_uncert
+
+    # STEP 1b: Create averaged matrix of ratios of probabilities based on both direct and indirect estimates
+    # Indirect means '3rd bin' estimates: p_i / p_j = ( k_ki / k_ik ) ( k_jk / k_kj )
+    # Turns out this is not helpful, so generally set fullCalcBins = 0
+    if fullCalcBins:
+        # Calculate indirect ratios using Einstein Summation convention where
+        # ratios_indirect_kij = ( k_ki / k_ik ) ( k_jk / k_kj ) = ratios_direct_ik * ratios_direct_kj
+        ri_vals = np.einsum('ik,kj->kij', ratios_direct.vals, ratios_direct.vals)
+        ri_min = np.einsum('ik,kj->kij', ratios_direct.dmin, ratios_direct.dmin)
+        ri_max = np.einsum('ik,kj->kij', ratios_direct.dmax, ratios_direct.dmax)
+        ratios_indirect = UncertMath.UncertContainer(ri_vals, ri_min, ri_max, mask=ratios_direct.vals.mask)
+
+        # Threshold indirect ratios
+        ti = ratios_indirect.wt < ratios_direct * threshold
+        ratios_indirect.mask = ti
+        ratios_indirect.update_mask()
+
+        ratios_indirect.concatenate(ratios_direct, axis=0)
+        ratios_average = ratios_indirect.weighted_average(axis=0)
+
+    else:
+        ratios_average = ratios_direct.weighted_average(axis=0, expaxis=0)
+
+    # STEP 2: Form clusters
+
+    # STEP 2a: Sort probability ratios based on uncertainty
+    # Sort uncertainties of ratios_average subject to the convention that p_i < p_j
+
+    i, j = np.triu_indices(Ni, 1)  # indices of ij pairs where i != j
+
+    # Remove pairs that include a bin that has zero probability
+    nzi = (binProb[i] != 0.0) & (binProb[j] != 0.0)
+    i = i[nzi]
+    j = j[nzi]
+
+    vals = ma.vstack((ratios_average.vals[i, j], ratios_average.vals[j, i]))
+    ias = ma.argsort(vals, axis=0, fill_value=np.inf)
+
+    ordered_ind = np.vstack((i, j))
+    flip_ind = np.nonzero(ias[0, :])  # Find pairs in which to select ji rather than ij
+    ordered_ind[:, flip_ind[0]] = ordered_ind[:, flip_ind[0]][::-1]
+
+    iind = ordered_ind[0, :]
+    jind = ordered_ind[1, :]
+    uncertij = ratios_average.uncert[iind, jind]  # Get the uncert for ij pairs
+
+    count = uncertij.count()  # Count of the unmasked uncertainties
+    ias = ma.argsort(uncertij, fill_value=np.inf)  # Get the indices that would sort uncertij
+    iind = iind[ias[:count]]  # Sort the indices excluding masked/undefined values
+    jind = jind[ias[:count]]
+
+    # STEP 2b: Create ClusterList object and cluster bins
+    clusters = BinCluster.ClusterList(ratios_average, Ni)
+
+    if fullCalcClust:
+        clusters.join((iind, jind))
+    else:
+        clusters.join_simple((iind, jind))
+
+    total_prob = 0.0  # total probability in all clusters
+    for cid in clusters.cluster_contents:
+        binlist = list(clusters.cluster_contents[cid])
+        if len(binlist):
+            prob_cluster = binProb[binlist].sum()
+            total_prob += prob_cluster
+
+            binProb[binlist] = prob_cluster * clusters.bin_data[binlist].vals
+
+    binProb[zi] = 0.0  # re-zero bins that previously had zero prob
+    # for bi,p in enumerate(binProb):
+    #     print('bin: {} -- {}'.format(bi,p))
+    print('.........Total Probability: {}'.format(binProb.sum()))
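
A minimal, illustrative call of probAdjustEquil (not part of the released diff), assuming the wheel above is installed. The bin count, rate matrix, and uncertainties are invented placeholders; binProb is modified in place so that bins joined into the same cluster redistribute their probability according to the estimated ratios.

import numpy as np
from westpa.westext.weed import probAdjustEquil

nbins = 4
rng = np.random.default_rng(0)

binProb = np.full(nbins, 1.0 / nbins)          # current bin probabilities
rates = rng.uniform(0.1, 1.0, (nbins, nbins))  # placeholder pairwise rate estimates k_ij
np.fill_diagonal(rates, 0.0)
uncert = 0.1 * rates                           # placeholder uncertainty on each rate

probAdjustEquil(binProb, rates, uncert)        # defaults use the simple (non-weighted) path
print(binProb, binProb.sum())

With the defaults (fullCalcBins=False, fullCalcClust=False) only the direct ratio estimates and join_simple are used, which matches the recommendation in the comments above.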

westpa/westext/weed/UncertMath.py
@@ -0,0 +1,247 @@
+import numpy as np
+import numpy.ma as ma
+import itertools
+
+
+TOL = 1.0e-6
+
+
+class UncertContainer:
+    """Container to hold uncertainty measurements. Data is convert to np masked arrays
+    to avoid possible numerical problems
+    """
+
+    def __init__(self, vals, vals_dmin, vals_dmax, mask=ma.nomask):
+        super().__init__()
+
+        # If input data already masked arrays extract unmasked data
+        if ma.isMaskedArray(vals):
+            vals = vals.data
+        if ma.isMaskedArray(vals_dmin):
+            vals_dmin = vals_dmin.data
+        if ma.isMaskedArray(vals_dmax):
+            vals_dmax = vals_dmax.data
+
+        # Adjust negative values
+        ineg = np.where(vals_dmin <= 0.0)
+        vals_dmin[ineg] = TOL * vals[ineg]
+
+        # Calculate weight based on fractional uncertainty
+        diff = vals_dmax - vals_dmin
+        diff_m = ma.masked_where(vals_dmax == vals_dmin, diff)
+
+        self.vals = ma.masked_where(vals == 0.0, vals)
+
+        self.wt = (self.vals / diff_m) ** 2
+        self.uncert = diff_m / self.vals
+
+        self.wt.fill_value = np.inf
+        self.uncert.fill_vaule = np.inf
+
+        assert np.all(self.wt.mask == self.uncert.mask)
+
+        # Mask data if uncertainty is not finite or if any of the inputs were
+        # already masked
+
+        mm = ma.mask_or(self.wt.mask, mask)
+
+        self.vals.mask = mm
+        self.wt.mask = mm
+        self.uncert.mask = mm
+        self.dmin = ma.array(vals_dmin, mask=mm, fill_value=np.inf)
+        self.dmax = ma.array(vals_dmax, mask=mm, fill_value=np.inf)
+
+        self.mask = ma.getmaskarray(self.vals)
+
+    def __getitem__(self, indx):
+        vals = self.vals[indx]
+        dmin = self.dmin[indx]
+        dmax = self.dmax[indx]
+
+        if isinstance(vals, ma.core.MaskedConstant):
+            dum = np.zeros((1,))
+            return UncertContainer(dum.copy(), dum.copy(), dum.copy())
+        elif isinstance(vals, (float, int, np.floating, np.integer)):
+            return UncertContainer(np.array([vals]), np.array([dmin]), np.array([dmax]))
+        elif isinstance(vals, np.ndarray):
+            return UncertContainer(vals, dmin, dmax, mask=vals.mask)
+        else:
+            raise TypeError
+
+    def __setitem__(self, indx, value):
+        if isinstance(value, UncertContainer):
+            self.vals[indx] = value.vals
+            self.dmin[indx] = value.dmin
+            self.dmax[indx] = value.dmax
+            self.wt[indx] = value.wt
+            self.uncert[indx] = value.uncert
+        else:
+            raise TypeError('Can only set values with an UncertContainer object')
+
+    def __repr__(self):
+        return 'shape={} vals={} dmin={} dmax={} vals.mask={}'.format(
+            self.vals.shape, self.vals, self.dmin, self.dmax, self.vals.mask
+        )
+
+    def __add__(self, value):
+        if isinstance(value, UncertContainer):
+            vals = self.vals + value.vals
+            dmin = self.dmin + value.dmin
+            dmax = self.dmax + value.dmax
+
+            return UncertContainer(vals, dmin, dmax, mask=vals.mask)
+        elif isinstance(value, (float, int, np.floating, np.integer)):
+            vals = self.vals + value
+            dmin = self.dmin + value
+            dmax = self.dmax + value
+
+            return UncertContainer(vals, dmin, dmax, mask=vals.mask)
+        else:
+            raise TypeError('Attempt to add value of unsupported type')
+
+    def __sub__(self, value):
+        if isinstance(value, UncertContainer):
+            vals = self.vals - value.vals
+            dmin = self.dmin - value.dmin
+            dmax = self.dmax - value.dmax
+
+            return UncertContainer(vals, dmin, dmax, mask=vals.mask)
+        else:
+            raise TypeError('Attempted to subtract by value of unsupported type')
+
+    def __mul__(self, value):
+        if isinstance(value, UncertContainer):
+            vals = self.vals * value.vals
+            dmin = self.dmin * value.dmin
+            dmax = self.dmax * value.dmax
+
+            return UncertContainer(vals, dmin, dmax, mask=vals.mask)
+
+        elif isinstance(value, (float, int, np.floating, np.integer)):
+            vals = self.vals * value
+            dmin = self.dmin * value
+            dmax = self.dmax * value
+
+            return UncertContainer(vals, dmin, dmax, mask=vals.mask)
+        else:
+            raise TypeError('Attempted to multiply by value of unsupported type')
+
+    def __truediv__(self, value):
+        if isinstance(value, UncertContainer):
+            vals = self.vals / value.vals
+            dmin = self.dmin / value.dmax
+            dmax = self.dmax / value.dmin
+
+            return UncertContainer(vals, dmin, dmax, mask=vals.mask)
+        else:
+            raise TypeError('Attempted to divide by unsupported type')
+
+    def transpose(self):
+        vals = self.vals.T
+        dmin = self.dmin.T
+        dmax = self.dmax.T
+
+        return UncertContainer(vals, dmin, dmax, mask=vals.mask)
+
+    def recip(self):
+        vals = 1.0 / self.vals
+        dmin = 1.0 / self.dmax
+        dmax = 1.0 / self.dmin
+
+        return UncertContainer(vals, dmin, dmax, mask=vals.mask)
+
+    def update_mask(self):
+        self.vals.mask = self.mask
+        self.dmin.mask = self.mask
+        self.dmax.mask = self.mask
+        self.wt.mask = self.mask
+        self.uncert.mask = self.mask
+
+    def concatenate(self, value, axis=0):
+        """Concatentate UncertContainer value to self.
+        Assumes that if dimensions of self and value do not match, to
+        add a np.newaxis along axis of value
+        """
+
+        if isinstance(value, UncertContainer):
+            if value.vals.ndim == self.vals.ndim:
+                vals = value.vals
+                dmin = value.dmin
+                dmax = value.dmax
+                wt = value.wt
+                uncert = value.uncert
+                mask = value.mask
+            elif (value.vals.ndim + 1) == self.vals.ndim:
+                vals = ma.expand_dims(value.vals, axis)
+                dmin = ma.expand_dims(value.dmin, axis)
+                dmax = ma.expand_dims(value.dmax, axis)
+                wt = ma.expand_dims(value.wt, axis)
+                uncert = ma.expand_dims(value.uncert, axis)
+                mask = np.expand_dims(value.mask, axis)
+            else:
+                raise ValueError('Could not propery match dimensionality')
+
+            self.vals = ma.concatenate((self.vals, vals), axis=axis)
+            self.dmin = ma.concatenate((self.dmin, dmin), axis=axis)
+            self.dmax = ma.concatenate((self.dmax, dmax), axis=axis)
+            self.wt = ma.concatenate((self.wt, wt), axis=axis)
+            self.uncert = ma.concatenate((self.uncert, uncert), axis=axis)
+
+            self.mask = np.concatenate((self.mask, mask), axis=axis)
+        else:
+            raise ValueError('Can only concatenate with an UncertContainer object')
+
+    def weighted_average(self, axis=0, expaxis=None):
+        """Calculate weighted average of data along axis
+        after optionally inserting a new dimension into the
+        shape array at position expaxis
+        """
+
+        if expaxis is not None:
+            vals = ma.expand_dims(self.vals, expaxis)
+            dmin = ma.expand_dims(self.dmin, expaxis)
+            dmax = ma.expand_dims(self.dmax, expaxis)
+            wt = ma.expand_dims(self.wt, expaxis)
+        else:
+            vals = self.vals
+            wt = self.wt
+            dmin = self.dmin
+            dmax = self.dmax
+
+        # Get average value
+        avg, norm = ma.average(vals, axis=axis, weights=wt, returned=True)
+        avg_ex = ma.expand_dims(avg, 0)
+
+        # Calculate weighted uncertainty
+        wtmax = ma.max(wt, axis=axis)
+        neff = norm / wtmax  # Effective number of samples based on uncertainties
+
+        # Seeking max deviation from the average; if above avg use max, if below use min
+        term = np.empty_like(vals)
+
+        indices = np.where(vals > avg_ex)
+        i0 = indices[0]
+        irest = indices[1:]
+        ii = tuple(x for x in itertools.chain([i0], irest))
+        jj = tuple(x for x in itertools.chain([np.zeros_like(i0)], irest))
+        term[ii] = (dmax[ii] - avg_ex[jj]) ** 2
+
+        indices = np.where(vals <= avg_ex)
+        i0 = indices[0]
+        irest = indices[1:]
+        ii = tuple(x for x in itertools.chain([i0], irest))
+        jj = tuple(x for x in itertools.chain([np.zeros_like(i0)], irest))
+        term[ii] = (avg_ex[jj] - dmin[ii]) ** 2
+
+        dsum = ma.sum(term * wt, axis=0)  # Sum for weighted average of deviations
+
+        dev = 0.5 * np.sqrt(dsum / (norm * neff))
+
+        if isinstance(avg, (float, np.floating)):
+            avg = avg_ex
+
+        tmp_min = avg - dev
+        ii = np.where(tmp_min < 0)
+        tmp_min[ii] = TOL * avg[ii]
+
+        return UncertContainer(avg, tmp_min, avg + dev)
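
A small sketch of UncertContainer in isolation (not part of the released diff), assuming the wheel above is installed; the values and the +/-20% bounds are invented. Each element carries a value plus lower and upper bounds, and arithmetic propagates the bounds (note that recip() and division swap the roles of dmin and dmax).

import numpy as np
from westpa.westext.weed import UncertMath

# Hypothetical 2x3 block of estimates with +/-20% bounds
vals = np.array([[1.0, 2.0, 4.0],
                 [1.5, 2.5, 3.5]])
uc = UncertMath.UncertContainer(vals.copy(), 0.8 * vals, 1.2 * vals)

col_avg = uc.weighted_average(axis=0)   # weighted average down each column
print(col_avg.vals, col_avg.dmin, col_avg.dmax)

inv = uc.recip()                        # elementwise reciprocal; bounds swap
print(inv.vals[0, 0], inv.dmin[0, 0], inv.dmax[0, 0])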

westpa/westext/weed/__init__.py
@@ -0,0 +1,10 @@
+'''westext.weed -- Support for weighted ensemble equilibrium dynamics
+
+Initial code by Dan Zuckerman (May 2011), integration by Matt Zwier,
+and testing by Carsen Stringer. Re-factoring and optimization of probability
+adjustment routines by Joshua L. Adelman (January 2012).
+'''
+
+from . import BinCluster, ProbAdjustEquil, UncertMath, weed_driver  # noqa
+from .ProbAdjustEquil import probAdjustEquil  # noqa
+from .weed_driver import WEEDDriver  # noqa
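
As the __init__.py above shows, the subpackage re-exports its main entry points, so downstream code can import them directly rather than reaching into the submodules:

from westpa.westext.weed import probAdjustEquil, WEEDDriver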