westpa-2022.10-cp312-cp312-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of westpa might be problematic.

Files changed (150)
  1. westpa/__init__.py +14 -0
  2. westpa/_version.py +21 -0
  3. westpa/analysis/__init__.py +5 -0
  4. westpa/analysis/core.py +746 -0
  5. westpa/analysis/statistics.py +27 -0
  6. westpa/analysis/trajectories.py +360 -0
  7. westpa/cli/__init__.py +0 -0
  8. westpa/cli/core/__init__.py +0 -0
  9. westpa/cli/core/w_fork.py +152 -0
  10. westpa/cli/core/w_init.py +230 -0
  11. westpa/cli/core/w_run.py +77 -0
  12. westpa/cli/core/w_states.py +212 -0
  13. westpa/cli/core/w_succ.py +99 -0
  14. westpa/cli/core/w_truncate.py +59 -0
  15. westpa/cli/tools/__init__.py +0 -0
  16. westpa/cli/tools/ploterr.py +506 -0
  17. westpa/cli/tools/plothist.py +706 -0
  18. westpa/cli/tools/w_assign.py +596 -0
  19. westpa/cli/tools/w_bins.py +166 -0
  20. westpa/cli/tools/w_crawl.py +119 -0
  21. westpa/cli/tools/w_direct.py +547 -0
  22. westpa/cli/tools/w_dumpsegs.py +94 -0
  23. westpa/cli/tools/w_eddist.py +506 -0
  24. westpa/cli/tools/w_fluxanl.py +378 -0
  25. westpa/cli/tools/w_ipa.py +833 -0
  26. westpa/cli/tools/w_kinavg.py +127 -0
  27. westpa/cli/tools/w_kinetics.py +96 -0
  28. westpa/cli/tools/w_multi_west.py +414 -0
  29. westpa/cli/tools/w_ntop.py +213 -0
  30. westpa/cli/tools/w_pdist.py +515 -0
  31. westpa/cli/tools/w_postanalysis_matrix.py +82 -0
  32. westpa/cli/tools/w_postanalysis_reweight.py +53 -0
  33. westpa/cli/tools/w_red.py +486 -0
  34. westpa/cli/tools/w_reweight.py +780 -0
  35. westpa/cli/tools/w_select.py +226 -0
  36. westpa/cli/tools/w_stateprobs.py +111 -0
  37. westpa/cli/tools/w_trace.py +599 -0
  38. westpa/core/__init__.py +0 -0
  39. westpa/core/_rc.py +673 -0
  40. westpa/core/binning/__init__.py +55 -0
  41. westpa/core/binning/_assign.cpython-312-darwin.so +0 -0
  42. westpa/core/binning/assign.py +449 -0
  43. westpa/core/binning/binless.py +96 -0
  44. westpa/core/binning/binless_driver.py +54 -0
  45. westpa/core/binning/binless_manager.py +190 -0
  46. westpa/core/binning/bins.py +47 -0
  47. westpa/core/binning/mab.py +427 -0
  48. westpa/core/binning/mab_driver.py +54 -0
  49. westpa/core/binning/mab_manager.py +198 -0
  50. westpa/core/data_manager.py +1694 -0
  51. westpa/core/extloader.py +74 -0
  52. westpa/core/h5io.py +995 -0
  53. westpa/core/kinetics/__init__.py +24 -0
  54. westpa/core/kinetics/_kinetics.cpython-312-darwin.so +0 -0
  55. westpa/core/kinetics/events.py +147 -0
  56. westpa/core/kinetics/matrates.py +156 -0
  57. westpa/core/kinetics/rate_averaging.py +266 -0
  58. westpa/core/progress.py +218 -0
  59. westpa/core/propagators/__init__.py +54 -0
  60. westpa/core/propagators/executable.py +715 -0
  61. westpa/core/reweight/__init__.py +14 -0
  62. westpa/core/reweight/_reweight.cpython-312-darwin.so +0 -0
  63. westpa/core/reweight/matrix.py +126 -0
  64. westpa/core/segment.py +119 -0
  65. westpa/core/sim_manager.py +830 -0
  66. westpa/core/states.py +359 -0
  67. westpa/core/systems.py +93 -0
  68. westpa/core/textio.py +74 -0
  69. westpa/core/trajectory.py +330 -0
  70. westpa/core/we_driver.py +908 -0
  71. westpa/core/wm_ops.py +43 -0
  72. westpa/core/yamlcfg.py +391 -0
  73. westpa/fasthist/__init__.py +34 -0
  74. westpa/fasthist/__main__.py +110 -0
  75. westpa/fasthist/_fasthist.cpython-312-darwin.so +0 -0
  76. westpa/mclib/__init__.py +264 -0
  77. westpa/mclib/__main__.py +28 -0
  78. westpa/mclib/_mclib.cpython-312-darwin.so +0 -0
  79. westpa/oldtools/__init__.py +4 -0
  80. westpa/oldtools/aframe/__init__.py +35 -0
  81. westpa/oldtools/aframe/atool.py +75 -0
  82. westpa/oldtools/aframe/base_mixin.py +26 -0
  83. westpa/oldtools/aframe/binning.py +178 -0
  84. westpa/oldtools/aframe/data_reader.py +560 -0
  85. westpa/oldtools/aframe/iter_range.py +200 -0
  86. westpa/oldtools/aframe/kinetics.py +117 -0
  87. westpa/oldtools/aframe/mcbs.py +146 -0
  88. westpa/oldtools/aframe/output.py +39 -0
  89. westpa/oldtools/aframe/plotting.py +90 -0
  90. westpa/oldtools/aframe/trajwalker.py +126 -0
  91. westpa/oldtools/aframe/transitions.py +469 -0
  92. westpa/oldtools/cmds/__init__.py +0 -0
  93. westpa/oldtools/cmds/w_ttimes.py +358 -0
  94. westpa/oldtools/files.py +34 -0
  95. westpa/oldtools/miscfn.py +23 -0
  96. westpa/oldtools/stats/__init__.py +4 -0
  97. westpa/oldtools/stats/accumulator.py +35 -0
  98. westpa/oldtools/stats/edfs.py +129 -0
  99. westpa/oldtools/stats/mcbs.py +89 -0
  100. westpa/tools/__init__.py +33 -0
  101. westpa/tools/binning.py +472 -0
  102. westpa/tools/core.py +340 -0
  103. westpa/tools/data_reader.py +159 -0
  104. westpa/tools/dtypes.py +31 -0
  105. westpa/tools/iter_range.py +198 -0
  106. westpa/tools/kinetics_tool.py +340 -0
  107. westpa/tools/plot.py +283 -0
  108. westpa/tools/progress.py +17 -0
  109. westpa/tools/selected_segs.py +154 -0
  110. westpa/tools/wipi.py +751 -0
  111. westpa/trajtree/__init__.py +4 -0
  112. westpa/trajtree/_trajtree.cpython-312-darwin.so +0 -0
  113. westpa/trajtree/trajtree.py +117 -0
  114. westpa/westext/__init__.py +0 -0
  115. westpa/westext/adaptvoronoi/__init__.py +3 -0
  116. westpa/westext/adaptvoronoi/adaptVor_driver.py +214 -0
  117. westpa/westext/hamsm_restarting/__init__.py +3 -0
  118. westpa/westext/hamsm_restarting/example_overrides.py +35 -0
  119. westpa/westext/hamsm_restarting/restart_driver.py +1165 -0
  120. westpa/westext/stringmethod/__init__.py +11 -0
  121. westpa/westext/stringmethod/fourier_fitting.py +69 -0
  122. westpa/westext/stringmethod/string_driver.py +253 -0
  123. westpa/westext/stringmethod/string_method.py +306 -0
  124. westpa/westext/weed/BinCluster.py +180 -0
  125. westpa/westext/weed/ProbAdjustEquil.py +100 -0
  126. westpa/westext/weed/UncertMath.py +247 -0
  127. westpa/westext/weed/__init__.py +10 -0
  128. westpa/westext/weed/weed_driver.py +182 -0
  129. westpa/westext/wess/ProbAdjust.py +101 -0
  130. westpa/westext/wess/__init__.py +6 -0
  131. westpa/westext/wess/wess_driver.py +207 -0
  132. westpa/work_managers/__init__.py +57 -0
  133. westpa/work_managers/core.py +396 -0
  134. westpa/work_managers/environment.py +134 -0
  135. westpa/work_managers/mpi.py +318 -0
  136. westpa/work_managers/processes.py +187 -0
  137. westpa/work_managers/serial.py +28 -0
  138. westpa/work_managers/threads.py +79 -0
  139. westpa/work_managers/zeromq/__init__.py +20 -0
  140. westpa/work_managers/zeromq/core.py +641 -0
  141. westpa/work_managers/zeromq/node.py +131 -0
  142. westpa/work_managers/zeromq/work_manager.py +526 -0
  143. westpa/work_managers/zeromq/worker.py +320 -0
  144. westpa-2022.10.dist-info/AUTHORS +22 -0
  145. westpa-2022.10.dist-info/LICENSE +21 -0
  146. westpa-2022.10.dist-info/METADATA +183 -0
  147. westpa-2022.10.dist-info/RECORD +150 -0
  148. westpa-2022.10.dist-info/WHEEL +5 -0
  149. westpa-2022.10.dist-info/entry_points.txt +29 -0
  150. westpa-2022.10.dist-info/top_level.txt +1 -0
westpa/core/we_driver.py
@@ -0,0 +1,908 @@
import logging
import math
import operator
import random

import numpy as np

import westpa
from .segment import Segment
from .states import InitialState

log = logging.getLogger(__name__)


def _group_walkers_identity(we_driver, ibin, **kwargs):
    log.debug('using we_driver._group_walkers_identity')
    bin_set = we_driver.next_iter_binning[ibin]
    list_bins = [set()]
    for i in bin_set:
        list_bins[0].add(i)
    return list_bins
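# Illustrative sketch (hypothetical, not part of the released module): any
# replacement subgroup function must accept (we_driver, ibin, **kwargs) and
# return a list of sets partitioning we_driver.next_iter_binning[ibin], as the
# identity grouping above does. For example, grouping walkers by shared parent:
#
#     def _group_walkers_by_parent(we_driver, ibin, **kwargs):
#         groups = {}
#         for walker in we_driver.next_iter_binning[ibin]:
#             groups.setdefault(walker.parent_id, set()).add(walker)
#         return list(groups.values())
#
# Such a function could then be installed via we_driver.subgroup_function.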


class ConsistencyError(RuntimeError):
    pass


class AccuracyError(RuntimeError):
    pass


class NewWeightEntry:
    NW_SOURCE_RECYCLED = 0

    def __init__(
        self,
        source_type,
        weight,
        prev_seg_id=None,
        prev_init_pcoord=None,
        prev_final_pcoord=None,
        new_init_pcoord=None,
        target_state_id=None,
        initial_state_id=None,
    ):
        self.source_type = source_type
        self.weight = weight
        self.prev_seg_id = prev_seg_id
        self.prev_init_pcoord = np.asarray(prev_init_pcoord) if prev_init_pcoord is not None else None
        self.prev_final_pcoord = np.asarray(prev_final_pcoord) if prev_final_pcoord is not None else None
        self.new_init_pcoord = np.asarray(new_init_pcoord) if new_init_pcoord is not None else None
        self.target_state_id = target_state_id
        self.initial_state_id = initial_state_id

    def __repr__(self):
        return '<{} object at 0x{:x}: weight={self.weight:g} target_state_id={self.target_state_id} prev_final_pcoord={self.prev_final_pcoord}>'.format(
            self.__class__.__name__, id(self), self=self
        )


class WEDriver:
    '''A class implementing Huber & Kim's weighted ensemble algorithm over Segment objects.
    This class handles all binning, recycling, and preparation of new Segment objects for the
    next iteration. Binning is accomplished using system.bin_mapper, and per-bin target counts
    are taken from system.bin_target_counts.

    The workflow is as follows:

    1) Call `new_iteration()` every new iteration, providing any recycling targets that are
       in force and any available initial states for recycling.
    2) Call `assign()` to assign segments to bins based on their initial and end points. This
       returns the number of walkers that were recycled.
    3) Call `construct_next()` to run recycling, splitting, and merging; enough initial
       states must be available (via `new_iteration()` or `add_initial_states()`) for every
       recycled walker.

    Note the presence of flux_matrix, transition_matrix,
    current_iter_segments, next_iter_segments, recycling_segments,
    initial_binning, final_binning, next_iter_binning, and new_weights (to be documented soon).
    '''
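    # A minimal usage sketch of the workflow above (hypothetical setup; assumes
    # a configured westpa.rc with a system driver, plus istates/tstates sequences):
    #
    #     driver = WEDriver()
    #     driver.new_iteration(initial_states=istates, target_states=tstates)
    #     n_needed = driver.assign(segments)   # walkers still needing initial states
    #     # ... generate n_needed additional initial states if required ...
    #     driver.construct_next()
    #     new_segments = list(driver.next_iter_segments)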

    weight_split_threshold = 2.0
    weight_merge_cutoff = 1.0
    largest_allowed_weight = 1.0
    smallest_allowed_weight = 1e-310

    def __init__(self, rc=None, system=None):
        self.rc = rc or westpa.rc
        self.system = system or self.rc.get_system_driver()

        # Whether to adjust counts to exactly match target count
        self.do_adjust_counts = True

        # bin mapper and per-bin target counts (see new_iteration for initialization)
        self.bin_mapper = None
        self.bin_target_counts = None

        # Mapping of bin index to target state
        self.target_states = None

        # binning on initial points
        self.initial_binning = None

        # binning on final points (pre-WE)
        self.final_binning = None

        # binning on initial points for next iteration
        self.next_iter_binning = None

        # Flux and rate matrices for the current iteration
        self.flux_matrix = None
        self.transition_matrix = None

        # Information on new weights (e.g. from recycling) for the next iteration
        self.new_weights = None

        # Set of initial states passed to run_we() that are actually used for
        # recycling targets
        self.used_initial_states = None

        self.avail_initial_states = None

        # Make property for subgrouping function.
        self.subgroup_function = _group_walkers_identity
        self.subgroup_function_kwargs = {}

        self.process_config()
        self.check_threshold_configs()

    def process_config(self):
        config = self.rc.config

        config.require_type_if_present(['west', 'we', 'adjust_counts'], bool)

        config.require_type_if_present(['west', 'we', 'thresholds'], bool)

        self.do_adjust_counts = config.get(['west', 'we', 'adjust_counts'], True)
        log.info('Adjust counts to exactly match target_counts: {}'.format(self.do_adjust_counts))

        self.do_thresholds = config.get(['west', 'we', 'thresholds'], True)
        log.info('Obey absolute weight thresholds: {}'.format(self.do_thresholds))

        self.weight_split_threshold = config.get(['west', 'we', 'weight_split_threshold'], self.weight_split_threshold)
        log.info('Split threshold: {}'.format(self.weight_split_threshold))

        self.weight_merge_cutoff = config.get(['west', 'we', 'weight_merge_cutoff'], self.weight_merge_cutoff)
        log.info('Merge cutoff: {}'.format(self.weight_merge_cutoff))

        self.largest_allowed_weight = config.get(['west', 'we', 'largest_allowed_weight'], self.largest_allowed_weight)
        log.info('Largest allowed weight: {}'.format(self.largest_allowed_weight))

        self.smallest_allowed_weight = config.get(['west', 'we', 'smallest_allowed_weight'], self.smallest_allowed_weight)
        log.info('Smallest allowed weight: {}'.format(self.smallest_allowed_weight))
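    # The keys read above correspond to west.cfg entries like the following
    # (illustrative values, not defaults enforced here):
    #
    #     west:
    #       we:
    #         adjust_counts: true
    #         thresholds: true
    #         weight_split_threshold: 2.0
    #         weight_merge_cutoff: 1.0
    #         largest_allowed_weight: 0.25
    #         smallest_allowed_weight: 1.0e-100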

    @property
    def next_iter_segments(self):
        '''Newly-created segments for the next iteration'''
        if self.next_iter_binning is None:
            raise RuntimeError('cannot access next iteration segments before running WE')

        for _bin in self.next_iter_binning:
            for walker in _bin:
                yield walker

    @property
    def current_iter_segments(self):
        '''Segments for the current iteration'''
        for _bin in self.final_binning:
            for walker in _bin:
                yield walker

    @property
    def next_iter_assignments(self):
        '''Bin assignments (indices) for initial points of next iteration.'''
        if self.next_iter_binning is None:
            raise RuntimeError('cannot access next iteration segments before running WE')

        for ibin, _bin in enumerate(self.next_iter_binning):
            for _walker in _bin:
                yield ibin

    @property
    def current_iter_assignments(self):
        '''Bin assignments (indices) for endpoints of current iteration.'''
        for ibin, _bin in enumerate(self.final_binning):
            for walker in _bin:
                yield ibin

    @property
    def recycling_segments(self):
        '''Segments designated for recycling'''
        if len(self.target_states):
            for ibin, tstate in self.target_states.items():
                for segment in self.final_binning[ibin]:
                    yield segment
        else:
            return

    @property
    def n_recycled_segs(self):
        '''Number of segments recycled this iteration'''
        count = 0
        for _segment in self.recycling_segments:
            count += 1
        return count

    @property
    def n_istates_needed(self):
        '''Number of initial states needed to support recycling for this iteration'''
        n_istates_avail = len(self.avail_initial_states)
        return max(0, self.n_recycled_segs - n_istates_avail)

    def check_threshold_configs(self):
        '''Check that the weight threshold parameters are valid'''
        if (not np.issubdtype(type(self.largest_allowed_weight), np.floating)) or (
            not np.issubdtype(type(self.smallest_allowed_weight), np.floating)
        ):
            try:
                # Try to self-correct by coercing to float
                self.largest_allowed_weight = float(self.largest_allowed_weight)
                self.smallest_allowed_weight = float(self.smallest_allowed_weight)
            except ValueError:
                # The thresholds are not numbers at all
                raise ValueError("Invalid weight thresholds specified. Please check your west.cfg.")

        if np.isclose(self.largest_allowed_weight, self.smallest_allowed_weight):
            raise ValueError("Weight threshold bounds cannot be identical.")
        elif self.largest_allowed_weight < self.smallest_allowed_weight:
            self.smallest_allowed_weight, self.largest_allowed_weight = self.largest_allowed_weight, self.smallest_allowed_weight
            log.warning('Swapped largest allowed weight with smallest allowed weight to fulfill inequality (largest > smallest).')

    def clear(self):
        '''Explicitly delete all Segment-related state.'''

        del self.initial_binning, self.final_binning, self.next_iter_binning
        del self.flux_matrix, self.transition_matrix
        del self.new_weights, self.used_initial_states, self.avail_initial_states

        self.initial_binning = None
        self.final_binning = None
        self.next_iter_binning = None
        self.flux_matrix = None
        self.transition_matrix = None
        self.avail_initial_states = None
        self.used_initial_states = None
        self.new_weights = None

    def new_iteration(self, initial_states=None, target_states=None, new_weights=None, bin_mapper=None, bin_target_counts=None):
        '''Prepare for a new iteration. ``initial_states`` is a sequence of all InitialState objects valid
        for use in generating new segments for the *next* iteration (after the one being begun with the call to
        new_iteration); that is, these are states available to recycle to. Target states, which generate recycling
        events, are specified in ``target_states``, a sequence of TargetState objects. Both ``initial_states``
        and ``target_states`` may be empty as required.

        The optional ``new_weights`` is a sequence of NewWeightEntry objects which will
        be used to construct the initial flux matrix.

        The given ``bin_mapper`` will be used for assignment, and ``bin_target_counts`` for splitting/merging
        target counts; each will be obtained from the system object if omitted or None.
        '''

        self.clear()

        new_weights = new_weights or []
        initial_states = initial_states or []

        # update mapper, in case it has changed on the system driver and has not been overridden
        if bin_mapper is not None:
            self.bin_mapper = bin_mapper
        else:
            self.bin_mapper = self.system.bin_mapper

        if bin_target_counts is not None:
            self.bin_target_counts = bin_target_counts
        else:
            self.bin_target_counts = np.array(self.system.bin_target_counts).copy()
        nbins = self.bin_mapper.nbins
        log.debug('mapper is {!r}, handling {:d} bins'.format(self.bin_mapper, nbins))

        self.initial_binning = self.bin_mapper.construct_bins()
        self.final_binning = self.bin_mapper.construct_bins()
        self.next_iter_binning = None

        flux_matrix = self.flux_matrix = np.zeros((nbins, nbins), dtype=np.float64)
        transition_matrix = self.transition_matrix = np.zeros((nbins, nbins), np.uint)

        # map target state specifications to bins
        target_states = target_states or []
        self.target_states = {}
        for tstate in target_states:
            tstate_assignment = self.bin_mapper.assign([tstate.pcoord])[0]
            self.target_states[tstate_assignment] = tstate
            log.debug('target state {!r} mapped to bin {}'.format(tstate, tstate_assignment))
            self.bin_target_counts[tstate_assignment] = 0

        # loop over recycled segments, adding entries to the flux matrix appropriately
        if new_weights:
            init_pcoords = np.empty((len(new_weights), self.system.pcoord_ndim), dtype=self.system.pcoord_dtype)
            prev_init_pcoords = np.empty((len(new_weights), self.system.pcoord_ndim), dtype=self.system.pcoord_dtype)

            for ientry, entry in enumerate(new_weights):
                init_pcoords[ientry] = entry.new_init_pcoord
                prev_init_pcoords[ientry] = entry.prev_init_pcoord

            init_assignments = self.bin_mapper.assign(init_pcoords)
            prev_init_assignments = self.bin_mapper.assign(prev_init_pcoords)

            for entry, i, j in zip(new_weights, prev_init_assignments, init_assignments):
                flux_matrix[i, j] += entry.weight
                transition_matrix[i, j] += 1

            del init_pcoords, prev_init_pcoords, init_assignments, prev_init_assignments

        self.avail_initial_states = {state.state_id: state for state in initial_states}
        self.used_initial_states = {}
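    # Illustrative call (hypothetical objects): beginning iteration n+1 with
    # recycling information carried over from iteration n's WE pass:
    #
    #     driver.new_iteration(
    #         initial_states=unused_istates,   # InitialState objects to recycle into
    #         target_states=tstates,           # TargetState objects defining sinks
    #         new_weights=driver.new_weights,  # NewWeightEntry list from the last pass
    #     )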

    def add_initial_states(self, initial_states):
        '''Add newly-prepared initial states to the pool available for recycling.'''
        for state in initial_states:
            self.avail_initial_states[state.state_id] = state

    @property
    def all_initial_states(self):
        '''Return an iterator over all initial states (available or used)'''
        for state in self.avail_initial_states.values():
            yield state
        for state in self.used_initial_states.values():
            yield state

    def assign(self, segments, initializing=False):
        '''Assign segments to initial and final bins, and update the (internal) lists of used and available
        initial states. If ``initializing`` is True, then the "final" bin assignments will
        be identical to the initial bin assignments, a condition required for seeding a new iteration from
        pre-existing segments.'''

        # collect initial and final coordinates into one place
        all_pcoords = np.empty((2, len(segments), self.system.pcoord_ndim), dtype=self.system.pcoord_dtype)

        for iseg, segment in enumerate(segments):
            all_pcoords[0, iseg] = segment.pcoord[0, :]
            all_pcoords[1, iseg] = segment.pcoord[-1, :]

        # assign based on initial and final progress coordinates
        initial_assignments = self.bin_mapper.assign(all_pcoords[0, :, :])
        if initializing:
            final_assignments = initial_assignments
        else:
            final_assignments = self.bin_mapper.assign(all_pcoords[1, :, :])

        initial_binning = self.initial_binning
        final_binning = self.final_binning
        flux_matrix = self.flux_matrix
        transition_matrix = self.transition_matrix
        for segment, iidx, fidx in zip(segments, initial_assignments, final_assignments):
            initial_binning[iidx].add(segment)
            final_binning[fidx].add(segment)
            flux_matrix[iidx, fidx] += segment.weight
            transition_matrix[iidx, fidx] += 1

        n_recycled_total = self.n_recycled_segs
        n_new_states = n_recycled_total - len(self.avail_initial_states)

        log.debug(
            '{} walkers scheduled for recycling, {} initial states available'.format(
                n_recycled_total, len(self.avail_initial_states)
            )
        )

        if n_new_states > 0:
            return n_new_states
        else:
            return 0

    def _recycle_walkers(self):
        '''Recycle walkers'''

        # recall that every walker we deal with is already a new segment in the subsequent iteration,
        # so to recycle, we actually move the appropriate Segment from the target bin to the initial state bin

        self.new_weights = []

        n_recycled_walkers = len(list(self.recycling_segments))
        if not n_recycled_walkers:
            return
        elif n_recycled_walkers > len(self.avail_initial_states):
            raise ConsistencyError(
                'need {} initial states for recycling, but only {} present'.format(
                    n_recycled_walkers, len(self.avail_initial_states)
                )
            )

        used_istate_ids = set()
        istateiter = iter(self.avail_initial_states.values())
        for ibin, target_state in self.target_states.items():
            target_bin = self.next_iter_binning[ibin]
            for segment in set(target_bin):
                initial_state = next(istateiter)
                istate_assignment = self.bin_mapper.assign([initial_state.pcoord])[0]
                parent = self._parent_map[segment.parent_id]
                parent.endpoint_type = Segment.SEG_ENDPOINT_RECYCLED

                if log.isEnabledFor(logging.DEBUG):
                    log.debug(
                        'recycling {!r} from target state {!r} to initial state {!r}'.format(segment, target_state, initial_state)
                    )
                    log.debug('parent is {!r}'.format(parent))

                segment.parent_id = -(initial_state.state_id + 1)
                segment.pcoord[0] = initial_state.pcoord

                self.new_weights.append(
                    NewWeightEntry(
                        source_type=NewWeightEntry.NW_SOURCE_RECYCLED,
                        weight=parent.weight,
                        prev_seg_id=parent.seg_id,
                        # the .copy() is crucial; otherwise the slice of pcoords will
                        # keep the parent segments' pcoord data alive unnecessarily long
                        prev_init_pcoord=parent.pcoord[0].copy(),
                        prev_final_pcoord=parent.pcoord[-1].copy(),
                        new_init_pcoord=initial_state.pcoord.copy(),
                        target_state_id=target_state.state_id,
                        initial_state_id=initial_state.state_id,
                    )
                )

                if log.isEnabledFor(logging.DEBUG):
                    log.debug('new weight entry is {!r}'.format(self.new_weights[-1]))

                self.next_iter_binning[istate_assignment].add(segment)

                initial_state.iter_used = segment.n_iter
                log.debug('marking initial state {!r} as used'.format(initial_state))
                used_istate_ids.add(initial_state.state_id)
                target_bin.remove(segment)

            assert len(target_bin) == 0

        # Transfer newly-assigned states from "available" to "used"
        for state_id in used_istate_ids:
            self.used_initial_states[state_id] = self.avail_initial_states.pop(state_id)

    def _split_walker(self, segment, m, bin):
        '''Split the walker ``segment`` (in ``bin``) into ``m`` walkers'''
        new_segments = []
        for _inew in range(0, m):
            new_segment = Segment(
                n_iter=segment.n_iter,  # previously incremented
                weight=segment.weight / m,
                parent_id=segment.parent_id,
                wtg_parent_ids=set(segment.wtg_parent_ids),
                pcoord=segment.pcoord.copy(),
                status=Segment.SEG_STATUS_PREPARED,
            )
            new_segment.pcoord[0, :] = segment.pcoord[0, :]
            new_segments.append(new_segment)

        if log.isEnabledFor(logging.DEBUG):
            log.debug('splitting {!r} into {:d}:\n {!r}'.format(segment, m, new_segments))

        return new_segments

    def _merge_walkers(self, segments, cumul_weight, bin):
        '''Merge the given ``segments`` in ``bin``, previously sorted by weight, into one conglomerate segment.
        ``cumul_weight`` is the cumulative sum of the weights of the ``segments``; this may be None to calculate here.'''

        if cumul_weight is None:
            cumul_weight = np.add.accumulate([segment.weight for segment in segments])

        glom = Segment(
            n_iter=segments[0].n_iter,  # assumed correct (and equal among all segments)
            weight=cumul_weight[len(segments) - 1],
            status=Segment.SEG_STATUS_PREPARED,
            pcoord=self.system.new_pcoord_array(),
        )

        # Select the history to use
        # The following takes a random number in the interval 0 <= x < glom.weight, then
        # sees where this value falls among the (sorted) weights of the segments being merged;
        # this ensures that a walker with (e.g.) twice the weight of its brethren has twice the
        # probability of having its history selected for continuation
        iparent = np.digitize((random.uniform(0, glom.weight),), cumul_weight)[0]
        gparent_seg = segments[iparent]

        # Inherit history from this segment ("gparent" stands for "glom parent", as opposed to historical
        # parent).
        glom.parent_id = gparent_seg.parent_id
        glom.pcoord[0, :] = gparent_seg.pcoord[0, :]

        # Weight comes from all segments being merged, and therefore all their
        # parent segments
        glom.wtg_parent_ids = set()
        for segment in segments:
            glom.wtg_parent_ids |= segment.wtg_parent_ids

        # The historical parent of gparent is continued; all others are marked as merged
        for segment in segments:
            if segment is gparent_seg:
                # we must ignore initial states here...
                if segment.parent_id >= 0:
                    self._parent_map[segment.parent_id].endpoint_type = Segment.SEG_ENDPOINT_CONTINUES
            else:
                # and "unuse" an initial state here (recall that initial states are in 1:1 correspondence
                # with the segments they initiate), except when a previously-split particle is being
                # merged
                if segment.parent_id >= 0:
                    self._parent_map[segment.parent_id].endpoint_type = Segment.SEG_ENDPOINT_MERGED
                else:
                    if segment.initial_state_id in {seg.initial_state_id for seg in bin}:
                        log.debug('initial state in use by other walker; not removing')
                    else:
                        initial_state = self.used_initial_states.pop(segment.initial_state_id)
                        log.debug('freeing initial state {!r} for future use (merged)'.format(initial_state))
                        self.avail_initial_states[initial_state.state_id] = initial_state
                        initial_state.iter_used = None

        if log.isEnabledFor(logging.DEBUG):
            log.debug('merging ({:d}) {!r} into 1:\n {!r}'.format(len(segments), segments, glom))

        return glom, gparent_seg
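    # Worked example of the history selection above (made-up weights): merging
    # walkers of weight 0.1, 0.2, and 0.7 gives cumul_weight = [0.1, 0.3, 1.0].
    # For a uniform draw x in [0, 1.0), np.digitize((x,), cumul_weight)[0] yields
    # 0 for x < 0.1, 1 for 0.1 <= x < 0.3, and 2 for 0.3 <= x < 1.0, so each
    # walker's history survives with probability equal to its weight fraction.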

    def _split_by_weight(self, bin, target_count, ideal_weight):
        '''Split overweight particles'''

        segments = np.array(sorted(bin, key=operator.attrgetter('weight')), dtype=np.object_)
        weights = np.array(list(map(operator.attrgetter('weight'), segments)))

        if len(bin) > 0:
            assert target_count > 0

        to_split = segments[weights > self.weight_split_threshold * ideal_weight]

        for segment in to_split:
            m = int(math.ceil(segment.weight / ideal_weight))
            bin.remove(segment)
            new_segments_list = self._split_walker(segment, m, bin)
            bin.update(new_segments_list)

    def _merge_by_weight(self, bin, target_count, ideal_weight):
        '''Merge underweight particles'''

        while True:
            segments = np.array(sorted(bin, key=operator.attrgetter('weight')), dtype=np.object_)
            weights = np.array(list(map(operator.attrgetter('weight'), segments)))
            cumul_weight = np.add.accumulate(weights)

            to_merge = segments[cumul_weight <= ideal_weight * self.weight_merge_cutoff]
            if len(to_merge) < 2:
                return
            bin.difference_update(to_merge)
            new_segment, parent = self._merge_walkers(to_merge, cumul_weight, bin)
            bin.add(new_segment)
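    # Worked example (made-up numbers): with target_count = 4 and total bin
    # weight 1.0, ideal_weight = 0.25. A walker of weight 0.6 exceeds
    # weight_split_threshold * ideal_weight = 0.5 and is split into
    # m = ceil(0.6 / 0.25) = 3 walkers of weight 0.2 each, while walkers whose
    # cumulative weight stays at or below weight_merge_cutoff * ideal_weight =
    # 0.25 are merged until fewer than two such candidates remain.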

    def _adjust_count(self, bin, subgroups, target_count):
        weight_getter = operator.attrgetter('weight')
        # Order subgroups by the sum of their weights.
        if len(subgroups) > target_count:
            sorted_subgroups = [set()]
            for i in bin:
                sorted_subgroups[0].add(i)
        else:
            sorted_subgroups = sorted(subgroups, key=lambda gp: sum(seg.weight for seg in gp))
        # Loop over the groups, splitting/merging until the proper count has been reached.
        # This way, no trajectories are accidentally destroyed.

        # split
        while len(bin) < target_count:
            for i in sorted_subgroups:
                log.debug('adjusting counts by splitting')
                # always split the highest-probability walker into two
                segments = sorted(i, key=weight_getter)
                bin.remove(segments[-1])
                i.remove(segments[-1])
                new_segments_list = self._split_walker(segments[-1], 2, bin)
                i.update(new_segments_list)
                bin.update(new_segments_list)

                if len(bin) == target_count:
                    break

        # merge
        while len(bin) > target_count:
            sorted_subgroups.reverse()
            # Adjust to go from the lowest-weight group to the highest when merging
            for i in sorted_subgroups:
                # Ensure that there are at least two walkers to merge
                if len(i) > 1:
                    log.debug('adjusting counts by merging')
                    # always merge the two lowest-probability walkers
                    segments = sorted(i, key=weight_getter)
                    bin.difference_update(segments[:2])
                    i.difference_update(segments[:2])
                    merged_segment, parent = self._merge_walkers(segments[:2], cumul_weight=None, bin=bin)
                    i.add(merged_segment)
                    bin.add(merged_segment)

                    # As long as we're changing the merge_walkers and split_walkers, adjust them so that they don't update the bin within the function
                    # and instead update the bin here. Assuming nothing else relies on those. Make sure with grin.
                    # in bash, "find . -name \*.py | xargs fgrep -n '_merge_walkers'"
                    if len(bin) == target_count:
                        break

    def _merge_by_threshold(self, bin, subgroup):
        # merge to satisfy weight thresholds
        # this gets rid of weights that are too small
        while True:
            segments = np.array(sorted(subgroup, key=operator.attrgetter('weight')), dtype=np.object_)
            weights = np.array(list(map(operator.attrgetter('weight'), segments)))
            cumul_weight = np.add.accumulate(weights)

            to_merge = segments[weights < self.smallest_allowed_weight]
            if len(to_merge) < 2:
                return
            bin.difference_update(to_merge)
            subgroup.difference_update(to_merge)
            new_segment, parent = self._merge_walkers(to_merge, cumul_weight, bin)
            bin.add(new_segment)
            subgroup.add(new_segment)

    def _split_by_threshold(self, bin, subgroup):
        # split to satisfy weight thresholds
        # this splits walkers that are too big
        segments = np.array(sorted(subgroup, key=operator.attrgetter('weight')), dtype=np.object_)
        weights = np.array(list(map(operator.attrgetter('weight'), segments)))

        to_split = segments[weights > self.largest_allowed_weight]
        for segment in to_split:
            m = int(math.ceil(segment.weight / self.largest_allowed_weight))
            bin.remove(segment)
            subgroup.remove(segment)
            new_segments_list = self._split_walker(segment, m, bin)
            bin.update(new_segments_list)
            subgroup.update(new_segments_list)

    def _check_pre(self):
        for ibin, _bin in enumerate(self.next_iter_binning):
            if self.bin_target_counts[ibin] == 0 and len(_bin) > 0:
                raise ConsistencyError('bin {:d} has target count of 0 but contains {:d} walkers'.format(ibin, len(_bin)))

    def _check_post(self):
        for segment in self.next_iter_segments:
            if segment.weight == 0:
                raise ConsistencyError('segment {!r} has weight of zero'.format(segment))

    def _prep_we(self):
        '''Prepare internal state for WE recycle/split/merge.'''
        self._parent_map = {}
        self.next_iter_binning = self.bin_mapper.construct_bins()

    def _run_we(self):
        '''Run recycle/split/merge. Do not call this function directly; instead, use
        populate_initial(), rebin_current(), or construct_next().'''
        self._recycle_walkers()

        # sanity check
        self._check_pre()

        # Regardless of current particle count, always split overweight particles and merge underweight particles.
        # Then, and only then, adjust for correct particle count.
        total_number_of_subgroups = 0
        total_number_of_particles = 0
        for ibin, bin in enumerate(self.next_iter_binning):
            if len(bin) == 0:
                continue

            # Split the bin into subgroups as defined by the configured subgroup function
            target_count = self.bin_target_counts[ibin]
            subgroups = self.subgroup_function(self, ibin, **self.subgroup_function_kwargs)
            total_number_of_subgroups += len(subgroups)
            # Clear the bin
            segments = np.array(sorted(bin, key=operator.attrgetter('weight')), dtype=np.object_)
            weights = np.array(list(map(operator.attrgetter('weight'), segments)))
            ideal_weight = weights.sum() / target_count
            bin.clear()
            # Determine whether we have at least as many subgroups as target walkers in the bin, and then use
            # different logic to deal with those cases. Should devolve to the Huber/Kim algorithm in the case of few subgroups.
            if len(subgroups) >= target_count:
                for i in subgroups:
                    # Merge all members of set i, after checking that there are any to merge.
                    if len(i) > 1:
                        (segment, parent) = self._merge_walkers(
                            list(i),
                            np.add.accumulate(np.array(list(map(operator.attrgetter('weight'), i)))),
                            i,
                        )
                        i.clear()
                        i.add(segment)
                    # Add all members of the set i to the bin. This keeps the bins in sync for the adjustment step.
                    bin.update(i)

                if len(subgroups) > target_count:
                    self._adjust_count(bin, subgroups, target_count)

            if len(subgroups) < target_count:
                for i in subgroups:
                    self._split_by_weight(i, target_count, ideal_weight)
                    self._merge_by_weight(i, target_count, ideal_weight)
                    # Same logic here.
                    bin.update(i)
                if self.do_adjust_counts:
                    # A modified adjustment routine is necessary to ensure we don't unnecessarily destroy trajectory pathways.
                    self._adjust_count(bin, subgroups, target_count)
            if self.do_thresholds:
                for i in subgroups:
                    self._split_by_threshold(bin, i)
                    self._merge_by_threshold(bin, i)
                for iseg in bin:
                    if iseg.weight > self.largest_allowed_weight or iseg.weight < self.smallest_allowed_weight:
                        log.warning(
                            f'Unable to fulfill threshold conditions for {iseg}. The given threshold range is likely too small.'
                        )
            total_number_of_particles += len(bin)
        log.debug('Total number of subgroups: {!r}'.format(total_number_of_subgroups))

        self._check_post()

        self.new_weights = self.new_weights or []

        log.debug('used initial states: {!r}'.format(self.used_initial_states))
        log.debug('available initial states: {!r}'.format(self.avail_initial_states))

    def populate_initial(self, initial_states, weights, system=None):
        '''Create walkers for a new weighted ensemble simulation.

        One segment is created for each provided initial state, then binned and split/merged
        as necessary. After this function is called, next_iter_segments will yield the new
        segments to create, used_initial_states will contain data about which of the
        provided initial states were used, and avail_initial_states will contain data about
        which initial states were unused (because their corresponding walkers were merged
        out of existence).
        '''

        # This has to be down here to avoid an import race
        from westpa.core.data_manager import weight_dtype

        EPS = np.finfo(weight_dtype).eps

        system = system or westpa.rc.get_system_driver()
        self.new_iteration(
            initial_states=[], target_states=[], bin_mapper=system.bin_mapper, bin_target_counts=system.bin_target_counts
        )

        # Create dummy segments
        segments = []
        for seg_id, (initial_state, weight) in enumerate(zip(initial_states, weights)):
            dummy_segment = Segment(
                n_iter=0,
                seg_id=seg_id,
                parent_id=-(initial_state.state_id + 1),
                weight=weight,
                wtg_parent_ids=set([-(initial_state.state_id + 1)]),
                pcoord=system.new_pcoord_array(),
                status=Segment.SEG_STATUS_PREPARED,
            )
            dummy_segment.pcoord[[0, -1]] = initial_state.pcoord
            segments.append(dummy_segment)

        # Adjust weights, if necessary
        tprob = sum(weights)
        if abs(1.0 - tprob) > len(weights) * EPS:
            pscale = 1.0 / tprob
            log.warning('Weights of initial segments do not sum to unity; scaling by {:g}'.format(pscale))
            for segment in segments:
                segment.weight *= pscale

        self.assign(segments, initializing=True)
        self.construct_next()

        # We now have properly-constructed initial segments, except for parent information,
        # and we need to mark initial states as used or unused
        istates_by_id = {state.state_id: state for state in initial_states}
        dummysegs_by_id = self._parent_map

        # Don't add start states to the list of available initial states.
        # They're only meant to be used in the first iteration, so nothing should ever be recycled into them.
        # Thus, they're not available.
        self.avail_initial_states = {
            k: v for (k, v) in istates_by_id.items() if not v.istate_type == InitialState.ISTATE_TYPE_START
        }

        # Iterate over a snapshot of the keys, since entries may be popped along the way
        for state_id in list(self.avail_initial_states):
            if self.avail_initial_states[state_id].istate_type == InitialState.ISTATE_TYPE_START:
                self.avail_initial_states.pop(state_id)

        self.used_initial_states = {}
        for segment in self.next_iter_segments:
            segment.parent_id = dummysegs_by_id[segment.parent_id].parent_id
            segment.wtg_parent_ids = set([segment.parent_id])
            assert segment.initpoint_type == Segment.SEG_INITPOINT_NEWTRAJ
            istate = istates_by_id[segment.initial_state_id]
            try:
                self.used_initial_states[istate.state_id] = self.avail_initial_states.pop(istate.state_id)
            except KeyError:
                # Shared by more than one segment, and already marked as used
                pass

        for used_istate in self.used_initial_states.values():
            used_istate.iter_used = 1
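    # Illustrative call (hypothetical istates list): seeding a fresh simulation
    # with one equal-weight walker per prepared initial state:
    #
    #     driver.populate_initial(istates, weights=[1.0 / len(istates)] * len(istates))
    #     segments = list(driver.next_iter_segments)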

    def rebin_current(self, parent_segments):
        '''Reconstruct walkers for the current iteration based on (presumably) new binning.
        The previous iteration's segments must be provided (as ``parent_segments``) in order
        to update endpoint types appropriately.'''

        self._prep_we()
        self._parent_map = {segment.seg_id: segment for segment in parent_segments}

        # Create new segments for the next iteration
        # We assume that everything is going to continue without being touched by recycling or WE, and
        # adjust later
        new_pcoord_array = self.system.new_pcoord_array
        n_iter = None

        for ibin, _bin in enumerate(self.final_binning):
            for segment in _bin:
                if n_iter is None:
                    n_iter = segment.n_iter
                else:
                    assert segment.n_iter == n_iter

                new_segment = Segment(
                    n_iter=segment.n_iter,
                    parent_id=segment.parent_id,
                    weight=segment.weight,
                    wtg_parent_ids=set(segment.wtg_parent_ids or []),
                    pcoord=new_pcoord_array(),
                    status=Segment.SEG_STATUS_PREPARED,
                )
                new_segment.pcoord[0] = segment.pcoord[0]
                self.next_iter_binning[ibin].add(new_segment)

        self._run_we()

    def construct_next(self):
        '''Construct walkers for the next iteration, by running weighted ensemble recycling
        and bin/split/merge on the segments previously assigned to bins using ``assign``.
        Enough unused initial states must be present in ``self.avail_initial_states`` for every recycled
        walker to be assigned an initial state.

        After this function completes, ``self.flux_matrix`` contains a valid flux matrix for this
        iteration (including any contributions from recycling from the previous iteration), and
        ``self.next_iter_segments`` contains a list of segments ready for the next iteration,
        with appropriate values set for weight, endpoint type, parent walkers, and so on.
        '''

        self._prep_we()

        # Create new segments for the next iteration
        # We assume that everything is going to continue without being touched by recycling or WE, and
        # adjust later
        new_pcoord_array = self.system.new_pcoord_array
        n_iter = None

        for ibin, _bin in enumerate(self.final_binning):
            for segment in _bin:
                if n_iter is None:
                    n_iter = segment.n_iter
                else:
                    assert segment.n_iter == n_iter

                segment.endpoint_type = Segment.SEG_ENDPOINT_CONTINUES
                new_segment = Segment(
                    n_iter=segment.n_iter + 1,
                    parent_id=segment.seg_id,
                    weight=segment.weight,
                    wtg_parent_ids=[segment.seg_id],
                    pcoord=new_pcoord_array(),
                    status=Segment.SEG_STATUS_PREPARED,
                )
                new_segment.pcoord[0] = segment.pcoord[-1]
                self.next_iter_binning[ibin].add(new_segment)

                # Store a link to the parent segment, so we can update its endpoint status as we need,
                # based on its ID
                self._parent_map[segment.seg_id] = segment

        self._run_we()

        log.debug('used initial states: {!r}'.format(self.used_initial_states))
        log.debug('available initial states: {!r}'.format(self.avail_initial_states))

    def _log_bin_stats(self, bin, heading=None, level=logging.DEBUG):
        if log.isEnabledFor(level):
            # sort as a float array so the summary statistics below work
            weights = np.array(sorted(map(operator.attrgetter('weight'), bin)))
            bin_label = getattr(bin, 'label', None) or ''
            log_fmt = '\n '.join(
                [
                    '',
                    'stats for bin {bin_label!r} {heading}',
                    ' count: {bin.count:d}, target count: {bin.target_count:d}',
                    ' total weight: {bin.weight:{weight_spec}}, ideal weight: {ideal_weight:{weight_spec}}',
                    ' mean weight: {mean_weight:{weight_spec}}, stdev weight: {stdev_weight:{weight_spec}}',
                    ' min weight: {min_weight:{weight_spec}}, med weight : {median_weight:{weight_spec}}'
                    + ', max weight: {max_weight:{weight_spec}}',
                ]
            )
            log_msg = log_fmt.format(
                weight_spec='<12.6e',
                bin_label=bin_label,
                heading=heading,
                bin=bin,
                ideal_weight=bin.weight / bin.target_count,
                mean_weight=weights.mean(),
                stdev_weight=weights.std(),
                min_weight=weights[0],
                median_weight=np.median(weights),
                max_weight=weights[-1],
            )
            log.log(level, log_msg)