westpa 2022.12__cp313-cp313-macosx_10_13_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of westpa might be problematic. Click here for more details.

Files changed (149) hide show
  1. westpa/__init__.py +14 -0
  2. westpa/_version.py +21 -0
  3. westpa/analysis/__init__.py +5 -0
  4. westpa/analysis/core.py +746 -0
  5. westpa/analysis/statistics.py +27 -0
  6. westpa/analysis/trajectories.py +360 -0
  7. westpa/cli/__init__.py +0 -0
  8. westpa/cli/core/__init__.py +0 -0
  9. westpa/cli/core/w_fork.py +152 -0
  10. westpa/cli/core/w_init.py +230 -0
  11. westpa/cli/core/w_run.py +77 -0
  12. westpa/cli/core/w_states.py +212 -0
  13. westpa/cli/core/w_succ.py +99 -0
  14. westpa/cli/core/w_truncate.py +68 -0
  15. westpa/cli/tools/__init__.py +0 -0
  16. westpa/cli/tools/ploterr.py +506 -0
  17. westpa/cli/tools/plothist.py +706 -0
  18. westpa/cli/tools/w_assign.py +596 -0
  19. westpa/cli/tools/w_bins.py +166 -0
  20. westpa/cli/tools/w_crawl.py +119 -0
  21. westpa/cli/tools/w_direct.py +547 -0
  22. westpa/cli/tools/w_dumpsegs.py +94 -0
  23. westpa/cli/tools/w_eddist.py +506 -0
  24. westpa/cli/tools/w_fluxanl.py +376 -0
  25. westpa/cli/tools/w_ipa.py +833 -0
  26. westpa/cli/tools/w_kinavg.py +127 -0
  27. westpa/cli/tools/w_kinetics.py +96 -0
  28. westpa/cli/tools/w_multi_west.py +414 -0
  29. westpa/cli/tools/w_ntop.py +213 -0
  30. westpa/cli/tools/w_pdist.py +515 -0
  31. westpa/cli/tools/w_postanalysis_matrix.py +82 -0
  32. westpa/cli/tools/w_postanalysis_reweight.py +53 -0
  33. westpa/cli/tools/w_red.py +491 -0
  34. westpa/cli/tools/w_reweight.py +780 -0
  35. westpa/cli/tools/w_select.py +226 -0
  36. westpa/cli/tools/w_stateprobs.py +111 -0
  37. westpa/cli/tools/w_trace.py +599 -0
  38. westpa/core/__init__.py +0 -0
  39. westpa/core/_rc.py +673 -0
  40. westpa/core/binning/__init__.py +55 -0
  41. westpa/core/binning/_assign.cpython-313-darwin.so +0 -0
  42. westpa/core/binning/assign.py +455 -0
  43. westpa/core/binning/binless.py +96 -0
  44. westpa/core/binning/binless_driver.py +54 -0
  45. westpa/core/binning/binless_manager.py +190 -0
  46. westpa/core/binning/bins.py +47 -0
  47. westpa/core/binning/mab.py +506 -0
  48. westpa/core/binning/mab_driver.py +54 -0
  49. westpa/core/binning/mab_manager.py +198 -0
  50. westpa/core/data_manager.py +1694 -0
  51. westpa/core/extloader.py +74 -0
  52. westpa/core/h5io.py +995 -0
  53. westpa/core/kinetics/__init__.py +24 -0
  54. westpa/core/kinetics/_kinetics.cpython-313-darwin.so +0 -0
  55. westpa/core/kinetics/events.py +147 -0
  56. westpa/core/kinetics/matrates.py +156 -0
  57. westpa/core/kinetics/rate_averaging.py +266 -0
  58. westpa/core/progress.py +218 -0
  59. westpa/core/propagators/__init__.py +54 -0
  60. westpa/core/propagators/executable.py +719 -0
  61. westpa/core/reweight/__init__.py +14 -0
  62. westpa/core/reweight/_reweight.cpython-313-darwin.so +0 -0
  63. westpa/core/reweight/matrix.py +126 -0
  64. westpa/core/segment.py +119 -0
  65. westpa/core/sim_manager.py +835 -0
  66. westpa/core/states.py +359 -0
  67. westpa/core/systems.py +93 -0
  68. westpa/core/textio.py +74 -0
  69. westpa/core/trajectory.py +330 -0
  70. westpa/core/we_driver.py +910 -0
  71. westpa/core/wm_ops.py +43 -0
  72. westpa/core/yamlcfg.py +391 -0
  73. westpa/fasthist/__init__.py +34 -0
  74. westpa/fasthist/_fasthist.cpython-313-darwin.so +0 -0
  75. westpa/mclib/__init__.py +271 -0
  76. westpa/mclib/__main__.py +28 -0
  77. westpa/mclib/_mclib.cpython-313-darwin.so +0 -0
  78. westpa/oldtools/__init__.py +4 -0
  79. westpa/oldtools/aframe/__init__.py +35 -0
  80. westpa/oldtools/aframe/atool.py +75 -0
  81. westpa/oldtools/aframe/base_mixin.py +26 -0
  82. westpa/oldtools/aframe/binning.py +178 -0
  83. westpa/oldtools/aframe/data_reader.py +560 -0
  84. westpa/oldtools/aframe/iter_range.py +200 -0
  85. westpa/oldtools/aframe/kinetics.py +117 -0
  86. westpa/oldtools/aframe/mcbs.py +153 -0
  87. westpa/oldtools/aframe/output.py +39 -0
  88. westpa/oldtools/aframe/plotting.py +90 -0
  89. westpa/oldtools/aframe/trajwalker.py +126 -0
  90. westpa/oldtools/aframe/transitions.py +469 -0
  91. westpa/oldtools/cmds/__init__.py +0 -0
  92. westpa/oldtools/cmds/w_ttimes.py +361 -0
  93. westpa/oldtools/files.py +34 -0
  94. westpa/oldtools/miscfn.py +23 -0
  95. westpa/oldtools/stats/__init__.py +4 -0
  96. westpa/oldtools/stats/accumulator.py +35 -0
  97. westpa/oldtools/stats/edfs.py +129 -0
  98. westpa/oldtools/stats/mcbs.py +96 -0
  99. westpa/tools/__init__.py +33 -0
  100. westpa/tools/binning.py +472 -0
  101. westpa/tools/core.py +340 -0
  102. westpa/tools/data_reader.py +159 -0
  103. westpa/tools/dtypes.py +31 -0
  104. westpa/tools/iter_range.py +198 -0
  105. westpa/tools/kinetics_tool.py +340 -0
  106. westpa/tools/plot.py +283 -0
  107. westpa/tools/progress.py +17 -0
  108. westpa/tools/selected_segs.py +154 -0
  109. westpa/tools/wipi.py +751 -0
  110. westpa/trajtree/__init__.py +4 -0
  111. westpa/trajtree/_trajtree.cpython-313-darwin.so +0 -0
  112. westpa/trajtree/trajtree.py +117 -0
  113. westpa/westext/__init__.py +0 -0
  114. westpa/westext/adaptvoronoi/__init__.py +3 -0
  115. westpa/westext/adaptvoronoi/adaptVor_driver.py +214 -0
  116. westpa/westext/hamsm_restarting/__init__.py +3 -0
  117. westpa/westext/hamsm_restarting/example_overrides.py +35 -0
  118. westpa/westext/hamsm_restarting/restart_driver.py +1165 -0
  119. westpa/westext/stringmethod/__init__.py +11 -0
  120. westpa/westext/stringmethod/fourier_fitting.py +69 -0
  121. westpa/westext/stringmethod/string_driver.py +253 -0
  122. westpa/westext/stringmethod/string_method.py +306 -0
  123. westpa/westext/weed/BinCluster.py +180 -0
  124. westpa/westext/weed/ProbAdjustEquil.py +100 -0
  125. westpa/westext/weed/UncertMath.py +247 -0
  126. westpa/westext/weed/__init__.py +10 -0
  127. westpa/westext/weed/weed_driver.py +192 -0
  128. westpa/westext/wess/ProbAdjust.py +101 -0
  129. westpa/westext/wess/__init__.py +6 -0
  130. westpa/westext/wess/wess_driver.py +217 -0
  131. westpa/work_managers/__init__.py +57 -0
  132. westpa/work_managers/core.py +396 -0
  133. westpa/work_managers/environment.py +134 -0
  134. westpa/work_managers/mpi.py +318 -0
  135. westpa/work_managers/processes.py +187 -0
  136. westpa/work_managers/serial.py +28 -0
  137. westpa/work_managers/threads.py +79 -0
  138. westpa/work_managers/zeromq/__init__.py +20 -0
  139. westpa/work_managers/zeromq/core.py +641 -0
  140. westpa/work_managers/zeromq/node.py +131 -0
  141. westpa/work_managers/zeromq/work_manager.py +526 -0
  142. westpa/work_managers/zeromq/worker.py +320 -0
  143. westpa-2022.12.dist-info/AUTHORS +22 -0
  144. westpa-2022.12.dist-info/LICENSE +21 -0
  145. westpa-2022.12.dist-info/METADATA +193 -0
  146. westpa-2022.12.dist-info/RECORD +149 -0
  147. westpa-2022.12.dist-info/WHEEL +6 -0
  148. westpa-2022.12.dist-info/entry_points.txt +29 -0
  149. westpa-2022.12.dist-info/top_level.txt +1 -0
@@ -0,0 +1,318 @@
1
+ """
2
+ A work manager which uses MPI to distribute tasks and collect results.
3
+ """
4
+
5
+ import sys
6
+ import time
7
+ import traceback
8
+ import logging
9
+ import threading
10
+ from collections import deque
11
+ from mpi4py import MPI
12
+ from westpa.work_managers import WorkManager, WMFuture
13
+
14
+ log = logging.getLogger(__name__)
15
+
16
+
17
+ # +------+
18
+ # | Task |
19
+ # +------+
20
class Task:
    """A unit of work: ``(task_id, function, positional args, keyword args)``."""

    def __init__(self, task_id, fn, args, kwargs):
        self.task_id = task_id
        self.fn = fn
        # Normalize None to an empty argument tuple / kwargs dict.
        self.args = () if args is None else args
        self.kwargs = {} if kwargs is None else kwargs

    def __repr__(self):
        return f'<Task {self.task_id}: {self.fn!r}(*{self.args!r}, **{self.kwargs!r})>'
31
+
32
+
33
+ # +----------------+
34
+ # | MPIWorkManager |
35
+ # +----------------+
36
class MPIWorkManager(WorkManager):
    """MPIWorkManager factory.

    Instantiating this class inspects ``MPI.COMM_WORLD`` and actually
    constructs a rank-specific subclass: :class:`Serial` for single-process
    runs, :class:`Manager` on rank 0, or :class:`Worker` on any other rank.
    """

    @classmethod
    def from_environ(cls, wmenv=None):
        """Create an instance; the MPI environment determines the subclass."""
        return cls()

    def __new__(cls):
        """Creates a Serial WorkManager if size is 1. Otherwise creates a
        single Manager and size-1 Worker.
        """
        log.debug('MPIWorkManager.__new__()')
        assert MPI.Is_initialized()
        assert MPI.Is_thread_main()

        rank = MPI.COMM_WORLD.Get_rank()
        size = MPI.COMM_WORLD.Get_size()

        if size == 1:
            return super().__new__(Serial)
        elif rank == 0:
            return super().__new__(Manager)
        else:
            return super().__new__(Worker)

    def __init__(self):
        """Initialize info shared by Manager and Worker classes."""
        log.debug('MPIWorkManager.__init__()')

        super().__init__()
        comm = MPI.COMM_WORLD
        self.comm = comm
        self.rank = comm.Get_rank()
        self.size = comm.Get_size()
        self.name = MPI.Get_processor_name()

        # message tags used to multiplex traffic on COMM_WORLD
        self.task_tag = 110  # tag for server to client msgs
        self.result_tag = 120  # tag for client to server msgs
        self.shutdown_tag = 130  # tag for server to client to stop

        # rank of the managing process
        self.managerID = 0

    def submit(self, fn, args=None, kwargs=None):
        """Adhere to WorkManager interface. This method should never be
        called; Serial and Manager provide the real implementations.

        Raises
        ------
        NotImplementedError
            Always.  (Formerly ``assert False``, which is silently stripped
            under ``python -O`` and would let a stray call fall through.)
        """
        raise NotImplementedError('submit() must be called on a Serial or Manager instance')
84
+
85
+
86
+ # +--------+
87
+ # | Serial |
88
+ # +--------+
89
+ # TODO: no need for the code replication here, just use the original serial wm
90
# TODO: no need for the code replication here, just use the original serial wm
class Serial(MPIWorkManager):
    """Replication of the serial work manager. This is a fallback for MPI runs
    that request only 1 (size=1) processor.
    """

    def __init__(self):
        super().__init__()
        log.debug('Serial.__init__()')

    def submit(self, fn, args=None, kwargs=None):
        """Execute *fn* synchronously and return an already-completed future.

        Parameters
        ----------
        fn : callable
            Function to run.
        args : tuple or None
            Positional arguments for *fn* (``None`` means none).
        kwargs : dict or None
            Keyword arguments for *fn* (``None`` means none).

        Returns
        -------
        WMFuture
            Future already holding either the result or the raised exception.
        """
        # Bug fix: this previously logged 'Serial.__init__()' (copy-paste error).
        log.debug('Serial.submit()')

        ft = WMFuture()
        try:
            result = fn(*(args if args is not None else ()), **(kwargs if kwargs is not None else {}))
        except Exception as e:
            ft._set_exception(e, sys.exc_info()[2])
        else:
            ft._set_result(result)
        return ft
110
+
111
+
112
+ # +---------+
113
+ # | Manager |
114
+ # +---------+
115
class Manager(MPIWorkManager):
    """Manager of the MPIWorkManager. Distributes tasks to Workers as they are
    received from the sim_manager. In addition to the main thread, this class
    spawns two threads, a receiver and a dispatcher.
    """

    def __init__(self):
        """Initialize different state variables used by Manager."""
        super().__init__()
        # Fix: log message previously read 'Manager__init__()' (missing dot).
        log.debug('Manager.__init__()')

        # number of workers
        self.nworkers = self.size - 1

        # list of worker ranks (every rank except the manager, rank 0)
        self.workerIDs = range(1, self.size)
        assert self.nworkers == len(self.workerIDs)

        # deque of idle workers
        self.dests = deque(self.workerIDs)

        # deque of tasks awaiting dispatch
        self.tasks = deque()

        # number of unmatched send/receive pairs
        self.nPending = 0

        # thread shutdown sentinel
        self.shutItDown = False

        # task_id, future key value pair
        self.pending_futures = dict()

        # list of manager threads (dispatcher and receiver)
        self.workers = []

        # thread lock protecting tasks/dests/nPending shared between threads
        self.lock = threading.Lock()

    def startup(self):
        """Spawns the dispatcher and receiver threads."""
        log.debug('Manager.startup()')

        if not self.running:
            self.workers.append(threading.Thread(name='dispatcher', target=self._dispatcher))
            self.workers.append(threading.Thread(name='receiver', target=self._receiver))

            for t in self.workers:
                t.start()
                # Fix: Thread.getName() is deprecated since Python 3.10;
                # use the .name attribute instead.
                log.info('Started thread: %s' % t.name)

            self.running = True

    def _dispatcher(self):
        """Continuously dispatches tasks to idle destinations until the
        shutdown sentinel is set.
        """
        log.debug('Manager._dispatcher()')
        assert MPI.Is_thread_main() is False
        # Fix: threading.currentThread()/getName() are deprecated since
        # Python 3.10 in favor of current_thread()/.name.
        assert threading.current_thread().name == "dispatcher"

        while not self.shutItDown:
            req = []
            # do we have work and somewhere to send it?
            while self.tasks and self.dests:
                with self.lock:
                    task = self.tasks.popleft()
                    sendTo = self.dests.popleft()
                    self.nPending += 1

                req.append(self.comm.isend(task, dest=sendTo, tag=self.task_tag))

            # make sure all sends completed
            MPI.Request.Waitall(requests=req)

            # force context switching ( 1ms )
            time.sleep(0.001)

    def _receiver(self):
        """Continuously receives futures from workers until the shutdown
        sentinel is set.
        """
        log.debug('Manager._receiver()')
        assert MPI.Is_thread_main() is False
        assert threading.current_thread().name == "receiver"

        while not self.shutItDown:
            # are we waiting on any results?
            while self.nPending:
                stat = MPI.Status()
                (tid, msg, val) = self.comm.recv(source=MPI.ANY_SOURCE, tag=self.result_tag, status=stat)
                log.debug('Manager._receiver received task: %s' % tid)

                # update the future with the worker's result or exception
                ft = self.pending_futures.pop(tid)
                if msg == 'exception':
                    ft._set_exception(*val)
                else:
                    ft._set_result(val)

                # the sender is idle again
                with self.lock:
                    self.dests.append(stat.Get_source())
                    self.nPending -= 1

            # force context switching ( 1ms )
            time.sleep(0.001)

    def submit(self, fn, args=None, kwargs=None):
        """Receive task from simulation manager and add it to pending_futures."""
        log.debug('Manager.submit()')

        ft = WMFuture()
        task_id = ft.task_id
        with self.lock:
            self.tasks.append(Task(task_id, fn, args, kwargs))
            self.pending_futures[task_id] = ft

        return ft

    def shutdown(self):
        """Send shutdown tag to all worker processes, and set the shutdown
        sentinel to stop the receiver and dispatcher loops.
        """
        log.debug('Manager.shutdown()')

        # Wait on any unfinished work.  Fix: the previous `while ...: pass`
        # busy-wait pegged a full core; sleep briefly between checks instead.
        while self.pending_futures:
            time.sleep(0.001)

        # send shutdown msg to all workers
        req = [MPI.REQUEST_NULL] * self.nworkers
        for rank in self.workerIDs:
            req[rank - 1] = self.comm.isend(MPI.BOTTOM, dest=rank, tag=self.shutdown_tag)

        MPI.Request.Waitall(requests=req)

        # stop threads
        self.shutItDown = True

        for t in self.workers:
            t.join()
            log.info('Stopped thread: %s' % t.name)

        self.running = False
259
+
260
+
261
+ # +--------+
262
+ # | Worker |
263
+ # +--------+
264
class Worker(MPIWorkManager):
    """Client class for executing tasks as distributed by the Manager in the
    MPI Work Manager.
    """

    def __init__(self):
        super().__init__()
        log.debug('Worker.__init__() %s' % self.rank)

    def startup(self):
        """Clock the worker in for work."""
        log.debug('Worker.startup() %s' % self.rank)
        if not self.running:
            self.clockIn()
            self.running = True

    def clockIn(self):
        """Do each task as it comes in. The completion of a task is
        notice to the manager that more work is welcome.
        """
        log.info('Worker %s clocking in.' % self.rank)

        comm = self.comm

        while True:
            status = MPI.Status()
            task = comm.recv(source=self.managerID, tag=MPI.ANY_TAG, status=status)
            tag = status.Get_tag()

            if tag == self.task_tag:
                log.debug('Worker %s received task: %s' % (self.rank, task.task_id))

                # run the task, capturing either its value or its exception
                try:
                    retval = task.fn(*task.args, **task.kwargs)
                except BaseException as exc:
                    response = (task.task_id, 'exception', (exc, traceback.format_exc()))
                else:
                    response = (task.task_id, 'result', retval)

                # hand the outcome back to the manager
                comm.send(response, dest=self.managerID, tag=self.result_tag)

            elif tag == self.shutdown_tag:
                log.info('Worker %s clocking out.' % self.rank)
                return

    @property
    def is_master(self):
        """Worker processes need to be marked as not manager. This ensures that
        the proper branching is followed in w_run.py.
        """
        return False
@@ -0,0 +1,187 @@
1
+ import os
2
+ import sys
3
+ import random
4
+ import logging
5
+ import threading
6
+ import traceback
7
+ import multiprocessing
8
+ from multiprocessing.queues import Empty
9
+
10
+ import westpa.work_managers as work_managers
11
+ from .core import WorkManager, WMFuture
12
+
13
+ log = logging.getLogger(__name__)
14
+
15
+ # Tasks are tuples ('task', task_id, fn, args, kwargs).
16
+ # Results are tuples (rtype, task_id, payload) where rtype is 'result' or 'exception' and payload is the return value
17
+ # or exception, respectively.
18
+
19
# Sentinels shaped like ordinary task/result tuples; a 'shutdown' message
# tells the receiving loop (worker process or receiver thread) to exit.
task_shutdown_sentinel = ('shutdown', None, None, (), {})
result_shutdown_sentinel = ('shutdown', None, None)
21
+
22
+
23
class ProcessWorkManager(WorkManager):
    '''A work manager using the ``multiprocessing`` module.

    Notes
    -----

    On MacOS, as of Python 3.8 the default start method for multiprocessing launching new processes was changed from fork to spawn.
    In general, spawn is more robust and efficient, however it requires serializability of everything being passed to the child process.
    In contrast, fork is much less memory efficient, as it makes a full copy of everything in the parent process.
    However, it does not require picklability.

    So, on MacOS, the method for launching new processes is explicitly changed to fork from the (MacOS-specific) default of spawn.
    Unix should default to fork.

    See https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods and
    https://docs.python.org/3/library/multiprocessing.html#the-spawn-and-forkserver-start-methods for more details.
    '''

    @classmethod
    def from_environ(cls, wmenv=None):
        """Build an instance using ``n_workers`` from the work manager environment."""
        if wmenv is None:
            wmenv = work_managers.environment.default_env
        return cls(wmenv.get_val('n_workers', multiprocessing.cpu_count(), int))

    def __init__(self, n_workers=None, shutdown_timeout=1):
        """
        Parameters
        ----------
        n_workers : int or None
            Number of worker processes; defaults to ``multiprocessing.cpu_count()``.
        shutdown_timeout : float
            Seconds to wait for a worker to exit before escalating to
            ``terminate()`` and then ``kill()`` during shutdown.
        """
        super().__init__()

        try:
            if sys.platform == 'darwin':  # MacOS
                multiprocessing.set_start_method('fork')
                log.debug('setting multiprocessing start method to fork')
        except RuntimeError:
            # start method was already fixed by an earlier caller
            log.debug('failed to set start method to fork')

        self.n_workers = n_workers or multiprocessing.cpu_count()
        self.workers = None
        self.task_queue = multiprocessing.Queue()
        self.result_queue = multiprocessing.Queue()
        self.receive_thread = None
        self.pending = None  # task_id -> WMFuture mapping, created in startup()

        self.shutdown_received = threading.Event()
        self.shutdown_timeout = shutdown_timeout or 1

    def task_loop(self):
        """Worker-process main loop: execute tasks until a shutdown message."""
        # Close standard input, so we don't get SIGINT from ^C
        try:
            sys.stdin.close()
        except Exception as e:
            log.info("can't close stdin: {}".format(e))

        # (re)initialize random number generator in this process
        random.seed()

        # NOTE(review): with the fork start method, shutdown_received is a
        # *copy* of the parent's threading.Event, so workers normally exit via
        # the 'shutdown' sentinel message rather than this flag.
        while not self.shutdown_received.is_set():
            # Fix: the previous poll of task_queue.empty() busy-waited (one
            # full core per idle worker), and empty() is documented as
            # unreliable for multiprocessing queues.  A blocking get with a
            # timeout is race-free and cheap.
            try:
                message, task_id, fn, args, kwargs = self.task_queue.get(timeout=0.1)[:5]
            except Empty:
                continue

            if message == 'shutdown':
                break
            try:
                result = fn(*args, **kwargs)
            except BaseException as e:
                result_tuple = ('exception', task_id, (e, traceback.format_exc()))
            else:
                result_tuple = ('result', task_id, result)
            self.result_queue.put(result_tuple)

        log.debug('exiting task_loop')
        return

    def results_loop(self):
        """Receiver-thread main loop: move worker results onto their futures."""
        while not self.shutdown_received.is_set():
            # Same fix as task_loop: blocking get instead of polling empty().
            try:
                message, task_id, payload = self.result_queue.get(timeout=0.1)[:3]
            except Empty:
                continue

            if message == 'shutdown':
                break
            elif message == 'exception':
                future = self.pending.pop(task_id)
                future._set_exception(*payload)
            elif message == 'result':
                future = self.pending.pop(task_id)
                future._set_result(payload)
            else:
                raise AssertionError('unknown message {!r}'.format((message, task_id, payload)))

        log.debug('exiting results_loop')

    def submit(self, fn, args=None, kwargs=None):
        """Queue *fn* for execution in a worker process; return its future."""
        ft = WMFuture()
        log.debug('dispatching {!r}'.format(fn))
        self.pending[ft.task_id] = ft
        self.task_queue.put(('task', ft.task_id, fn, args or (), kwargs or {}))
        return ft

    def startup(self):
        """Fork the worker processes and start the result-receiver thread."""
        from . import environment

        if not self.running:
            log.debug('starting up work manager {!r}'.format(self))
            self.running = True
            self.workers = [
                multiprocessing.Process(target=self.task_loop, name='worker-{:d}-{:x}'.format(i, id(self)))
                for i in range(self.n_workers)
            ]

            # expose each worker's index via the environment while it forks
            pi_name = '{}_PROCESS_INDEX'.format(environment.WMEnvironment.env_prefix)
            for iworker, worker in enumerate(self.workers):
                os.environ[pi_name] = str(iworker)
                worker.start()
            try:
                del os.environ[pi_name]
            except KeyError:
                pass

            self.pending = dict()

            self.receive_thread = threading.Thread(target=self.results_loop, name='receiver')
            self.receive_thread.daemon = True
            self.receive_thread.start()

    def _empty_queues(self):
        '''Empty self.task_queue and self.result_queue until queue is empty'''
        try:
            while True:
                self.task_queue.get_nowait()
        except Empty:
            pass

        try:
            while True:
                self.result_queue.get_nowait()
        except Empty:
            pass

    def shutdown(self):
        """Drain the queues, send one shutdown sentinel per worker, and
        escalate to terminate()/kill() for any worker that does not exit
        within ``shutdown_timeout`` seconds.
        """
        while self.running:
            log.debug('shutting down {!r}'.format(self))
            self.shutdown_received.set()
            self._empty_queues()

            # Send shutdown signal
            for _i in range(self.n_workers):
                self.task_queue.put_nowait(task_shutdown_sentinel)

            for worker in self.workers:
                worker.join(self.shutdown_timeout)
                if worker.is_alive():
                    log.debug('sending SIGINT to worker process {:d}'.format(worker.pid))
                    worker.terminate()
                    worker.join(self.shutdown_timeout)
                    if worker.is_alive():
                        log.warning('sending SIGKILL to worker process {:d}'.format(worker.pid))
                        worker.kill()
                        worker.join()

                    log.debug('worker process {:d} terminated with code {:d}'.format(worker.pid, worker.exitcode))
                else:
                    log.debug('worker process {:d} terminated gracefully with code {:d}'.format(worker.pid, worker.exitcode))

            self._empty_queues()
            self.result_queue.put(result_shutdown_sentinel)

            self.running = False
@@ -0,0 +1,28 @@
1
+ import logging
2
+ import sys
3
+
4
+ from .core import WorkManager, WMFuture
5
+
6
+
7
+ log = logging.getLogger(__name__)
8
+
9
+
10
class SerialWorkManager(WorkManager):
    """Work manager that runs every task synchronously in the calling process."""

    @classmethod
    def from_environ(cls, wmenv=None):
        """Build an instance; the environment carries no relevant settings."""
        return cls()

    def __init__(self):
        log.debug('initializing serial work manager')
        super().__init__()
        self.n_workers = 1

    def submit(self, fn, args=None, kwargs=None):
        """Run *fn* immediately and hand back a future that is already done."""
        future = WMFuture()
        call_args = () if args is None else args
        call_kwargs = {} if kwargs is None else kwargs
        try:
            outcome = fn(*call_args, **call_kwargs)
        except Exception as exc:
            future._set_exception(exc, sys.exc_info()[2])
        else:
            future._set_result(outcome)
        return future
@@ -0,0 +1,79 @@
1
+ import logging
2
+ import multiprocessing
3
+ import queue
4
+ import sys
5
+ import threading
6
+
7
+ from .core import WorkManager, WMFuture
8
+ import westpa.work_managers as work_managers
9
+
10
+ log = logging.getLogger(__name__)
11
+
12
+
13
class Task:
    """A unit of work: a callable with its arguments, bound to the future
    that will receive the outcome.
    """

    def __init__(self, fn, args, kwargs, future):
        self.fn = fn
        self.args = args
        self.kwargs = kwargs
        self.future = future

    def run(self):
        """Invoke the callable and record its result (or exception) on the future."""
        try:
            outcome = self.fn(*self.args, **self.kwargs)
        except Exception as exc:
            self.future._set_exception(exc, sys.exc_info()[2])
        else:
            self.future._set_result(outcome)
27
+
28
+
29
# Unique marker placed on the task queue (once per worker) to tell worker
# threads to exit; compared by identity in runtask().
ShutdownSentinel = object()
30
+
31
+
32
class ThreadsWorkManager(WorkManager):
    '''A work manager using threads.'''

    @classmethod
    def from_environ(cls, wmenv=None):
        """Build an instance with ``n_workers`` taken from the environment."""
        if wmenv is None:
            wmenv = work_managers.environment.default_env
        return cls(wmenv.get_val('n_workers', multiprocessing.cpu_count(), int))

    def __init__(self, n_workers=None):
        super().__init__()
        self.n_workers = n_workers if n_workers else multiprocessing.cpu_count()
        self.workers = []
        self.task_queue = queue.Queue()

    def runtask(self, task_queue):
        """Worker loop: run queued tasks until the shutdown sentinel arrives."""
        while True:
            item = task_queue.get()
            if item is ShutdownSentinel:
                return
            item.run()

    def submit(self, fn, args=None, kwargs=None):
        """Queue *fn* for execution on a worker thread; return its future."""
        future = WMFuture()
        pos_args = () if args is None else args
        kw_args = {} if kwargs is None else kwargs
        self.task_queue.put(Task(fn, pos_args, kw_args, future))
        return future

    def startup(self):
        """Spawn the worker threads (no-op if already running)."""
        if self.running:
            return
        self.running = True
        self.workers = [
            threading.Thread(target=self.runtask, args=[self.task_queue], name='worker-{:d}'.format(i))
            for i in range(self.n_workers)
        ]
        for worker in self.workers:
            log.debug('starting thread {!r}'.format(worker))
            worker.start()

    def shutdown(self):
        """Stop all worker threads (no-op if not running)."""
        if not self.running:
            return
        # Put one sentinel on the queue per worker, then wait for threads to terminate
        for _ in range(self.n_workers):
            self.task_queue.put(ShutdownSentinel)
        for worker in self.workers:
            worker.join()
        self.running = False
@@ -0,0 +1,20 @@
1
+ from .core import ZMQWMError, ZMQWMTimeout, ZMQWMEnvironmentError, ZMQWorkerMissing, ZMQCore
2
+ from .node import ZMQNode
3
+ from .worker import ZMQWorker
4
+ from .work_manager import ZMQWorkManager
5
+
6
+ import atexit
7
+
8
+ __all__ = [
9
+ 'ZMQWMError',
10
+ 'ZMQWMTimeout',
11
+ 'ZMQWMEnvironmentError',
12
+ 'ZMQWorkerMissing',
13
+ 'ZMQCore',
14
+ 'ZMQNode',
15
+ 'ZMQWorker',
16
+ 'ZMQWorkManager',
17
+ ]
18
+
19
+ atexit.register(ZMQCore.remove_ipc_endpoints)
20
+ del atexit