vortex-nwp 2.0.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (146)
  1. vortex/__init__.py +135 -0
  2. vortex/algo/__init__.py +12 -0
  3. vortex/algo/components.py +2136 -0
  4. vortex/algo/mpitools.py +1648 -0
  5. vortex/algo/mpitools_templates/envelope_wrapper_default.tpl +27 -0
  6. vortex/algo/mpitools_templates/envelope_wrapper_mpiauto.tpl +29 -0
  7. vortex/algo/mpitools_templates/wrapstd_wrapper_default.tpl +18 -0
  8. vortex/algo/serversynctools.py +170 -0
  9. vortex/config.py +115 -0
  10. vortex/data/__init__.py +13 -0
  11. vortex/data/abstractstores.py +1572 -0
  12. vortex/data/containers.py +780 -0
  13. vortex/data/contents.py +596 -0
  14. vortex/data/executables.py +284 -0
  15. vortex/data/flow.py +113 -0
  16. vortex/data/geometries.ini +2689 -0
  17. vortex/data/geometries.py +703 -0
  18. vortex/data/handlers.py +1021 -0
  19. vortex/data/outflow.py +67 -0
  20. vortex/data/providers.py +465 -0
  21. vortex/data/resources.py +201 -0
  22. vortex/data/stores.py +1271 -0
  23. vortex/gloves.py +282 -0
  24. vortex/layout/__init__.py +27 -0
  25. vortex/layout/appconf.py +109 -0
  26. vortex/layout/contexts.py +511 -0
  27. vortex/layout/dataflow.py +1069 -0
  28. vortex/layout/jobs.py +1276 -0
  29. vortex/layout/monitor.py +833 -0
  30. vortex/layout/nodes.py +1424 -0
  31. vortex/layout/subjobs.py +464 -0
  32. vortex/nwp/__init__.py +11 -0
  33. vortex/nwp/algo/__init__.py +12 -0
  34. vortex/nwp/algo/assim.py +483 -0
  35. vortex/nwp/algo/clim.py +920 -0
  36. vortex/nwp/algo/coupling.py +609 -0
  37. vortex/nwp/algo/eda.py +632 -0
  38. vortex/nwp/algo/eps.py +613 -0
  39. vortex/nwp/algo/forecasts.py +745 -0
  40. vortex/nwp/algo/fpserver.py +927 -0
  41. vortex/nwp/algo/ifsnaming.py +403 -0
  42. vortex/nwp/algo/ifsroot.py +311 -0
  43. vortex/nwp/algo/monitoring.py +202 -0
  44. vortex/nwp/algo/mpitools.py +554 -0
  45. vortex/nwp/algo/odbtools.py +974 -0
  46. vortex/nwp/algo/oopsroot.py +735 -0
  47. vortex/nwp/algo/oopstests.py +186 -0
  48. vortex/nwp/algo/request.py +579 -0
  49. vortex/nwp/algo/stdpost.py +1285 -0
  50. vortex/nwp/data/__init__.py +12 -0
  51. vortex/nwp/data/assim.py +392 -0
  52. vortex/nwp/data/boundaries.py +261 -0
  53. vortex/nwp/data/climfiles.py +539 -0
  54. vortex/nwp/data/configfiles.py +149 -0
  55. vortex/nwp/data/consts.py +929 -0
  56. vortex/nwp/data/ctpini.py +133 -0
  57. vortex/nwp/data/diagnostics.py +181 -0
  58. vortex/nwp/data/eda.py +148 -0
  59. vortex/nwp/data/eps.py +383 -0
  60. vortex/nwp/data/executables.py +1039 -0
  61. vortex/nwp/data/fields.py +96 -0
  62. vortex/nwp/data/gridfiles.py +308 -0
  63. vortex/nwp/data/logs.py +551 -0
  64. vortex/nwp/data/modelstates.py +334 -0
  65. vortex/nwp/data/monitoring.py +220 -0
  66. vortex/nwp/data/namelists.py +644 -0
  67. vortex/nwp/data/obs.py +748 -0
  68. vortex/nwp/data/oopsexec.py +72 -0
  69. vortex/nwp/data/providers.py +182 -0
  70. vortex/nwp/data/query.py +217 -0
  71. vortex/nwp/data/stores.py +147 -0
  72. vortex/nwp/data/surfex.py +338 -0
  73. vortex/nwp/syntax/__init__.py +9 -0
  74. vortex/nwp/syntax/stdattrs.py +375 -0
  75. vortex/nwp/tools/__init__.py +10 -0
  76. vortex/nwp/tools/addons.py +35 -0
  77. vortex/nwp/tools/agt.py +55 -0
  78. vortex/nwp/tools/bdap.py +48 -0
  79. vortex/nwp/tools/bdcp.py +38 -0
  80. vortex/nwp/tools/bdm.py +21 -0
  81. vortex/nwp/tools/bdmp.py +49 -0
  82. vortex/nwp/tools/conftools.py +1311 -0
  83. vortex/nwp/tools/drhook.py +62 -0
  84. vortex/nwp/tools/grib.py +268 -0
  85. vortex/nwp/tools/gribdiff.py +99 -0
  86. vortex/nwp/tools/ifstools.py +163 -0
  87. vortex/nwp/tools/igastuff.py +249 -0
  88. vortex/nwp/tools/mars.py +56 -0
  89. vortex/nwp/tools/odb.py +548 -0
  90. vortex/nwp/tools/partitioning.py +234 -0
  91. vortex/nwp/tools/satrad.py +56 -0
  92. vortex/nwp/util/__init__.py +6 -0
  93. vortex/nwp/util/async.py +184 -0
  94. vortex/nwp/util/beacon.py +40 -0
  95. vortex/nwp/util/diffpygram.py +359 -0
  96. vortex/nwp/util/ens.py +198 -0
  97. vortex/nwp/util/hooks.py +128 -0
  98. vortex/nwp/util/taskdeco.py +81 -0
  99. vortex/nwp/util/usepygram.py +591 -0
  100. vortex/nwp/util/usetnt.py +87 -0
  101. vortex/proxy.py +6 -0
  102. vortex/sessions.py +341 -0
  103. vortex/syntax/__init__.py +9 -0
  104. vortex/syntax/stdattrs.py +628 -0
  105. vortex/syntax/stddeco.py +176 -0
  106. vortex/toolbox.py +982 -0
  107. vortex/tools/__init__.py +11 -0
  108. vortex/tools/actions.py +457 -0
  109. vortex/tools/addons.py +297 -0
  110. vortex/tools/arm.py +76 -0
  111. vortex/tools/compression.py +322 -0
  112. vortex/tools/date.py +20 -0
  113. vortex/tools/ddhpack.py +10 -0
  114. vortex/tools/delayedactions.py +672 -0
  115. vortex/tools/env.py +513 -0
  116. vortex/tools/folder.py +663 -0
  117. vortex/tools/grib.py +559 -0
  118. vortex/tools/lfi.py +746 -0
  119. vortex/tools/listings.py +354 -0
  120. vortex/tools/names.py +575 -0
  121. vortex/tools/net.py +1790 -0
  122. vortex/tools/odb.py +10 -0
  123. vortex/tools/parallelism.py +336 -0
  124. vortex/tools/prestaging.py +186 -0
  125. vortex/tools/rawfiles.py +10 -0
  126. vortex/tools/schedulers.py +413 -0
  127. vortex/tools/services.py +871 -0
  128. vortex/tools/storage.py +1061 -0
  129. vortex/tools/surfex.py +61 -0
  130. vortex/tools/systems.py +3396 -0
  131. vortex/tools/targets.py +384 -0
  132. vortex/util/__init__.py +9 -0
  133. vortex/util/config.py +1071 -0
  134. vortex/util/empty.py +24 -0
  135. vortex/util/helpers.py +184 -0
  136. vortex/util/introspection.py +63 -0
  137. vortex/util/iosponge.py +76 -0
  138. vortex/util/roles.py +51 -0
  139. vortex/util/storefunctions.py +103 -0
  140. vortex/util/structs.py +26 -0
  141. vortex/util/worker.py +150 -0
  142. vortex_nwp-2.0.0b1.dist-info/LICENSE +517 -0
  143. vortex_nwp-2.0.0b1.dist-info/METADATA +50 -0
  144. vortex_nwp-2.0.0b1.dist-info/RECORD +146 -0
  145. vortex_nwp-2.0.0b1.dist-info/WHEEL +5 -0
  146. vortex_nwp-2.0.0b1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,2136 @@
1
+ # pylint: disable=unused-argument
2
+
3
+ """
4
+ Abstract base class for any AlgoComponent (:class:`AlgoComponent`) and for AlgoComponent
5
+ mixins (:class:`AlgoComponentDecoMixin`).
6
+
7
+ Some very generic concrete AlgoComponent classes are also provided:
8
+
9
+ * :class:`Expresso`: launch a simple script;
10
+ * :class:`BlindRun`: launch a simple executable (no MPI);
11
+ * :class:`Parallel`: launch an MPI application.
12
+
13
+ Additional abstract classes provide multiprocessing support (through the
14
+ :mod:`taylorism` package):
15
+
16
+ * :class:`TaylorRun`: launch a piece of Python code on several processes;
17
+ * :class:`ParaExpresso`: launch a script multiple times (in parallel);
18
+ * :class:`ParaBlindRun`: launch an executable multiple times (in parallel).
19
+
20
+ Such classes are based on the :mod:`taylorism` package (the developer should be familiar
21
+ with this package) and use "Worker" classes provided in the
22
+ :mod:`vortex.tools.parallelism` package.
23
+
24
+ A few examples of AlgoComponent classes are shipped with the code
25
+ (see :ref:`examples_algo`). In addition to the documentation provided
26
+ in :ref:`stepbystep-index`, they might help.
27
+
28
+ When class inheritance is not applicable or ineffective, the AlgoComponent
29
+ mixins are a powerful tool to share pieces of code. See the
30
+ :class:`AlgoComponentDecoMixin` class documentation for more details.
31
+ """
32
+
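A minimal usage sketch (the ``exe`` variable and the attribute values are assumptions, mirroring the :class:`PythonFunction` example further down): concrete components are obtained through the footprints-based toolbox and then run on an executable's ResourceHandler.

.. code-block:: python

    from vortex import toolbox

    # 'exe' is assumed to come from a prior toolbox.executable(...) call,
    # as in the PythonFunction docstring below.
    tbalgo = toolbox.algo(engine='blind')  # footprints resolution -> BlindRun
    tbalgo.run(exe[0])                     # prepare / execute / postfix sequence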
33
+ import collections.abc
34
+ import contextlib
35
+ import copy
36
+ import functools
37
+ import importlib
38
+ import locale
39
+ import logging
40
+ import multiprocessing
41
+ import queue
42
+ import shlex
43
+ import sys
44
+ import tempfile
45
+ import traceback as py_traceback
46
+
47
+ from bronx.fancies import loggers
48
+ from bronx.stdtypes import date
49
+ from bronx.syntax.decorators import nicedeco
50
+ import footprints
51
+ from taylorism import Boss
52
+ import vortex
53
+ from vortex.config import from_config
54
+ from vortex.algo import mpitools
55
+ from vortex.syntax.stdattrs import DelayedEnvValue
56
+ from vortex.tools.parallelism import ParallelResultParser
57
+
58
+ #: No automatic export
59
+ __all__ = []
60
+
61
+ logger = loggers.getLogger(__name__)
62
+
63
+
64
+ class AlgoComponentError(Exception):
65
+ """Generic exception class for Algo Components."""
66
+ pass
67
+
68
+
69
+ class AlgoComponentAssertionError(AlgoComponentError):
70
+ """Assertion exception class for Algo Components."""
71
+ pass
72
+
73
+
74
+ class DelayedAlgoComponentError(AlgoComponentError):
75
+ """Triggered when exceptions occurred during the execution but were delayed."""
76
+
77
+ def __init__(self, excs):
78
+ super().__init__("One or several errors occurred during the run.")
79
+ self._excs = excs
80
+
81
+ def __iter__(self):
82
+ yield from self._excs
83
+
84
+ def __str__(self):
85
+ outstr = "One or several errors occurred during the run. In order of appearance:\n"
86
+ outstr += "\n".join(['{:3d}. {!s} (type: {!s})'.format(i + 1, exc, type(exc))
87
+ for i, exc in enumerate(self)])
88
+ return outstr
89
+
90
+
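A minimal sketch of how this delayed-error container behaves (the error values below are illustrative only): exceptions recorded during the run are re-raised together at the end, and the resulting exception is iterable.

.. code-block:: python

    errors = [ValueError('first failure'), OSError('second failure')]
    try:
        raise DelayedAlgoComponentError(errors)
    except DelayedAlgoComponentError as exc:
        print(exc)           # numbered summary, in order of appearance
        for sub in exc:      # iterate over the collected exceptions
            print(type(sub), sub)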
91
+ class ParallelInconsistencyAlgoComponentError(Exception):
92
+ """Generic exception class for Algo Components."""
93
+
94
+ def __init__(self, target):
95
+ msg = "The len of {:s} is inconsistent with the number or ResourceHandlers."
96
+ super().__init__(msg.format(target))
97
+
98
+
99
+ @nicedeco
100
+ def _clsmtd_mixin_locked(f):
101
+ """
102
+ This is a utility decorator (for class methods): it ensures that the method can only
103
+ be called on a bare :class:`AlgoComponentDecoMixin` class.
104
+ """
105
+ def wrapped_clsmethod(cls, *kargs, **kwargs):
106
+ if issubclass(cls, AlgoComponent):
107
+ raise RuntimeError("This class method should not be called once the mixin is in use.")
108
+ return f(cls, *kargs, **kwargs)
109
+ return wrapped_clsmethod
110
+
111
+
112
+ def algo_component_deco_mixin_autodoc(cls):
113
+ """
114
+ Decorator that adds an automatic documentation on any :class:`AlgoComponentDecoMixin`
115
+ class.
116
+ """
117
+ extradoc = ''
118
+
119
+ # Document extra footprints
120
+ if cls.MIXIN_AUTO_FPTWEAK and cls._MIXIN_EXTRA_FOOTPRINTS:
121
+ extradoc += '\nThe following footprints will be applied to the target classes:\n\n'
122
+ for fp in cls._MIXIN_EXTRA_FOOTPRINTS:
123
+ if isinstance(fp, footprints.Footprint):
124
+ extradoc += footprints.doc.format_docstring(fp,
125
+ footprints.setup.docstrings,
126
+ abstractfpobj=True)
127
+ extradoc += '\n'
128
+
129
+ # Document decorating classes
130
+ if cls.MIXIN_AUTO_DECO:
131
+ for what, desc in (('PREPARE_PREHOOKS', 'before the original ``prepare`` method'),
132
+ ('PREPARE_HOOKS', 'after the original ``prepare`` method'),
133
+ ('POSTFIX_PREHOOKS', 'before the original ``postfix`` method'),
134
+ ('POSTFIX_HOOKS', 'after the original ``postfix`` method'),
135
+ ('SPAWN_HOOKS', 'after the original ``spawn_hook`` method'),
136
+ ('CLI_OPTS_EXTEND', 'to alter the result of the ``spawn_command_options`` method'),
137
+ ('STDIN_OPTS_EXTEND', 'to alter the result of the ``spawn_stdin_options`` method'),
138
+ ('_MIXIN_EXECUTE_OVERWRITE', 'instead of the original ``execute`` method'),
139
+ ('MPIBINS_HOOKS', 'to alter the result of the ``_bootstrap_mpibins_hack`` method'),
140
+ ('MPIENVELOPE_HOOKS', 'to alter the result of the ``_bootstrap_mpienvelope_hack`` method')):
141
+ what = '_MIXIN_{:s}'.format(what)
142
+ if getattr(cls, what, ()):
143
+ extradoc += '\nThe following method(s) will be called {:s}:\n\n'.format(desc)
144
+ extradoc += '\n'.join(' * {!r}'.format(cb) for cb in getattr(cls, what))
145
+ extradoc += '\n'
146
+
147
+ if extradoc:
148
+ extradoc = ('\n .. note:: The following documentation is automatically generated. ' +
149
+ 'From a developer point of view, using the present mixin class ' +
150
+ 'will result in the following actions:\n' +
151
+ ' \n'.join([' ' + t if t else ''
152
+ for t in extradoc.split('\n')]))
153
+
154
+ if isinstance(getattr(cls, '__doc__', None), str):
155
+ cls.__doc__ += '\n' + extradoc
156
+ else:
157
+ cls.__doc__ = extradoc
158
+
159
+ return cls
160
+
161
+
162
+ class AlgoComponentDecoMixin:
163
+ """
164
+ This is the base class for any Mixin class targeting :class:`AlgoComponent`
165
+ classes.
166
+
167
+ Like any Mixin class, this Mixin class primary use is to define methods that
168
+ will be available to the child class.
169
+
170
+ However, this class will also interact with the :class:`AlgoComponentMeta`
171
+ metaclass to alter the behaviour of the :class:`AlgoComponent` class it is
172
+ used with. Several "alterations" will be made to the resulting
173
+ :class:`AlgoComponent` class.
174
+
175
+ * A bunch of footprints' attribute can be added to the resulting class.
176
+ This is controlled by the :data:`MIXIN_AUTO_FPTWEAK` and
177
+ :data:`_MIXIN_EXTRA_FOOTPRINTS` class variables.
178
+ If :data:`MIXIN_AUTO_FPTWEAK` is ``True`` (which is the default), the
179
+ :class:`~footprints.Footprint` objects listed in the
180
+ :data:`_MIXIN_EXTRA_FOOTPRINTS` tuple will be prepended to the resulting
181
+ :class:`AlgoComponent` class footprint definition.
182
+
183
+ * The ``execute`` method of the resulting class can be overwritten by
184
+ the method referenced in the :data:`_MIXIN_EXECUTE_OVERWRITE` class
185
+ variable. This is allowed only if no ``execute`` method is defined
186
+ manually and if no other :class:`AlgoComponentDecoMixin` tries to
187
+ overwrite it as well. If these two conditions are not met, a
188
+ :class:`RuntimeError` exception will be thrown by the
189
+ :class:`AlgoComponentMeta` metaclass.
190
+
191
+ * A bunch of the :class:`AlgoComponent`'s methods can be decorated. This
192
+ is controlled by the :data:`MIXIN_AUTO_DECO` class variable (``True``
193
+ by default) and a bunch of other class variables containing tuples.
194
+ They are described below:
195
+
196
+ * :data:`_MIXIN_PREPARE_PREHOOKS`: Tuple of methods that will be
197
+ executed before the original prepare method. Such methods receive
198
+ the same arguments list as the original decorated method.
199
+
200
+ * :data:`_MIXIN_PREPARE_HOOKS`: Tuple of methods that will be
201
+ executed after the original prepare method. Such methods receive
202
+ the same arguments list as the original decorated method.
203
+
204
+ * :data:`_MIXIN_EXECUTE_FINALISE_HOOKS`: Tuple of methods that will
205
+ be executed after any execution (even if the execution failed).
206
+
207
+ * :data:`_MIXIN_FAIL_EXECUTE_HOOKS`: Tuple of methods that will
208
+ be executed if the execution fails (the original exception
209
+ will be re-raised afterwards).
210
+
211
+ * :data:`_MIXIN_POSTFIX_PREHOOKS`: Tuple of methods that will be
212
+ executed before the original postfix method. Such methods receive
213
+ the same arguments list as the original decorated method.
214
+
215
+ * :data:`_MIXIN_POSTFIX_HOOKS`: Tuple of methods that will be
216
+ executed after the original postfix method. Such methods receive
217
+ the same arguments list as the original decorated method.
218
+
219
+ * :data:`_MIXIN_SPAWN_HOOKS`: Tuple of methods that will be
220
+ executed after the original spawn_hook method. Such methods receive
221
+ the same arguments list as the original decorated method.
222
+
223
+ * :data:`_MIXIN_CLI_OPTS_EXTEND`: Tuple of methods that will be
224
+ executed after the original ``spawn_command_options`` method. Such
225
+ methods will receive one argument (``self`` set aside): the value
226
+ returned by the original ``spawn_command_options`` method.
227
+
228
+ * :data:`_MIXIN_STDIN_OPTS_EXTEND`: Tuple of methods that will be
229
+ executed after the original ``spawn_stdin_options`` method. Such
230
+ methods will receive one argument (``self`` set aside): the value
231
+ returned by the original ``spawn_stdin_options`` method.
232
+
233
+ """
234
+
235
+ MIXIN_AUTO_FPTWEAK = True
236
+ MIXIN_AUTO_DECO = True
237
+
238
+ _MIXIN_EXTRA_FOOTPRINTS = ()
239
+
240
+ _MIXIN_PREPARE_PREHOOKS = ()
241
+ _MIXIN_PREPARE_HOOKS = ()
242
+ _MIXIN_EXECUTE_FINALISE_HOOKS = ()
243
+ _MIXIN_FAIL_EXECUTE_HOOKS = ()
244
+ _MIXIN_POSTFIX_PREHOOKS = ()
245
+ _MIXIN_POSTFIX_HOOKS = ()
246
+ _MIXIN_SPAWN_HOOKS = ()
247
+
248
+ _MIXIN_CLI_OPTS_EXTEND = ()
249
+ _MIXIN_STDIN_OPTS_EXTEND = ()
250
+
251
+ _MIXIN_EXECUTE_OVERWRITE = None
252
+
253
+ def __new__(cls, *args, **kwargs):
254
+ if not issubclass(cls, AlgoComponent):
255
+ # This class cannot be instantiated by itself!
256
+ raise RuntimeError('< {0.__name__:s} > is a mixin class: it cannot be instantiated.'
257
+ .format(cls))
258
+ else:
259
+ return super().__new__(cls)
260
+
261
+ @classmethod
262
+ @_clsmtd_mixin_locked
263
+ def mixin_tweak_footprint(cls, fplocal):
264
+ """Update the footprint definition list."""
265
+ for fp in cls._MIXIN_EXTRA_FOOTPRINTS:
266
+ assert isinstance(fp, footprints.Footprint)
267
+ fplocal.insert(0, fp)
268
+
269
+ @classmethod
270
+ @_clsmtd_mixin_locked
271
+ def _get_algo_wrapped(cls, targetcls, targetmtd, hooks, prehooks=(), reentering=False):
272
+ """Wraps **targetcls**'s **targetmtd** method."""
273
+ orig_mtd = getattr(targetcls, targetmtd)
274
+ if prehooks and reentering:
275
+ raise ValueError('Conflicting values between prehooks and reentering.')
276
+
277
+ def wrapped_method(self, *kargs, **kwargs):
278
+ for phook in prehooks:
279
+ phook(self, *kargs, **kwargs)
280
+ rv = orig_mtd(self, *kargs, **kwargs)
281
+ if reentering:
282
+ kargs = [rv, ] + list(kargs)
283
+ for phook in hooks:
284
+ rv = phook(self, *kargs, **kwargs)
285
+ if reentering:
286
+ kargs[0] = rv
287
+ if reentering:
288
+ return rv
289
+
290
+ wrapped_method.__name__ = orig_mtd.__name__
291
+ wrapped_method.__doc__ = ((orig_mtd.__doc__ or '').rstrip('\n') +
292
+ "\n\nDecorated by :class:`{0.__module__:s}{0.__name__:s}`."
293
+ .format(cls))
294
+ wrapped_method.__dict__.update(orig_mtd.__dict__)
295
+ return wrapped_method
296
+
297
+ @classmethod
298
+ @_clsmtd_mixin_locked
299
+ def mixin_algo_deco(cls, targetcls):
300
+ """
301
+ Applies all the necessary decorators to the **targetcls**
302
+ :class:`AlgoComponent` class.
303
+ """
304
+ if not issubclass(targetcls, AlgoComponent):
305
+ raise RuntimeError('This class can only be mixed in AlgoComponent classes.')
306
+ for targetmtd, hooks, prehooks, reenter in [('prepare',
307
+ cls._MIXIN_PREPARE_HOOKS,
308
+ cls._MIXIN_PREPARE_PREHOOKS,
309
+ False),
310
+ ('fail_execute',
311
+ cls._MIXIN_FAIL_EXECUTE_HOOKS, (),
312
+ False),
313
+ ('execute_finalise',
314
+ cls._MIXIN_EXECUTE_FINALISE_HOOKS, (),
315
+ False),
316
+ ('postfix',
317
+ cls._MIXIN_POSTFIX_HOOKS,
318
+ cls._MIXIN_POSTFIX_PREHOOKS,
319
+ False),
320
+ ('spawn_hook',
321
+ cls._MIXIN_SPAWN_HOOKS, (),
322
+ False),
323
+ ('spawn_command_options',
324
+ cls._MIXIN_CLI_OPTS_EXTEND, (),
325
+ True),
326
+ ('spawn_stdin_options',
327
+ cls._MIXIN_STDIN_OPTS_EXTEND, (),
328
+ True), ]:
329
+ if hooks or prehooks:
330
+ setattr(targetcls, targetmtd,
331
+ cls._get_algo_wrapped(targetcls, targetmtd, hooks, prehooks, reenter))
332
+ return targetcls
333
+
334
+ @classmethod
335
+ @_clsmtd_mixin_locked
336
+ def mixin_execute_overwrite(cls):
337
+ return cls._MIXIN_EXECUTE_OVERWRITE
338
+
339
+ @classmethod
340
+ def mixin_execute_companion(cls):
341
+ """Find on which class "super" should be called (if_MIXIN_EXECUTE_OVERWRITE is used)."""
342
+ comp = getattr(cls, '_algo_meta_execute_companion', ())
343
+ if not comp:
344
+ raise RuntimeError("unable to find a suitable companion class")
345
+ return comp
346
+
347
+
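A minimal mixin sketch, following the hook semantics described above (the class, attribute, and method names are hypothetical): it adds one footprint attribute and a hook that runs before ``prepare``.

.. code-block:: python

    import footprints

    class GreetingsMixin(AlgoComponentDecoMixin):
        """Say hello before prepare() and add a 'greeting' attribute."""

        _MIXIN_EXTRA_FOOTPRINTS = (footprints.Footprint(
            attr=dict(greeting=dict(optional=True, default='hello')),
        ),)

        def _say_hello(self, rh, opts):
            # Same argument list as the decorated prepare() method
            print(self.greeting)

        _MIXIN_PREPARE_PREHOOKS = (_say_hello,)

    class MyAlgo(GreetingsMixin, AlgoComponent):
        """Hypothetical concrete component decorated by the mixin."""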
348
+ class AlgoComponentMpiDecoMixin(AlgoComponentDecoMixin):
349
+ """
350
+ This is the base class for Mixin class targeting :class:`Parallel`
351
+ classes.
352
+
353
+ It inherits all the behaviour of the :class:`AlgoComponentDecoMixin` base
354
+ class. But in addition, it allows to decorate additional :class:`Parallel`'s
355
+ methods using the following class variables:
356
+
357
+ * :data:`_MIXIN_MPIBINS_HOOKS`: Tuple of methods that will be
358
+ executed after the original ``_bootstrap_mpibins_hack`` method. Such
359
+ methods will receive five arguments (``self`` set aside):
360
+
361
+ * The list of :class:`mpitools.MpiBinaryDescription` objects returned
362
+ by the original ``_bootstrap_mpibins_hack`` method;
363
+ * The list of :class:`mpitools.MpiBinaryDescription` objects as
364
+ provided by the first caller;
365
+ * The list of binary ResourceHandlers as provided to the ``run``
366
+ method;
367
+ * A dictionary of options as provided to the ``run`` method;
368
+ * A boolean indicating if an MPI envelope is provided by the user.
369
+
370
+ * :data:`_MIXIN_MPIENVELOPE_HOOKS`: Tuple of methods that will be
371
+ executed after the original ``_bootstrap_mpienvelope_hack`` method. Such
372
+ methods will receive five arguments (``self`` set aside):
373
+
374
+ * The list of dictionaries describing the envelope returned
375
+ by the original ``_bootstrap_mpienvelope_hack`` method;
376
+ * The list of dictionaries describing the envelope as
377
+ provided by the first caller;
378
+ * The list of binary ResourceHandlers as provided to the ``run``
379
+ method;
380
+ * A dictionary of options as provided to the ``run`` method;
381
+ * The :class:`mpitools.MpiTool` that is used to generate the
382
+ MPI command line.
383
+
384
+ """
385
+
386
+ _MIXIN_MPIBINS_HOOKS = ()
387
+ _MIXIN_MPIENVELOPE_HOOKS = ()
388
+ _MIXIN_MPIENVELOPE_POSTHOOKS = ()
389
+
390
+ @classmethod
391
+ @_clsmtd_mixin_locked
392
+ def mixin_algo_deco(cls, targetcls):
393
+ """
394
+ Applies all the necessary decorators to the **targetcls**
395
+ :class:`AlgoComponent` class.
396
+ """
397
+ targetcls = AlgoComponentDecoMixin.mixin_algo_deco(targetcls)
398
+ if not issubclass(targetcls, Parallel):
399
+ raise RuntimeError('This class can only be mixed in Parallel classes.')
400
+ for targetmtd, hooks, prehooks, reenter in [('_bootstrap_mpibins_hack',
401
+ cls._MIXIN_MPIBINS_HOOKS, (),
402
+ True),
403
+ ('_bootstrap_mpienvelope_hack',
404
+ cls._MIXIN_MPIENVELOPE_HOOKS, (),
405
+ True),
406
+ ('_bootstrap_mpienvelope_posthack',
407
+ cls._MIXIN_MPIENVELOPE_POSTHOOKS, (),
408
+ True), ]:
409
+ if hooks or prehooks:
410
+ setattr(targetcls, targetmtd,
411
+ cls._get_algo_wrapped(targetcls, targetmtd, hooks, prehooks, reenter))
412
+ return targetcls
413
+
414
+
415
+ class AlgoComponentMeta(footprints.FootprintBaseMeta):
416
+ """Meta class for building :class:`AlgoComponent` classes.
417
+
418
+ In addition to performing the usual footprints' work, it processes mixin classes
419
+ that derive from the :class:`AlgoComponentDecoMixin` class. See the
420
+ documentation of this class for more details.
421
+ """
422
+
423
+ def __new__(cls, n, b, d):
424
+ # Mixin candidates: a mixin must only be dealt with once hence the
425
+ # condition on issubclass(base, AlgoComponent)
426
+ candidates = [base for base in b
427
+ if (issubclass(base, AlgoComponentDecoMixin) and
428
+ not issubclass(base, AlgoComponent))]
429
+ # Tweak footprints
430
+ todobases = [base for base in candidates if base.MIXIN_AUTO_FPTWEAK]
431
+ if todobases:
432
+ fplocal = d.get('_footprint', list())
433
+ if not isinstance(fplocal, list):
434
+ fplocal = [fplocal, ]
435
+ for base in todobases:
436
+ base.mixin_tweak_footprint(fplocal)
437
+ d['_footprint'] = fplocal
438
+ # Overwrite the execute method...
439
+ todobases_exc = [base for base in candidates if base.mixin_execute_overwrite() is not None]
440
+ if len(todobases_exc) > 1:
441
+ raise RuntimeError('Cannot overwrite < execute > multiple times: {:s}'
442
+ .format(','.join([base.__name__ for base in todobases_exc])))
443
+ if todobases_exc:
444
+ if 'execute' in d:
445
+ raise RuntimeError('< execute > is already defined in the target class: cannot proceed')
446
+ d['execute'] = todobases_exc[0].mixin_execute_overwrite()
447
+ # Create the class as usual
448
+ fpcls = super().__new__(cls, n, b, d)
449
+ if todobases_exc:
450
+ setattr(fpcls, '_algo_meta_execute_companion', fpcls)
451
+ # Apply decorators
452
+ todobases = [base for base in candidates if base.MIXIN_AUTO_DECO]
453
+ for base in reversed(todobases):
454
+ base.mixin_algo_deco(fpcls)
455
+ return fpcls
456
+
457
+
458
+ class AlgoComponent(footprints.FootprintBase, metaclass=AlgoComponentMeta):
459
+ """Component in charge of any kind of processing."""
460
+
461
+ _SERVERSYNC_RAISEONEXIT = True
462
+ _SERVERSYNC_RUNONSTARTUP = True
463
+ _SERVERSYNC_STOPONEXIT = True
464
+
465
+ _abstract = True
466
+ _collector = ('component',)
467
+ _footprint = dict(
468
+ info = 'Abstract algo component',
469
+ attr = dict(
470
+ engine = dict(
471
+ info = 'The way the executable should be run.',
472
+ values = ['algo', ]
473
+ ),
474
+ flyput = dict(
475
+ info = 'Activate a background job in charge of on-the-fly processing.',
476
+ type = bool,
477
+ optional = True,
478
+ default = False,
479
+ access = 'rwx',
480
+ doc_visibility = footprints.doc.visibility.GURU,
481
+ doc_zorder = -99,
482
+ ),
483
+ flypoll = dict(
484
+ info = 'The system method called by the flyput background job.',
485
+ optional = True,
486
+ default = 'io_poll',
487
+ access = 'rwx',
488
+ doc_visibility = footprints.doc.visibility.GURU,
489
+ doc_zorder = -99,
490
+ ),
491
+ flyargs = dict(
492
+ info = 'Arguments for the *flypoll* method.',
493
+ type = footprints.FPTuple,
494
+ optional = True,
495
+ default = footprints.FPTuple(),
496
+ doc_visibility = footprints.doc.visibility.GURU,
497
+ doc_zorder = -99,
498
+ ),
499
+ flymapping = dict(
500
+ info = 'Allow renaming of output files during on-the-fly processing.',
501
+ optional = True,
502
+ default = False,
503
+ access = 'rwx',
504
+ doc_visibility = footprints.doc.visibility.GURU,
505
+ doc_zorder = -99,
506
+ ),
507
+ timeout = dict(
508
+ info = 'Default timeout (in sec.) used when waiting for an expected resource.',
509
+ type = int,
510
+ optional = True,
511
+ default = 180,
512
+ doc_zorder = -50,
513
+ ),
514
+ server_run = dict(
515
+ info = 'Run the executable as a server.',
516
+ type = bool,
517
+ optional = True,
518
+ values = [False],
519
+ default = False,
520
+ access = 'rwx',
521
+ doc_visibility = footprints.doc.visibility.ADVANCED,
522
+ ),
523
+ serversync_method = dict(
524
+ info = 'The method that is used to synchronise with the server.',
525
+ optional = True,
526
+ doc_visibility = footprints.doc.visibility.GURU,
527
+ ),
528
+ serversync_medium = dict(
529
+ info = 'The medium that is used to synchronise with the server.',
530
+ optional = True,
531
+ doc_visibility = footprints.doc.visibility.GURU,
532
+ ),
533
+ extendpypath = dict(
534
+ info = "The list of things to be prepended in the python's path.",
535
+ type = footprints.FPList,
536
+ default = footprints.FPList([]),
537
+ optional = True
538
+ ),
539
+ )
540
+ )
541
+
542
+ def __init__(self, *args, **kw):
543
+ logger.debug('Algo component init %s', self.__class__)
544
+ self._fslog = list()
545
+ self._promises = None
546
+ self._expected = None
547
+ self._delayed_excs = list()
548
+ self._server_synctool = None
549
+ self._server_process = None
550
+ super().__init__(*args, **kw)
551
+
552
+ @property
553
+ def realkind(self):
554
+ """Default kind is ``algo``."""
555
+ return 'algo'
556
+
557
+ @property
558
+ def fslog(self):
559
+ """Changes on the filesystem during the execution."""
560
+ return self._fslog
561
+
562
+ def fstag(self):
563
+ """Defines a tag specific to the current algo component."""
564
+ return '-'.join((self.realkind, self.engine))
565
+
566
+ def fsstamp(self, opts):
567
+ """Ask the current context to put a stamp on file system."""
568
+ self.context.fstrack_stamp(tag=self.fstag())
569
+
570
+ def fscheck(self, opts):
571
+ """Ask the current context to check changes on file system since last stamp."""
572
+ self._fslog.append(self.context.fstrack_check(tag=self.fstag()))
573
+
574
+ @property
575
+ def promises(self):
576
+ """Build and return list of actual promises of the current component."""
577
+ if self._promises is None:
578
+ self._promises = [
579
+ x for x in self.context.sequence.outputs()
580
+ if x.rh.provider.expected
581
+ ]
582
+ return self._promises
583
+
584
+ @property
585
+ def expected_resources(self):
586
+ """Return the list of really expected inputs."""
587
+ if self._expected is None:
588
+ self._expected = [
589
+ x for x in self.context.sequence.effective_inputs()
590
+ if x.rh.is_expected()
591
+ ]
592
+ return self._expected
593
+
594
+ def delayed_exception_add(self, exc, traceback=True):
595
+ """Store the exception so that it will be handled at the end of the run."""
596
+ logger.error("An exception is delayed")
597
+ if traceback:
598
+ (exc_type, exc_value, exc_traceback) = sys.exc_info()
599
+ print('Exception type: {!s}'.format(exc_type))
600
+ print('Exception info: {!s}'.format(exc_value))
601
+ print('Traceback:')
602
+ print("\n".join(py_traceback.format_tb(exc_traceback)))
603
+ self._delayed_excs.append(exc)
604
+
605
+ def algoassert(self, assertion, msg=''):
606
+ if not assertion:
607
+ raise AlgoComponentAssertionError(msg)
608
+
609
+ def grab(self, sec, comment='resource', sleep=10, timeout=None):
610
+ """Wait for a given resource and get it if expected."""
611
+ local = sec.rh.container.localpath()
612
+ self.system.header('Wait for ' + comment + ' ... [' + local + ']')
613
+ if timeout is None:
614
+ timeout = self.timeout
615
+ if sec.rh.wait(timeout=timeout, sleep=sleep):
616
+ if sec.rh.is_expected():
617
+ sec.get(incache=True)
618
+ elif sec.fatal:
619
+ logger.critical('Missing expected resource <%s>', local)
620
+ raise ValueError('Could not get ' + local)
621
+ else:
622
+ logger.error('Missing expected resource <%s>', local)
623
+
624
+ def export(self, packenv):
625
+ """Export environment variables in given pack."""
626
+ for k, v in from_config(section=packenv).items():
627
+ if k not in self.env:
628
+ logger.info('Setting %s env %s = %s', packenv.upper(), k, v)
629
+ self.env[k] = v
630
+
631
+ def prepare(self, rh, opts):
632
+ """Set some defaults env values."""
633
+ if opts.get('fortran', True):
634
+ self.export('fortran')
635
+
636
+ def absexcutable(self, xfile):
637
+ """Retuns the absolute pathname of the ``xfile`` executable."""
638
+ absx = self.system.path.abspath(xfile)
639
+ return absx
640
+
641
+ def flyput_method(self):
642
+ """Check out what could be a valid io_poll command."""
643
+ return getattr(self, 'io_poll_method', getattr(self.system, self.flypoll, None))
644
+
645
+ def flyput_args(self):
646
+ """Return actual io_poll prefixes."""
647
+ return getattr(self, 'io_poll_args', tuple(self.flyargs))
648
+
649
+ def flyput_kwargs(self):
650
+ """Return actual io_poll prefixes."""
651
+ return getattr(self, 'io_poll_kwargs', dict())
652
+
653
+ def flyput_check(self):
654
+ """Check default args for io_poll command."""
655
+ actual_args = list()
656
+ if self.flymapping:
657
+ # No checks when mapping is activated
658
+ return self.flyput_args()
659
+ else:
660
+ for arg in self.flyput_args():
661
+ logger.info('Check arg <%s>', arg)
662
+ if any([x.rh.container.basename.startswith(arg) for x in self.promises]):
663
+ logger.info(
664
+ 'Match some promise %s',
665
+ str([x.rh.container.basename for x in self.promises
666
+ if x.rh.container.basename.startswith(arg)])
667
+ )
668
+ actual_args.append(arg)
669
+ else:
670
+ logger.info('Do not match any promise %s',
671
+ str([x.rh.container.basename for x in self.promises]))
672
+ return actual_args
673
+
674
+ def flyput_sleep(self):
675
+ """Return a sleeping time in seconds between io_poll commands."""
676
+ return getattr(self, 'io_poll_sleep', self.env.get('IO_POLL_SLEEP', 20))
677
+
678
+ def flyput_outputmapping(self, item):
679
+ """Map output to another filename."""
680
+ return item, 'unknown'
681
+
682
+ def _flyput_job_internal_search(self, io_poll_method, io_poll_args, io_poll_kwargs):
683
+ data = list()
684
+ for arg in io_poll_args:
685
+ logger.info('Polling check arg %s', arg)
686
+ rc = io_poll_method(arg, **io_poll_kwargs)
687
+ try:
688
+ data.extend(rc.result)
689
+ except AttributeError:
690
+ data.extend(rc)
691
+ data = [x for x in data if x]
692
+ logger.info('Polling retrieved data %s', str(data))
693
+ return data
694
+
695
+ def _flyput_job_internal_put(self, data):
696
+ for thisdata in data:
697
+ if self.flymapping:
698
+ mappeddata, mappedfmt = self.flyput_outputmapping(thisdata)
699
+ if not mappeddata:
700
+ raise AlgoComponentError('The mapping method failed for {:s}.'.format(thisdata))
701
+ if thisdata != mappeddata:
702
+ logger.info('Linking <%s> to <%s> (fmt=%s) before put',
703
+ thisdata, mappeddata, mappedfmt)
704
+ self.system.cp(thisdata, mappeddata, intent='in', fmt=mappedfmt)
705
+ else:
706
+ mappeddata = thisdata
707
+ candidates = [x for x in self.promises
708
+ if x.rh.container.abspath == self.system.path.abspath(mappeddata)]
709
+ if candidates:
710
+ logger.info('Polled data is promised <%s>', mappeddata)
711
+ bingo = candidates.pop()
712
+ bingo.put(incache=True)
713
+ else:
714
+ logger.warning('Polled data not promised <%s>', mappeddata)
715
+
716
+ def flyput_job(self, io_poll_method, io_poll_args, io_poll_kwargs,
717
+ event_complete, event_free, queue_context):
718
+ """Poll new data resources."""
719
+ logger.info('Polling with method %s', str(io_poll_method))
720
+ logger.info('Polling with args %s', str(io_poll_args))
721
+
722
+ time_sleep = self.flyput_sleep()
723
+ redo = True
724
+
725
+ # Start recording the changes in the current context
726
+ ctxrec = self.context.get_recorder()
727
+
728
+ while redo and not event_complete.is_set():
729
+ event_free.clear()
730
+ try:
731
+ data = self._flyput_job_internal_search(io_poll_method,
732
+ io_poll_args, io_poll_kwargs)
733
+ self._flyput_job_internal_put(data)
734
+ except Exception as trouble:
735
+ logger.error('Polling trouble: %s. %s',
736
+ str(trouble), py_traceback.format_exc())
737
+ redo = False
738
+ finally:
739
+ event_free.set()
740
+ if redo and not data and not event_complete.is_set():
741
+ logger.info('Get asleep for %d seconds...', time_sleep)
742
+ self.system.sleep(time_sleep)
743
+
744
+ # Stop recording and send back the results
745
+ ctxrec.unregister()
746
+ logger.info('Sending the Context recorder to the master process.')
747
+ queue_context.put(ctxrec)
748
+ queue_context.close()
749
+
750
+ if redo:
751
+ logger.info('Polling exit on complete event')
752
+ else:
753
+ logger.warning('Polling exit on abort')
754
+
755
+ def flyput_begin(self):
756
+ """Launch a co-process to handle promises."""
757
+
758
+ nope = (None, None, None, None)
759
+ if not self.flyput:
760
+ return nope
761
+
762
+ sh = self.system
763
+ sh.subtitle('On the fly - Begin')
764
+
765
+ if not self.promises:
766
+ logger.info('No promise, no co-process')
767
+ return nope
768
+
769
+ # Find out a polling method
770
+ io_poll_method = self.flyput_method()
771
+ if not io_poll_method:
772
+ logger.error('No method or shell function defined for polling data')
773
+ return nope
774
+
775
+ # Be sure that some default args could match local promises names
776
+ io_poll_args = self.flyput_check()
777
+ if not io_poll_args:
778
+ logger.error('Could not check default arguments for polling data')
779
+ return nope
780
+
781
+ # Additional named attributes
782
+ io_poll_kwargs = self.flyput_kwargs()
783
+
784
+ # Define events for a nice termination
785
+ event_stop = multiprocessing.Event()
786
+ event_free = multiprocessing.Event()
787
+ queue_ctx = multiprocessing.Queue()
788
+
789
+ p_io = multiprocessing.Process(
790
+ name=self.footprint_clsname(),
791
+ target=self.flyput_job,
792
+ args=(io_poll_method, io_poll_args, io_poll_kwargs, event_stop, event_free, queue_ctx),
793
+ )
794
+
795
+ # The co-process is started
796
+ p_io.start()
797
+
798
+ return (p_io, event_stop, event_free, queue_ctx)
799
+
800
+ def manual_flypolling(self):
801
+ """Call the flyput method and returns the list of newly available files."""
802
+ # Find out a polling method
803
+ io_poll_method = self.flyput_method()
804
+ if not io_poll_method:
805
+ raise AlgoComponentError('Unable to find an io_poll_method')
806
+ # Find out some polling prefixes
807
+ io_poll_args = self.flyput_check()
808
+ if not io_poll_args:
809
+ raise AlgoComponentError('Unable to find an io_poll_args')
810
+ # Additional named attributes
811
+ io_poll_kwargs = self.flyput_kwargs()
812
+ # Starting polling each of the prefixes
813
+ return self._flyput_job_internal_search(io_poll_method,
814
+ io_poll_args, io_poll_kwargs)
815
+
816
+ def manual_flypolling_job(self):
817
+ """Call the flyput method and deal with promised files."""
818
+ data = self.manual_flypolling()
819
+ self._flyput_job_internal_put(data)
820
+
821
+ def flyput_end(self, p_io, e_complete, e_free, queue_ctx):
822
+ """Wait for the co-process in charge of promises."""
823
+ e_complete.set()
824
+ logger.info('Waiting for polling process... <%s>', p_io.pid)
825
+ t0 = date.now()
826
+ e_free.wait(60)
827
+ # Get the Queue and update the context
828
+ time_sleep = self.flyput_sleep()
829
+ try:
830
+ # allow 5 sec to put data into queue (it should be more than enough)
831
+ ctxrec = queue_ctx.get(block=True, timeout=time_sleep + 5)
832
+ except queue.Empty:
833
+ logger.warning("Impossible to get the Context recorder")
834
+ ctxrec = None
835
+ finally:
836
+ queue_ctx.close()
837
+ if ctxrec is not None:
838
+ ctxrec.replay_in(self.context)
839
+ p_io.join(30)
840
+ t1 = date.now()
841
+ waiting = t1 - t0
842
+ logger.info('Waiting for polling process took %f seconds', waiting.total_seconds())
843
+ if p_io.is_alive():
844
+ logger.warning('Force termination of polling process')
845
+ p_io.terminate()
846
+ logger.info('Polling still alive? %s', str(p_io.is_alive()))
847
+ return not p_io.is_alive()
848
+
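The fly-put machinery above is driven entirely by the ``flyput*`` footprint attributes. A sketch of how a component might be configured to poll for promised outputs while the binary runs (the attribute values below are assumptions, not tested settings):

.. code-block:: python

    from vortex import toolbox

    tbalgo = toolbox.algo(
        engine='blind',           # hypothetical concrete component
        flyput=True,              # start the background polling co-process
        flypoll='io_poll',        # System method used for polling (the default)
        flyargs=('PF', 'ICMSH'),  # prefixes checked against promised basenames
    )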
849
+ def server_begin(self, rh, opts):
850
+ """Start a subprocess and run the server in it."""
851
+ self._server_event = multiprocessing.Event()
852
+ self._server_process = multiprocessing.Process(
853
+ name=self.footprint_clsname(),
854
+ target=self.server_job,
855
+ args=(rh, opts)
856
+ )
857
+ self._server_process.start()
858
+
859
+ def server_job(self, rh, opts):
860
+ """Actually run the server and catch all Exceptions.
861
+
862
+ If the server crashes, is killed or whatever, the Exception is displayed
863
+ and the appropriate Event is set.
864
+ """
865
+ self.system.signal_intercept_on()
866
+ try:
867
+ self.execute_single(rh, opts)
868
+ except Exception:
869
+ (exc_type, exc_value, exc_traceback) = sys.exc_info()
870
+ print('Exception type: {!s}'.format(exc_type))
871
+ print('Exception info: {!s}'.format(exc_value))
872
+ print('Traceback:')
873
+ print("\n".join(py_traceback.format_tb(exc_traceback)))
874
+ # Alert the main process of the error
875
+ self._server_event.set()
876
+
877
+ def server_alive(self):
878
+ """Is the server still running ?"""
879
+ return (self._server_process is not None and
880
+ self._server_process.is_alive())
881
+
882
+ def server_end(self):
883
+ """End the server.
884
+
885
+ A first attempt is made to terminate it nicely. If it doesn't work,
886
+ a SIGTERM is sent.
887
+ """
888
+ rc = False
889
+ # This test should always succeed...
890
+ if self._server_synctool is not None and self._server_process is not None:
891
+ # Is the process still running?
892
+ if self._server_process.is_alive():
893
+ # Try to stop it nicely
894
+ if self._SERVERSYNC_STOPONEXIT and self._server_synctool.trigger_stop():
895
+ t0 = date.now()
896
+ self._server_process.join(30)
897
+ waiting = date.now() - t0
898
+ logger.info('Waiting for the server to stop took %f seconds',
899
+ waiting.total_seconds())
900
+ rc = not self._server_event.is_set()
901
+ # Be less nice if needed...
902
+ if (not self._SERVERSYNC_STOPONEXIT) or self._server_process.is_alive():
903
+ logger.warning('Force termination of the server process')
904
+ self._server_process.terminate()
905
+ self.system.sleep(1) # Allow some time for the process to terminate
906
+ if not self._SERVERSYNC_STOPONEXIT:
907
+ rc = False
908
+ else:
909
+ rc = not self._server_event.is_set()
910
+ logger.info('Server still alive? %s', str(self._server_process.is_alive()))
911
+ # We are done with the server
912
+ self._server_synctool = None
913
+ self._server_process = None
914
+ del self._server_event
915
+ # Check the rc
916
+ if not rc:
917
+ raise AlgoComponentError('The server process ended badly.')
918
+ return rc
919
+
920
+ def spawn_pre_dirlisting(self):
921
+ """Print a directory listing just before run."""
922
+ self.system.subtitle('{:s} : directory listing (pre-execution)'.format(self.realkind))
923
+ self.system.dir(output=False, fatal=False)
924
+
925
+ def spawn_hook(self):
926
+ """Last chance to say something before execution."""
927
+ pass
928
+
929
+ def spawn(self, args, opts, stdin=None):
930
+ """
931
+ Spawn in the current system the command as defined in raw ``args``.
932
+
933
+ The following environment variables can drive part of the execution:
934
+
935
+ * VORTEX_DEBUG_ENV: dump the current environment before spawn
936
+ """
937
+ sh = self.system
938
+
939
+ if self.env.true('vortex_debug_env'):
940
+ sh.subtitle('{:s} : dump environment (os bound: {!s})'.format(
941
+ self.realkind,
942
+ self.env.osbound()
943
+ ))
944
+ self.env.osdump()
945
+
946
+ # On-the-fly coprocessing initialisation
947
+ p_io, e_complete, e_free, q_ctx = self.flyput_begin()
948
+
949
+ sh.remove('core')
950
+ sh.softlink('/dev/null', 'core')
951
+ self.spawn_hook()
952
+ self.target.spawn_hook(sh)
953
+ self.spawn_pre_dirlisting()
954
+ sh.subtitle('{:s} : start execution'.format(self.realkind))
955
+ try:
956
+ sh.spawn(args, output=False, stdin=stdin, fatal=opts.get('fatal', True))
957
+ finally:
958
+ # On-the-fly coprocessing cleaning
959
+ if p_io:
960
+ self.flyput_end(p_io, e_complete, e_free, q_ctx)
961
+
962
+ def spawn_command_options(self):
963
+ """Prepare options for the resource's command line."""
964
+ return dict()
965
+
966
+ def spawn_command_line(self, rh):
967
+ """Split the shell command line of the resource to be run."""
968
+ opts = self.spawn_command_options()
969
+ return shlex.split(rh.resource.command_line(**opts))
970
+
971
+ def spawn_stdin_options(self):
972
+ """Prepare options for the resource's stdin generator."""
973
+ return dict()
974
+
975
+ def spawn_stdin(self, rh):
976
+ """Generate the stdin File-Like object of the resource to be run."""
977
+ opts = self.spawn_stdin_options()
978
+ stdin_text = rh.resource.stdin_text(**opts)
979
+ if stdin_text is not None:
980
+ plocale = locale.getlocale()[1] or 'ascii'
981
+ tmpfh = tempfile.TemporaryFile(dir=self.system.pwd(), mode='w+b')
982
+ if isinstance(stdin_text, str):
983
+ tmpfh.write(stdin_text.encode(plocale))
984
+ else:
985
+ tmpfh.write(stdin_text)
986
+ tmpfh.seek(0)
987
+ return tmpfh
988
+ else:
989
+ return None
990
+
991
+ def execute_single(self, rh, opts):
992
+ """Abstract method.
993
+
994
+ When server_run is True, this method is used to start the server.
995
+ Otherwise, this method is called by each :meth:`execute` call.
996
+ """
997
+ pass
998
+
999
+ def execute(self, rh, opts):
1000
+ """Abstract method."""
1001
+ if self.server_run:
1002
+ # First time here ?
1003
+ if self._server_synctool is None:
1004
+ if self.serversync_method is None:
1005
+ raise ValueError('The serversync_method must be provided.')
1006
+ self._server_synctool = footprints.proxy.serversynctool(
1007
+ method=self.serversync_method,
1008
+ medium=self.serversync_medium,
1009
+ raiseonexit=self._SERVERSYNC_RAISEONEXIT,
1010
+ )
1011
+ self._server_synctool.set_servercheck_callback(self.server_alive)
1012
+ self.server_begin(rh, opts)
1013
+ # Wait for the first request
1014
+ self._server_synctool.trigger_wait()
1015
+ if self._SERVERSYNC_RUNONSTARTUP:
1016
+ self._server_synctool.trigger_run()
1017
+ else:
1018
+ # Acknowledge that we are ready and wait for the next request
1019
+ self._server_synctool.trigger_run()
1020
+ else:
1021
+ self.execute_single(rh, opts)
1022
+
1023
+ def fail_execute(self, e, rh, kw):
1024
+ """This method is called if :meth:`execute` raise an exception."""
1025
+ pass
1026
+
1027
+ def execute_finalise(self, opts):
1028
+ """Abstract method.
1029
+
1030
+ This method is called unconditionally when :meth:`execute` exits (even
1031
+ if an Exception was raised).
1032
+ """
1033
+ if self.server_run:
1034
+ self.server_end()
1035
+
1036
+ def postfix_post_dirlisting(self):
1037
+ self.system.subtitle('{:s} : directory listing (post-run)'.format(self.realkind))
1038
+ self.system.dir(output=False, fatal=False)
1039
+
1040
+ def postfix(self, rh, opts):
1041
+ """Some basic informations."""
1042
+ self.postfix_post_dirlisting()
1043
+
1044
+ def dumplog(self, opts):
1045
+ """Dump to local file the internal log of the current algo component."""
1046
+ self.system.pickle_dump(self.fslog, 'log.' + self.fstag())
1047
+
1048
+ def delayed_exceptions(self, opts):
1049
+ """Gather all the delayed exceptions and raises one if necessary."""
1050
+ if len(self._delayed_excs) > 0:
1051
+ excstmp = self._delayed_excs
1052
+ self._delayed_excs = list()
1053
+ raise DelayedAlgoComponentError(excstmp)
1054
+
1055
+ def valid_executable(self, rh):
1056
+ """
1057
+ Return a boolean value according to the effective executable nature
1058
+ of the resource handler provided.
1059
+ """
1060
+ return True
1061
+
1062
+ def abortfabrik(self, step, msg):
1063
+ """A shortcut to avoid next steps of the run."""
1064
+
1065
+ def fastexit(self, *args, **kw):
1066
+ logger.warning('Run <%s> skipped because abort occurred [%s]', step, msg)
1067
+
1068
+ return fastexit
1069
+
1070
+ def abort(self, msg='Not documented'):
1071
+ """A shortcut to avoid next steps of the run."""
1072
+ for step in ('prepare', 'execute', 'postfix'):
1073
+ setattr(self, step, self.abortfabrik(step, msg))
1074
+
1075
+ def run(self, rh=None, **kw):
1076
+ """Sequence for execution : prepare / execute / postfix."""
1077
+ self._status = True
1078
+
1079
+ # Get instance shortcuts to context and system objects
1080
+ self.ticket = vortex.sessions.current()
1081
+ self.context = self.ticket.context
1082
+ self.system = self.context.system
1083
+ self.target = kw.pop('target', None)
1084
+ if self.target is None:
1085
+ self.target = self.system.default_target
1086
+
1087
+ # Before trying to do anything, check the executable
1088
+ if not self.valid_executable(rh):
1089
+ logger.warning('Resource %s is not a valid executable', rh.resource)
1090
+ return False
1091
+
1092
+ # A cloned environment will be bound to the OS
1093
+ self.env = self.context.env.clone()
1094
+ with self.env:
1095
+
1096
+ # The actual "run" recipe
1097
+ self.prepare(rh, kw) # 1
1098
+ self.fsstamp(kw) # 2
1099
+ try:
1100
+ self.execute(rh, kw) # 3
1101
+ except Exception as e:
1102
+ self.fail_execute(e, rh, kw) # 3.1
1103
+ raise
1104
+ finally:
1105
+ self.execute_finalise(kw) # 3.2
1106
+ self.fscheck(kw) # 4
1107
+ self.postfix(rh, kw) # 5
1108
+ self.dumplog(kw) # 6
1109
+ self.delayed_exceptions(kw) # 7
1110
+
1111
+ # Free local references
1112
+ self.env = None
1113
+ self.system = None
1114
+
1115
+ return self._status
1116
+
1117
+ def quickview(self, nb=0, indent=0):
1118
+ """Standard glance to objects."""
1119
+ tab = ' ' * indent
1120
+ print('{}{:02d}. {:s}'.format(tab, nb, repr(self)))
1121
+ for subobj in ('kind', 'engine', 'interpreter'):
1122
+ obj = getattr(self, subobj, None)
1123
+ if obj:
1124
+ print('{} {:s}: {!s}'.format(tab, subobj, obj))
1125
+ print()
1126
+
1127
+ def setlink(self, initrole=None, initkind=None, initname=None, inittest=lambda x: True):
1128
+ """Set a symbolic link for actual resource playing defined role."""
1129
+ initsec = [
1130
+ x for x in self.context.sequence.effective_inputs(role=initrole, kind=initkind)
1131
+ if inittest(x.rh)
1132
+ ]
1133
+
1134
+ if not initsec:
1135
+ logger.warning(
1136
+ 'Could not find logical role %s with kind %s - assuming already renamed',
1137
+ initrole, initkind
1138
+ )
1139
+
1140
+ if len(initsec) > 1:
1141
+ logger.warning('More than one role %s with kind %s',
1142
+ initrole, initkind)
1143
+
1144
+ if initname is not None:
1145
+ for l in [x.rh.container.localpath() for x in initsec]:
1146
+ if not self.system.path.exists(initname):
1147
+ self.system.symlink(l, initname)
1148
+ break
1149
+
1150
+ return initsec
1151
+
1152
+
1153
+ class PythonFunction(AlgoComponent):
1154
+ """Execute a function defined in Python module. The function is passed the
1155
+ current :class:`sequence <vortex.layout.dataflow.Sequence>`, as well as a
1156
+ keyword arguments described by attribute ``func_kwargs``. Example:
1157
+
1158
+ .. code-block:: python
1159
+
1160
+ >>> exe = toolbox.executable(
1161
+ ... role = 'Script',
1162
+ ... format = 'ascii',
1163
+ ... hostname = 'localhost',
1164
+ ... kind = 'script',
1165
+ ... language = 'python',
1166
+ ... local = 'module.py',
1167
+ ... remote = '/path/to/module.py',
1168
+ ... tube = 'file',
1169
+ ... )
1170
+ >>> tbalgo = toolbox.algo(
1171
+ ... engine="function",
1172
+ ... func_name="my_plugin_entry_point_function",
1173
+ ... func_kwargs={"ntasks": 35, "subnproc": 4},
1174
+ ... )
1175
+ >>> tbalgo.run(exe[0])
1176
+
1177
+ .. code-block:: python
1178
+
1179
+ # /path/to/module.py
1180
+ # ...
1181
+ def my_plugin_entry_point_function(
1182
+ sequence, ntasks, subnproc,
1183
+ ):
1184
+ for sec in sequence.effective_inputs(role="gridpoint"):
1185
+ # ...
1186
+ """
1187
+
1188
+ _footprint = dict(
1189
+ info = "Execute a Python function in a given module",
1190
+
1191
+ attr = dict(
1192
+ engine = dict(
1193
+ values = ["function"]
1194
+ ),
1195
+ func_name = dict(
1196
+ info="The function's name"
1197
+ ),
1198
+ func_kwargs = dict(
1199
+ info=(
1200
+ "A dictionary containing the function's "
1201
+ "keyword arguments"
1202
+ ),
1203
+ type=footprints.FPDict,
1204
+ default=footprints.FPDict({}),
1205
+ optional=True,
1206
+ ),
1207
+ )
1208
+ )
1209
+
1210
+ def prepare(self, rh, opts):
1211
+ spec = importlib.util.spec_from_file_location(
1212
+ name="module", location=rh.container.localpath()
1213
+ )
1214
+ mod = importlib.util.module_from_spec(spec)
1215
+ sys.path.extend(self.extendpypath)
1216
+ try:
1217
+ spec.loader.exec_module(mod)
1218
+ except AttributeError:
1219
+ raise
1220
+ self.func = getattr(mod, self.func_name)
1221
+
1222
+ def execute(self, rh, opts):
1223
+ self.func(
1224
+ self.context.sequence, **self.func_kwargs,
1225
+ )
1226
+
1227
+ def execute_finalise(self, opts):
1228
+ for p in self.extendpypath:
1229
+ sys.path.remove(p)
1230
+
1231
+
1232
+ class ExecutableAlgoComponent(AlgoComponent):
1233
+ """Component in charge of running executable resources."""
1234
+
1235
+ _abstract = True
1236
+
1237
+ def valid_executable(self, rh):
1238
+ """
1239
+ Return a boolean value according to the effective executable nature
1240
+ of the resource handler provided.
1241
+ """
1242
+ return rh is not None
1243
+
1244
+
1245
+ class xExecutableAlgoComponent(ExecutableAlgoComponent):
1246
+ """Component in charge of running executable resources."""
1247
+
1248
+ _abstract = True
1249
+
1250
+ def valid_executable(self, rh):
1251
+ """
1252
+ Return a boolean value according to the effective executable nature
1253
+ of the resource handler provided.
1254
+ """
1255
+ rc = super().valid_executable(rh)
1256
+ if rc:
1257
+ # Ensure that the input file is executable
1258
+ xrh = rh if isinstance(rh, (list, tuple)) else [rh, ]
1259
+ for arh in xrh:
1260
+ self.system.xperm(arh.container.localpath(), force=True)
1261
+ return rc
1262
+
1263
+
1264
+ class TaylorRun(AlgoComponent):
1265
+ """
1266
+ Run any taylorism Worker in the current environment.
1267
+
1268
+ This abstract class includes helpers to use the taylorism package in order
1269
+ to introduce an external parallelisation. It is designed to work well with a
1270
+ taylorism Worker class that inherits from
1271
+ :class:`vortex.tools.parallelism.TaylorVortexWorker`.
1272
+ """
1273
+
1274
+ _abstract = True
1275
+ _footprint = dict(
1276
+ info = 'Abstract algo component based on the taylorism package.',
1277
+ attr = dict(
1278
+ kind = dict(),
1279
+ verbose = dict(
1280
+ info = 'Run in verbose mode',
1281
+ type = bool,
1282
+ default = False,
1283
+ optional = True,
1284
+ doc_zorder = -50,
1285
+ ),
1286
+ ntasks = dict(
1287
+ info = 'The maximum number of parallel tasks',
1288
+ type = int,
1289
+ default = DelayedEnvValue('VORTEX_SUBMIT_TASKS', 1),
1290
+ optional = True
1291
+ ),
1292
+ )
1293
+ )
1294
+
1295
+ def __init__(self, *kargs, **kwargs):
1296
+ super().__init__(*kargs, **kwargs)
1297
+ self._boss = None
1298
+
1299
+ def _default_common_instructions(self, rh, opts):
1300
+ """Create a common instruction dictionary that will be used by the workers."""
1301
+ return dict(kind=self.kind, taskdebug=self.verbose)
1302
+
1303
+ def _default_pre_execute(self, rh, opts):
1304
+ """Various initialisations. In particular it creates the task scheduler (Boss)."""
1305
+ # Start the task scheduler
1306
+ self._boss = Boss(verbose=self.verbose,
1307
+ scheduler=footprints.proxy.scheduler(limit='threads', max_threads=self.ntasks))
1308
+ self._boss.make_them_work()
1309
+
1310
+ def _add_instructions(self, common_i, individual_i):
1311
+ """Give a new set of instructions to the Boss."""
1312
+ self._boss.set_instructions(common_i, individual_i)
1313
+
1314
+ def _default_post_execute(self, rh, opts):
1315
+ """Summarise the results of the various tasks that were run."""
1316
+ logger.info("All the input files were dealt with: now waiting for the parallel processing to finish")
1317
+ self._boss.wait_till_finished()
1318
+ logger.info("The parallel processing has finished. here are the results:")
1319
+ report = self._boss.get_report()
1320
+ prp = ParallelResultParser(self.context)
1321
+ for r in report['workers_report']:
1322
+ rc = prp(r)
1323
+ if isinstance(rc, Exception):
1324
+ self.delayed_exception_add(rc, traceback=False)
1325
+ rc = False
1326
+ self._default_rc_action(rh, opts, r, rc)
1327
+
1328
+ def _default_rc_action(self, rh, opts, report, rc):
1329
+ """How should we process the return code ?"""
1330
+ if not rc:
1331
+ logger.warning("Apparently something went sideways with this task (rc=%s).",
1332
+ str(rc))
1333
+
1334
+ def execute(self, rh, opts):
1335
+ """
1336
+ This should be adapted to your needs...
1337
+
1338
+ A usual sequence is::
1339
+
1340
+ self._default_pre_execute(rh, opts)
1341
+ common_i = self._default_common_instructions(rh, opts)
1342
+ # Update the common instructions
1343
+ common_i.update(dict(someattribute='Toto', ))
1344
+
1345
+ # Your own code here
1346
+
1347
+ # Give some instructions to the boss
1348
+ self._add_instructions(common_i, dict(someattribute=['Toto', ],))
1349
+
1350
+ # Your own code here
1351
+
1352
+ self._default_post_execute(rh, opts)
1353
+
1354
+ """
1355
+ raise NotImplementedError
1356
+
1357
+
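An illustrative subclass (all names hypothetical), following the sequence given in the ``execute`` docstring above:

.. code-block:: python

    class MyParaAlgo(TaylorRun):
        """Hypothetical component distributing one task per member."""

        _footprint = dict(
            info='Example taylorism-based component',
            attr=dict(kind=dict(values=['my_para_algo'])),
        )

        def execute(self, rh, opts):
            self._default_pre_execute(rh, opts)
            common_i = self._default_common_instructions(rh, opts)
            common_i.update(dict(someattribute='Toto'))
            # One set of individual instructions per (hypothetical) member
            self._add_instructions(common_i, dict(member=[1, 2, 3]))
            self._default_post_execute(rh, opts)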
1358
+ class Expresso(ExecutableAlgoComponent):
1359
+ """Run a script resource in the good environment."""
1360
+
1361
+ _footprint = dict(
1362
+ info = 'AlgoComponent that simply runs a script',
1363
+ attr = dict(
1364
+ interpreter = dict(
1365
+ info = 'The interpreter needed to run the script.',
1366
+ values = ['current', 'awk', 'ksh', 'bash', 'perl', 'python']
1367
+ ),
1368
+ interpreter_path = dict(
1369
+ info = 'The interpreter command.',
1370
+ optional = True,
1371
+ ),
1372
+ engine = dict(
1373
+ values = ['exec', 'launch']
1374
+ ),
1375
+ )
1376
+ )
1377
+
1378
+ @property
1379
+ def _actual_interpreter(self):
1380
+ """Return the interpreter command."""
1381
+ if self.interpreter == 'current':
1382
+ if self.interpreter_path is not None:
1383
+ raise ValueError("*interpreter=current* and *interpreter_path* attributes are incompatible")
1384
+ return sys.executable
1385
+ else:
1386
+ if self.interpreter_path is None:
1387
+ return self.interpreter
1388
+ else:
1389
+ if self.system.xperm(self.interpreter_path):
1390
+ return self.interpreter_path
1391
+ else:
1392
+ raise AlgoComponentError("The '{:s}' interpreter is not executable"
1393
+ .format(self.interpreter_path))
1394
+
1395
+ def _interpreter_args_fix(self, rh, opts):
1396
+ absexec = self.absexcutable(rh.container.localpath())
1397
+ if self.interpreter == 'awk':
1398
+ return ['-f', absexec]
1399
+ else:
1400
+ return [absexec, ]
1401
+
1402
+ def execute_single(self, rh, opts):
1403
+ """
1404
+ Run the specified resource handler through the current interpreter,
1405
+ using the resource command_line method as args.
1406
+ """
1407
+ # Generic config
1408
+ args = [self._actual_interpreter, ]
1409
+ args.extend(self._interpreter_args_fix(rh, opts))
1410
+ args.extend(self.spawn_command_line(rh))
1411
+ logger.info('Run script %s', args)
1412
+ rh_stdin = self.spawn_stdin(rh)
1413
+ if rh_stdin is not None:
1414
+ plocale = locale.getlocale()[1] or 'ascii'
1415
+ logger.info('Script stdin:\n%s', rh_stdin.read().decode(plocale, 'replace'))
1416
+ rh_stdin.seek(0)
1417
+ # Python path stuff
1418
+ newpypath = ':'.join(self.extendpypath)
1419
+ if 'pythonpath' in self.env:
1420
+ newpypath += ':{:s}'.format(self.env.pythonpath)
1421
+ # launching the program...
1422
+ with self.env.delta_context(pythonpath=newpypath):
1423
+ self.spawn(args, opts, stdin=rh_stdin)
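+
+ # --- Hedged illustration (editor's sketch, not part of the original package).
+ # Given ``interpreter='awk'`` and a script fetched as ``score.awk`` (names
+ # illustrative), the argv assembled above would look like:
+ #     ['awk', '-f', '/abs/path/to/score.awk', <command-line args>...]
+ # while any other interpreter drops the '-f' flag:
+ #     ['python', '/abs/path/to/script.py', <command-line args>...]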
+
+
+ class ParaExpresso(TaylorRun):
+ """
+ Run any script in the current environment.
+
+ This abstract class includes helpers to use the taylorism package in order
+ to introduce an external parallelisation. It is designed to work well with a
+ taylorism Worker class that inherits from
+ :class:`vortex.tools.parallelism.VortexWorkerBlindRun`.
+ """
+
+ _abstract = True
+ _footprint = dict(
+ info = 'AlgoComponent that simply runs a script using the taylorism package.',
+ attr = dict(
+ interpreter = dict(
+ info = 'The interpreter needed to run the script.',
+ values = ['current', 'awk', 'ksh', 'bash', 'perl', 'python']
+ ),
+ engine = dict(
+ values = ['exec', 'launch']
+ ),
+ interpreter_path = dict(
+ info = 'The full path to the interpreter.',
+ optional = True,
+ ),
+ extendpypath = dict(
+ info = "The list of entries to prepend to the python path.",
+ type = footprints.FPList,
+ default = footprints.FPList([]),
+ optional = True
+ ),
+ )
+ )
+
+ def valid_executable(self, rh):
+ """
+ Return a boolean value according to the effective executable nature
+ of the resource handler provided.
+ """
+ return rh is not None
+
+ def _interpreter_args_fix(self, rh, opts):
+ absexec = self.absexcutable(rh.container.localpath())
+ if self.interpreter == 'awk':
+ return ['-f', absexec]
+ else:
+ return [absexec, ]
+
+ def _default_common_instructions(self, rh, opts):
+ """Create a common instruction dictionary that will be used by the workers."""
+ ddict = super()._default_common_instructions(rh, opts)
+ actual_interpreter = sys.executable if self.interpreter == 'current' else self.interpreter
+ ddict['progname'] = actual_interpreter
+ ddict['progargs'] = footprints.FPList(self._interpreter_args_fix(rh, opts) +
+ self.spawn_command_line(rh))
+ ddict['progenvdelta'] = footprints.FPDict()
+ # Deal with the python path
+ newpypath = ':'.join(self.extendpypath)
+ if 'pythonpath' in self.env:
+ newpypath += ':{:s}'.format(self.env.pythonpath)
+ if newpypath:
+ ddict['progenvdelta']['pythonpath'] = newpypath
+ return ddict
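+
+ # --- Hedged illustration (editor's sketch, not part of the original package).
+ # With ``interpreter='python'``, a script fetched as ``task.py`` and
+ # ``extendpypath=['/opt/mytools']`` (all names illustrative), the resulting
+ # common instructions would look like:
+ #     {'kind': ..., 'taskdebug': False,
+ #      'progname': 'python',
+ #      'progargs': FPList(['/abs/path/to/task.py', ...]),
+ #      'progenvdelta': FPDict(pythonpath='/opt/mytools')}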
+
+
+ class BlindRun(xExecutableAlgoComponent):
+ """
+ Run any executable resource in the current environment. Mandatory argument is:
+
+ * engine (values = blind)
+ """
+
+ _footprint = dict(
+ info = 'AlgoComponent that simply runs a serial binary',
+ attr = dict(
+ engine = dict(
+ values = ['blind']
+ )
+ )
+ )
+
+ def execute_single(self, rh, opts):
+ """
+ Run the specified resource handler as an absolute executable,
+ using the resource command_line method as args.
+ """
+
+ args = [self.absexcutable(rh.container.localpath())]
+ args.extend(self.spawn_command_line(rh))
+ logger.info('BlindRun executable resource %s', args)
+ rh_stdin = self.spawn_stdin(rh)
+ if rh_stdin is not None:
+ plocale = locale.getlocale()[1] or 'ascii'
+ logger.info('BlindRun executable stdin (fileno:%d):\n%s',
+ rh_stdin.fileno(), rh_stdin.read().decode(plocale, 'replace'))
+ rh_stdin.seek(0)
+ self.spawn(args, opts, stdin=rh_stdin)
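+
+ # --- Hedged illustration (editor's sketch, not part of the original package).
+ # Driver-side sketch of running a binary through this component, assuming the
+ # usual toolbox idiom (exact calls may differ):
+ #     algo = vortex.toolbox.algo(engine='blind')
+ #     algo.run(binary_rh)   # binary_rh: the executable's ResourceHandler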
+
+
+ class ParaBlindRun(TaylorRun):
+ """
+ Run any executable resource (without MPI) in the current environment.
+
+ This abstract class includes helpers to use the taylorism package in order
+ to introduce an external parallelisation. It is designed to work well with a
+ taylorism Worker class that inherits from
+ :class:`vortex.tools.parallelism.VortexWorkerBlindRun`.
+ """
+
+ _abstract = True
+ _footprint = dict(
+ info = 'Abstract AlgoComponent that runs a serial binary using the taylorism package.',
+ attr = dict(
+ engine = dict(
+ values = ['blind']
+ ),
+ taskset = dict(
+ info = "Topology/Method to set up the CPU affinity of the child task.",
+ default = None,
+ optional = True,
+ values = ['{:s}{:s}'.format(t, m)
+ for t in ('raw', 'socketpacked', 'numapacked')
+ for m in ('', '_taskset', '_gomp', '_omp', '_ompverbose')]
+ ),
+ taskset_bsize = dict(
+ info = 'The number of threads used by one task',
+ type = int,
+ default = 1,
+ optional = True
+ ),
+ )
+ )
+
+ def valid_executable(self, rh):
+ """
+ Return a boolean value according to the effective executable nature
+ of the resource handler provided.
+ """
+ rc = rh is not None
+ if rc:
+ # Ensure that the input file is executable
+ xrh = rh if isinstance(rh, (list, tuple)) else [rh, ]
+ for arh in xrh:
+ self.system.xperm(arh.container.localpath(), force=True)
+ return rc
+
+ def _default_common_instructions(self, rh, opts):
+ """Create a common instruction dictionary that will be used by the workers."""
+ ddict = super()._default_common_instructions(rh, opts)
+ ddict['progname'] = self.absexcutable(rh.container.localpath())
+ ddict['progargs'] = footprints.FPList(self.spawn_command_line(rh))
+ ddict['progtaskset'] = self.taskset
+ ddict['progtaskset_bsize'] = self.taskset_bsize
+ return ddict
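+
+ # --- Hedged illustration (editor's sketch, not part of the original package).
+ # The ``taskset`` values are the cross-product of a topology and a binding
+ # method; truncating the method list for brevity:
+ #     >>> ['{:s}{:s}'.format(t, m)
+ #     ...  for t in ('raw', 'socketpacked', 'numapacked')
+ #     ...  for m in ('', '_taskset')]
+ #     ['raw', 'raw_taskset', 'socketpacked', 'socketpacked_taskset',
+ #      'numapacked', 'numapacked_taskset']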
+
+
+ class Parallel(xExecutableAlgoComponent):
+ """
+ Run a binary launched with MPI support.
+ """
+
+ _footprint = dict(
+ info = 'AlgoComponent that simply runs an MPI binary',
+ attr = dict(
+ engine = dict(
+ values = ['parallel']
+ ),
+ mpitool = dict(
+ info = 'The object used to launch the parallel program',
+ optional = True,
+ type = mpitools.MpiTool,
+ doc_visibility = footprints.doc.visibility.GURU,
+ ),
+ mpiname = dict(
+ info = ('The mpiname of a class in the mpitool collector ' +
+ '(used only if *mpitool* is not provided)'),
+ optional = True,
+ alias = ['mpi'],
+ doc_visibility = footprints.doc.visibility.GURU,
+ ),
+ mpiverbose = dict(
+ info = 'Boost logging verbosity in mpitools',
+ optional = True,
+ default = False,
+ doc_visibility = footprints.doc.visibility.GURU,
+ ),
+ binaries = dict(
+ info = 'List of MpiBinaryDescription objects',
+ optional = True,
+ type = footprints.FPList,
+ doc_visibility = footprints.doc.visibility.GURU,
+ ),
+ binarysingle = dict(
+ info = 'If *binaries* is missing, the default binary role for single binaries',
+ optional = True,
+ default = 'basicsingle',
+ doc_visibility = footprints.doc.visibility.GURU,
+ ),
+ binarymulti = dict(
+ info = 'If *binaries* is missing, the default binary role for multiple binaries',
+ type = footprints.FPList,
+ optional = True,
+ default = footprints.FPList(['basic', ]),
+ doc_visibility = footprints.doc.visibility.GURU,
+ ),
+ )
+ )
+
+ def _mpitool_attributes(self, opts):
+ """Return the dictionary of attributes needed to create the mpitool object."""
+ # Read the appropriate configuration in the target file
+ conf_dict = from_config(section="mpitool")
+ if self.mpiname:
+ conf_dict["mpiname"] = self.mpiname
+ # Make "mpirun" the default mpi command name
+ if "mpiname" not in conf_dict.keys():
+ conf_dict["mpiname"] = "mpirun"
+ possible_attrs = functools.reduce(
+ lambda s, t: s | t,
+ [
+ set(cls._footprint.attr.keys())
+ for cls in footprints.proxy.mpitools
+ ],
+ )
+ nonkeys = set(conf_dict.keys()) - possible_attrs
+ if nonkeys:
+ msg = (
+ "The following keywords are unknown configuration "
+ "keys for section \"mpitool\":\n"
+ )
+ raise ValueError(msg + "\n".join(nonkeys))
+ return conf_dict
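+
+ # --- Hedged illustration (editor's sketch, not part of the original package).
+ # The reduce above is a plain union of the attribute names advertised by all
+ # registered mpitool classes; with toy sets it behaves like:
+ #     >>> import functools
+ #     >>> sorted(functools.reduce(lambda s, t: s | t,
+ #     ...                         [{'mpiname', 'nn'}, {'mpiname', 'np'}]))
+ #     ['mpiname', 'nn', 'np']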
+
+ def spawn_command_line(self, rh):
+ """Split the shell command line of each resource to be run."""
+ return [super(Parallel, self).spawn_command_line(r) for r in rh]
+
+ def _bootstrap_mpibins_hack(self, bins, rh, opts, use_envelope):
+ return copy.deepcopy(bins)
+
+ def _bootstrap_mpienvelope_hack(self, envelope, rh, opts, mpi):
+ return copy.deepcopy(envelope)
+
+ def _bootstrap_mpienvelope_posthack(self, envelope, rh, opts, mpi):
+ return None
+
+ def _bootstrap_mpitool(self, rh, opts):
+ """Initialise the mpitool object and find out the command line."""
+
+ # rh is a list of binaries...
+ if not isinstance(rh, collections.abc.Iterable):
+ rh = [rh, ]
+
+ # Find the MPI launcher
+ mpi = self.mpitool
+ if not mpi:
+ mpi = footprints.proxy.mpitool(
+ sysname=self.system.sysname,
+ **self._mpitool_attributes(opts)
+ )
+ if not mpi:
+ logger.critical('Component %s could not find any mpitool', self.footprint_clsname())
+ raise AttributeError('No valid mpitool attr could be found.')
+
+ # Setup various useful things (env, system, ...)
+ mpi.import_basics(self)
+
+ mpi_opts = opts.get('mpiopts', dict())
+
+ envelope = []
+ use_envelope = 'envelope' in mpi_opts
+ if use_envelope:
+ envelope = mpi_opts.pop('envelope')
+ if envelope == 'auto':
+ blockspec = dict(nn=self.env.get('VORTEX_SUBMIT_NODES', 1), )
+ if 'VORTEX_SUBMIT_TASKS' in self.env:
+ blockspec['nnp'] = self.env.get('VORTEX_SUBMIT_TASKS')
+ else:
+ raise ValueError("when envelope='auto', VORTEX_SUBMIT_TASKS must be set.")
+ envelope = [blockspec, ]
+ elif isinstance(envelope, dict):
+ envelope = [envelope, ]
+ elif isinstance(envelope, (list, tuple)):
+ pass
+ else:
+ raise AttributeError('Invalid envelope specification')
+ if envelope:
+ envelope_ntasks = sum([d['nn'] * d['nnp'] for d in envelope])
+ if not envelope:
+ use_envelope = False
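+
+ # --- Hedged illustration (editor's sketch, not part of the original package).
+ # The three accepted envelope shapes (values illustrative):
+ #     mpiopts=dict(envelope='auto')                  # built from VORTEX_SUBMIT_*
+ #     mpiopts=dict(envelope=dict(nn=4, nnp=32))      # one homogeneous block
+ #     mpiopts=dict(envelope=[dict(nn=1, nnp=8),      # several heterogeneous
+ #                            dict(nn=3, nnp=32)])    # blocks
+ # All are normalised to a list of blocks; the rank total is sum(nn * nnp),
+ # e.g. 1*8 + 3*32 == 104 for the last example.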
+
+ if not use_envelope:
+ # Some MPI presets
+ mpi_desc = dict()
+ for mpi_k in ('tasks', 'openmp'):
+ mpi_kenv = 'VORTEX_SUBMIT_' + mpi_k.upper()
+ if mpi_kenv in self.env:
+ mpi_desc[mpi_k] = self.env.get(mpi_kenv)
+
+ # Binaries may be grouped together on the same nodes
+ bin_groups = mpi_opts.pop('groups', [])
+
+ # Find out the command line
+ bargs = self.spawn_command_line(rh)
+
+ # Potential source files
+ sources = []
+
+ # The usual case: no indications, 1 binary + a potential IO server
+ if len(rh) == 1 and not self.binaries:
+
+ # In such a case, defining groups does not make sense
+ self.algoassert(not bin_groups,
+ "With only one binary, groups should not be defined")
+
+ # The main program
+ allowbind = mpi_opts.pop('allowbind', True)
+ distribution = mpi_opts.pop('distribution',
+ self.env.get('VORTEX_MPIBIN_DEF_DISTRIBUTION', None))
+ if use_envelope:
+ master = footprints.proxy.mpibinary(
+ kind=self.binarysingle,
+ ranks=envelope_ntasks,
+ openmp=self.env.get('VORTEX_SUBMIT_OPENMP', None),
+ allowbind=allowbind,
+ distribution=distribution)
+ else:
+ master = footprints.proxy.mpibinary(
+ kind=self.binarysingle,
+ nodes=self.env.get('VORTEX_SUBMIT_NODES', 1),
+ allowbind=allowbind,
+ distribution=distribution,
+ **mpi_desc)
+ master.options = mpi_opts
+ master.master = self.absexcutable(rh[0].container.localpath())
+ master.arguments = bargs[0]
+ bins = [master, ]
+ # Source files?
+ if hasattr(rh[0].resource, 'guess_binary_sources'):
+ sources.extend(rh[0].resource.guess_binary_sources(rh[0].provider))
+
+ # Multiple binaries are to be launched: no IO server support here.
+ elif len(rh) > 1 and not self.binaries:
+
+ # Binary roles
+ if len(self.binarymulti) == 1:
+ bnames = self.binarymulti * len(rh)
+ else:
+ if len(self.binarymulti) != len(rh):
+ raise ParallelInconsistencyAlgoComponentError("self.binarymulti")
+ bnames = self.binarymulti
+
+ # Check mpiopts shape
+ for k, v in mpi_opts.items():
+ if not isinstance(v, collections.abc.Iterable):
+ raise ValueError('With multiple binaries, each mpiopts value must be an Iterable')
+ if len(v) != len(rh):
+ raise ParallelInconsistencyAlgoComponentError('mpiopts[{:s}]'.format(k))
+ # Check bin_groups shape
+ if bin_groups:
+ if len(bin_groups) != len(rh):
+ raise ParallelInconsistencyAlgoComponentError('bin_groups')
+
+ # Create MpiBinaryDescription objects
+ bins = list()
+ allowbinds = mpi_opts.pop('allowbind', [True, ] * len(rh))
+ distributions = mpi_opts.pop('distribution',
+ [self.env.get('VORTEX_MPIBIN_DEF_DISTRIBUTION', None), ]
+ * len(rh))
+ for i, r in enumerate(rh):
+ if use_envelope:
+ bins.append(
+ footprints.proxy.mpibinary(
+ kind=bnames[i],
+ allowbind=allowbinds[i],
+ distribution=distributions[i],
+ )
+ )
+ else:
+ bins.append(
+ footprints.proxy.mpibinary(
+ kind=bnames[i],
+ nodes=self.env.get('VORTEX_SUBMIT_NODES', 1),
+ allowbind=allowbinds[i],
+ distribution=distributions[i],
+ **mpi_desc
+ )
+ )
+ # Reshape mpiopts
+ bins[i].options = {k: v[i] for k, v in mpi_opts.items()}
+ if bin_groups:
+ bins[i].group = bin_groups[i]
+ bins[i].master = self.absexcutable(r.container.localpath())
+ bins[i].arguments = bargs[i]
+ # Source files?
+ if hasattr(r.resource, 'guess_binary_sources'):
+ sources.extend(r.resource.guess_binary_sources(r.provider))
+
+ # Nothing to do: binary descriptions are provided by the user
+ else:
+ if len(self.binaries) != len(rh):
+ raise ParallelInconsistencyAlgoComponentError("self.binaries")
+ bins = self.binaries
+ for i, r in enumerate(rh):
+ bins[i].master = self.absexcutable(r.container.localpath())
+ bins[i].arguments = bargs[i]
+
+ # The global envelope
+ envelope = self._bootstrap_mpienvelope_hack(envelope, rh, opts, mpi)
+ if envelope:
+ mpi.envelope = envelope
+
+ # The binaries description
+ mpi.binaries = self._bootstrap_mpibins_hack(bins, rh, opts, use_envelope)
+ upd_envelope = self._bootstrap_mpienvelope_posthack(envelope, rh, opts, mpi)
+ if upd_envelope:
+ mpi.envelope = upd_envelope
+
+ # The source files
+ mpi.sources = sources
+
+ if envelope:
+ # Check the consistency between nranks and the total number of processes
+ envelope_ntasks = sum([d.nprocs for d in mpi.envelope])
+ mpibins_total = sum([m.nprocs for m in mpi.binaries])
+ if envelope_ntasks != mpibins_total:
+ raise AlgoComponentError(
+ ("The number of requested ranks ({:d}) must be equal "
+ "to the number of processes available in the envelope ({:d})").
+ format(mpibins_total, envelope_ntasks))
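+
+ # --- Hedged illustration (editor's sketch, not part of the original package).
+ # The rule enforced above, with toy numbers: an envelope [dict(nn=2, nnp=16)]
+ # provides 32 ranks, so the binaries' nprocs must add up to exactly 32
+ # (e.g. a 28-rank main model plus a 4-rank IO server).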
+
+ args = mpi.mkcmdline()
+ for b in mpi.binaries:
+ logger.info('Run %s in parallel mode. Args: %s.', b.master, ' '.join(b.arguments))
+ logger.info('Full MPI command line: %s', ' '.join(args))
+
+ # Setup various useful things (env, system, ...)
+ mpi.import_basics(self)
+
+ return mpi, args
+
+ @contextlib.contextmanager
+ def _tweak_mpitools_logging(self):
+ if self.mpiverbose:
+ m_loggers = dict()
+ for m_logger_name in [l for l in loggers.lognames if 'mpitools' in l]:
+ m_logger = loggers.getLogger(m_logger_name)
+ m_loggers[m_logger] = m_logger.level
+ m_logger.setLevel(logging.DEBUG)
+ try:
+ yield
+ finally:
+ for m_logger, prev_level in m_loggers.items():
+ m_logger.setLevel(prev_level)
+ else:
+ yield
+
+ def execute_single(self, rh, opts):
+ """Run the specified resource handler through the `mpitool` launcher.
+
+ An argument named `mpiopts` may be provided as a dictionary: it can
+ contain indications on the number of nodes, tasks, ...
+ """
+
+ self.system.subtitle('{:s} : parallel engine'.format(self.realkind))
+
+ with self._tweak_mpitools_logging():
+
+ # Return an mpitool object and the MPI command line
+ mpi, args = self._bootstrap_mpitool(rh, opts)
+
+ # Specific parallel settings
+ mpi.setup(opts)
+
+ # This is the actual run command
+ self.spawn(args, opts)
+
+ # Specific parallel cleaning
+ mpi.clean(opts)
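+
+ # --- Hedged illustration (editor's sketch, not part of the original package).
+ # Driver-side sketch of forwarding per-run MPI options (exact calls may
+ # differ):
+ #     algo = vortex.toolbox.algo(engine='parallel', mpiname='mpirun')
+ #     algo.run(binary_rh, mpiopts=dict(nn=4, nnp=32, openmp=4))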
+
+
+ @algo_component_deco_mixin_autodoc
+ class ParallelIoServerMixin(AlgoComponentMpiDecoMixin):
+ """Adds IO server capabilities (footprint attributes + MPI binaries alteration)."""
+
+ _MIXIN_EXTRA_FOOTPRINTS = [footprints.Footprint(
+ info="Abstract IoServer footprints' attributes.",
+ attr=dict(
+ ioserver=dict(
+ info='The object used to launch the IOserver part of the binary.',
+ type=mpitools.MpiBinaryIOServer,
+ optional=True,
+ default=None,
+ doc_visibility=footprints.doc.visibility.GURU,
+ ),
+ ioname=dict(
+ info=('The binary_kind of a class in the mpibinary collector ' +
+ '(used only if *ioserver* is not provided)'),
+ optional=True,
+ default='ioserv',
+ doc_visibility=footprints.doc.visibility.GURU,
+ ),
+ iolocation=dict(
+ info='Location of the IO server within the binary list',
+ type=int,
+ default=-1,
+ optional=True
+ )
+ )),
+ ]
+
+ def _bootstrap_mpibins_ioserver_hack(self, bins, bins0, rh, opts, use_envelope):
+ """If requested, adds an extra binary that will act as an IO server."""
+ master = bins[-1]
+ # A potential IO server
+ io = self.ioserver
+ if not io and int(self.env.get('VORTEX_IOSERVER_NODES', -1)) >= 0:
+ io = footprints.proxy.mpibinary(
+ kind=self.ioname,
+ nodes=self.env.VORTEX_IOSERVER_NODES,
+ tasks=(self.env.VORTEX_IOSERVER_TASKS or
+ master.options.get('nnp', master.tasks)),
+ openmp=(self.env.VORTEX_IOSERVER_OPENMP or
+ master.options.get('openmp', master.openmp)),
+ iolocation=self.iolocation)
+ io.options = {x[3:]: opts[x]
+ for x in opts.keys() if x.startswith('io_')}
+ io.master = master.master
+ io.arguments = master.arguments
+ if not io and int(self.env.get('VORTEX_IOSERVER_COMPANION_TASKS', -1)) >= 0:
+ io = footprints.proxy.mpibinary(
+ kind=self.ioname,
+ nodes=master.options.get('nn', master.nodes),
+ tasks=self.env.VORTEX_IOSERVER_COMPANION_TASKS,
+ openmp=(self.env.VORTEX_IOSERVER_OPENMP or
+ master.options.get('openmp', master.openmp)))
+ io.options = {x[3:]: opts[x]
+ for x in opts.keys() if x.startswith('io_')}
+ io.master = master.master
+ io.arguments = master.arguments
+ if master.group is not None:
+ # The master binary is already in a group! Use it.
+ io.group = master.group
+ else:
+ io.group = 'auto_masterwithio'
+ master.group = 'auto_masterwithio'
+ if not io and self.env.get('VORTEX_IOSERVER_INCORE_TASKS', None) is not None:
+ if hasattr(master, 'incore_iotasks'):
+ master.incore_iotasks = self.env.VORTEX_IOSERVER_INCORE_TASKS
+ if not io and self.env.get('VORTEX_IOSERVER_INCORE_FIXER', None) is not None:
+ if hasattr(master, 'incore_iotasks_fixer'):
+ master.incore_iotasks_fixer = self.env.VORTEX_IOSERVER_INCORE_FIXER
+ if not io and self.env.get('VORTEX_IOSERVER_INCORE_DIST', None) is not None:
+ if hasattr(master, 'incore_iodist'):
+ master.incore_iodist = self.env.VORTEX_IOSERVER_INCORE_DIST
+ if io:
+ rh.append(rh[0])
+ if master.group is None:
+ if 'nn' in master.options:
+ master.options['nn'] = master.options['nn'] - io.options['nn']
+ else:
+ logger.warning('The "nn" option is not available in the master binary ' +
+ 'mpi options. Consequently, it cannot be adjusted...')
+ if self.iolocation >= 0:
+ bins.insert(self.iolocation, io)
+ else:
+ bins.append(io)
+ return bins
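+
+ # --- Hedged illustration (editor's sketch, not part of the original package).
+ # With VORTEX_IOSERVER_NODES=2, opts entries prefixed with 'io_' are routed to
+ # the IO server with the prefix stripped (opts['io_nn'] -> io.options['nn']).
+ # On a 10-node run with io.options['nn'] == 2, the hook carves the IO server
+ # out of the master's allocation: master.options['nn'] drops from 10 to 8 and
+ # the 'ioserv' binary is appended (or inserted at *iolocation*).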
+
+ _MIXIN_MPIBINS_HOOKS = (_bootstrap_mpibins_ioserver_hack, )
1996
+
1997
+
1998
+ @algo_component_deco_mixin_autodoc
1999
+ class ParallelOpenPalmMixin(AlgoComponentMpiDecoMixin):
2000
+ """Class mixin to be used with OpenPALM programs.
2001
+
2002
+ It will automatically add the OpenPALM driver binary to the list of
2003
+ binaries. The location of the OpenPALM driver should be automatically
2004
+ detected provided that a section with ``role=OpenPALM Driver`` lies in the
2005
+ input's sequence. Alternatively, the path to the OpenPALM driver can be
2006
+ provided using the **openpalm_driver** footprint's argument.
2007
+ """
2008
+
2009
+ _MIXIN_EXTRA_FOOTPRINTS = [footprints.Footprint(
2010
+ info="Abstract OpenPALM footprints' attributes.",
2011
+ attr=dict(
2012
+ openpalm_driver=dict(
2013
+ info=('The path to the OpenPALM driver binary. ' +
2014
+ 'When omitted, the input sequence is looked up ' +
2015
+ 'for section with ``role=OpenPALM Driver``.'),
2016
+ optional=True,
2017
+ doc_visibility=footprints.doc.visibility.ADVANCED,
2018
+ ),
2019
+ openpalm_overcommit=dict(
2020
+ info=('Run the OpenPALM driver on the first node in addition ' +
2021
+ 'to existing tasks. Otherwise dedicated tasks are used.'),
2022
+ type=bool,
2023
+ default=True,
2024
+ optional=True,
2025
+ doc_visibility=footprints.doc.visibility.ADVANCED,
2026
+ ),
2027
+ openpalm_binddriver=dict(
2028
+ info='Try to bind the OpenPALM driver binary.',
2029
+ type=bool,
2030
+ optional=True,
2031
+ default=False,
2032
+ doc_visibility=footprints.doc.visibility.ADVANCED,
2033
+ ),
2034
+ openpalm_binkind=dict(
2035
+ info='The binary kind for the OpenPALM driver.',
2036
+ optional=True,
2037
+ default='basic',
2038
+ doc_visibility=footprints.doc.visibility.GURU,
2039
+ ),
2040
+ )),
2041
+ ]
2042
+
2043
+ @property
2044
+ def _actual_openpalm_driver(self):
2045
+ """Returns the OpenPALM's driver location."""
2046
+ path = self.openpalm_driver
2047
+ if path is None:
2048
+ drivers = self.context.sequence.effective_inputs(role='OpenPALMDriver')
2049
+ if not drivers:
2050
+ raise AlgoComponentError('No OpenPALM driver was provided.')
2051
+ elif len(drivers) > 1:
2052
+ raise AlgoComponentError('Several OpenPALM driver were provided.')
2053
+ path = drivers[0].rh.container.localpath()
2054
+ else:
2055
+ if not self.system.path.exists(path):
2056
+ raise AlgoComponentError('No OpenPALM driver was provider ({:s} does not exists).'
2057
+ .format(path))
2058
+ return path
2059
+
2060
+ def _bootstrap_mpibins_openpalm_hack(self, bins, bins0, rh, opts, use_envelope):
2061
+ """Adds the OpenPALM driver to the binary list."""
2062
+ single_bin = len(bins) == 1
2063
+ master = bins[0]
2064
+ driver = footprints.proxy.mpibinary(
2065
+ kind=self.openpalm_binkind,
2066
+ nodes=1,
2067
+ tasks=self.env.VORTEX_OPENPALM_DRV_TASKS or 1,
2068
+ openmp=self.env.VORTEX_OPENPALM_DRV_OPENMP or 1,
2069
+ allowbind=opts.pop('palmdrv_bind',
2070
+ self.env.get('VORTEX_OPENPALM_DRV_BIND',
2071
+ self.openpalm_binddriver)),
2072
+ )
2073
+ driver.options = {x[8:]: opts[x]
2074
+ for x in opts.keys() if x.startswith('palmdrv_')}
2075
+ driver.master = self._actual_openpalm_driver
2076
+ self.system.xperm(driver.master, force=True)
2077
+ bins.insert(0, driver)
2078
+ if not self.openpalm_overcommit and single_bin:
2079
+ # Tweak the number of tasks of the master program in order to accommodate
2080
+ # the driver
2081
+ # NB: If multiple binaries are provided, the user must do this by
2082
+ # himself (i.e. leave enough room for the driver's task).
2083
+ if 'nn' in master.options:
2084
+ master.options['nn'] = master.options['nn'] - 1
2085
+ else:
2086
+ # Ok, tweak nprocs instead (an envelope might be defined)
2087
+ try:
2088
+ nprocs = master.nprocs
2089
+ except mpitools.MpiException:
2090
+ logger.error('Neither the "nn" option nor the nprocs is ' +
2091
+ 'available for the master binary. Consequently ' +
2092
+ 'it can be fixed...')
2093
+ else:
2094
+ master.options['np'] = nprocs - driver.nprocs
2095
+ return bins
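+
+ # --- Hedged illustration (editor's sketch, not part of the original package).
+ # Opts entries prefixed with 'palmdrv_' are routed to the driver with the
+ # prefix stripped, e.g. opts['palmdrv_nn'] -> driver.options['nn'].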
+
+ _MIXIN_MPIBINS_HOOKS = (_bootstrap_mpibins_openpalm_hack, )
+
+ def _bootstrap_mpienvelope_openpalm_posthack(self, env, env0, rh, opts, mpi):
+ """
+ Tweak the MPI envelope in order to execute the OpenPALM driver on the
+ appropriate node.
+ """
+ master = mpi.binaries[1]  # The first "real" program that will be launched
+ driver = mpi.binaries[0]  # The OpenPALM driver
+ if self.openpalm_overcommit:
+ # Execute the driver on the first compute node
+ if env or env0:
+ env = env or copy.deepcopy(env0)
+ # An envelope is already defined... update it
+ if not ('nn' in env[0] and 'nnp' in env[0]):
+ raise AlgoComponentError("'nn' and 'nnp' must be defined in the envelope")
+ if env[0]['nn'] > 1:
+ env[0]['nn'] -= 1
+ newenv = copy.copy(env[0])
+ newenv['nn'] = 1
+ newenv['nnp'] += driver.nprocs
+ env.insert(0, newenv)
+ else:
+ env[0]['nnp'] += driver.nprocs
+ else:
+ # Setup a new envelope
+ if not ('nn' in master.options and 'nnp' in master.options):
+ raise AlgoComponentError("'nn' and 'nnp' must be defined for the master executable")
+ env = [dict(nn=1,
+ nnp=master.options['nnp'] + driver.nprocs,
+ openmp=master.options.get('openmp', 1))]
+ if master.options['nn'] > 1:
+ env.append(dict(nn=master.options['nn'] - 1,
+ nnp=master.options['nnp'],
+ openmp=master.options.get('openmp', 1)))
+ if len(mpi.binaries) > 2:
+ env.extend([b.options for b in mpi.binaries[2:]])
+ return env
+
+ _MIXIN_MPIENVELOPE_POSTHOOKS = (_bootstrap_mpienvelope_openpalm_posthack, )
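+
+ # --- Hedged illustration (editor's sketch, not part of the original package).
+ # Overcommit case with an existing envelope [dict(nn=4, nnp=32)] and a 1-rank
+ # driver: the first node is split off and enlarged, yielding
+ # [dict(nn=1, nnp=33), dict(nn=3, nnp=32)]. Without overcommit, a fresh
+ # envelope is built from the master's 'nn'/'nnp' options instead.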