vortex_nwp-2.0.0b1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (146)
  1. vortex/__init__.py +135 -0
  2. vortex/algo/__init__.py +12 -0
  3. vortex/algo/components.py +2136 -0
  4. vortex/algo/mpitools.py +1648 -0
  5. vortex/algo/mpitools_templates/envelope_wrapper_default.tpl +27 -0
  6. vortex/algo/mpitools_templates/envelope_wrapper_mpiauto.tpl +29 -0
  7. vortex/algo/mpitools_templates/wrapstd_wrapper_default.tpl +18 -0
  8. vortex/algo/serversynctools.py +170 -0
  9. vortex/config.py +115 -0
  10. vortex/data/__init__.py +13 -0
  11. vortex/data/abstractstores.py +1572 -0
  12. vortex/data/containers.py +780 -0
  13. vortex/data/contents.py +596 -0
  14. vortex/data/executables.py +284 -0
  15. vortex/data/flow.py +113 -0
  16. vortex/data/geometries.ini +2689 -0
  17. vortex/data/geometries.py +703 -0
  18. vortex/data/handlers.py +1021 -0
  19. vortex/data/outflow.py +67 -0
  20. vortex/data/providers.py +465 -0
  21. vortex/data/resources.py +201 -0
  22. vortex/data/stores.py +1271 -0
  23. vortex/gloves.py +282 -0
  24. vortex/layout/__init__.py +27 -0
  25. vortex/layout/appconf.py +109 -0
  26. vortex/layout/contexts.py +511 -0
  27. vortex/layout/dataflow.py +1069 -0
  28. vortex/layout/jobs.py +1276 -0
  29. vortex/layout/monitor.py +833 -0
  30. vortex/layout/nodes.py +1424 -0
  31. vortex/layout/subjobs.py +464 -0
  32. vortex/nwp/__init__.py +11 -0
  33. vortex/nwp/algo/__init__.py +12 -0
  34. vortex/nwp/algo/assim.py +483 -0
  35. vortex/nwp/algo/clim.py +920 -0
  36. vortex/nwp/algo/coupling.py +609 -0
  37. vortex/nwp/algo/eda.py +632 -0
  38. vortex/nwp/algo/eps.py +613 -0
  39. vortex/nwp/algo/forecasts.py +745 -0
  40. vortex/nwp/algo/fpserver.py +927 -0
  41. vortex/nwp/algo/ifsnaming.py +403 -0
  42. vortex/nwp/algo/ifsroot.py +311 -0
  43. vortex/nwp/algo/monitoring.py +202 -0
  44. vortex/nwp/algo/mpitools.py +554 -0
  45. vortex/nwp/algo/odbtools.py +974 -0
  46. vortex/nwp/algo/oopsroot.py +735 -0
  47. vortex/nwp/algo/oopstests.py +186 -0
  48. vortex/nwp/algo/request.py +579 -0
  49. vortex/nwp/algo/stdpost.py +1285 -0
  50. vortex/nwp/data/__init__.py +12 -0
  51. vortex/nwp/data/assim.py +392 -0
  52. vortex/nwp/data/boundaries.py +261 -0
  53. vortex/nwp/data/climfiles.py +539 -0
  54. vortex/nwp/data/configfiles.py +149 -0
  55. vortex/nwp/data/consts.py +929 -0
  56. vortex/nwp/data/ctpini.py +133 -0
  57. vortex/nwp/data/diagnostics.py +181 -0
  58. vortex/nwp/data/eda.py +148 -0
  59. vortex/nwp/data/eps.py +383 -0
  60. vortex/nwp/data/executables.py +1039 -0
  61. vortex/nwp/data/fields.py +96 -0
  62. vortex/nwp/data/gridfiles.py +308 -0
  63. vortex/nwp/data/logs.py +551 -0
  64. vortex/nwp/data/modelstates.py +334 -0
  65. vortex/nwp/data/monitoring.py +220 -0
  66. vortex/nwp/data/namelists.py +644 -0
  67. vortex/nwp/data/obs.py +748 -0
  68. vortex/nwp/data/oopsexec.py +72 -0
  69. vortex/nwp/data/providers.py +182 -0
  70. vortex/nwp/data/query.py +217 -0
  71. vortex/nwp/data/stores.py +147 -0
  72. vortex/nwp/data/surfex.py +338 -0
  73. vortex/nwp/syntax/__init__.py +9 -0
  74. vortex/nwp/syntax/stdattrs.py +375 -0
  75. vortex/nwp/tools/__init__.py +10 -0
  76. vortex/nwp/tools/addons.py +35 -0
  77. vortex/nwp/tools/agt.py +55 -0
  78. vortex/nwp/tools/bdap.py +48 -0
  79. vortex/nwp/tools/bdcp.py +38 -0
  80. vortex/nwp/tools/bdm.py +21 -0
  81. vortex/nwp/tools/bdmp.py +49 -0
  82. vortex/nwp/tools/conftools.py +1311 -0
  83. vortex/nwp/tools/drhook.py +62 -0
  84. vortex/nwp/tools/grib.py +268 -0
  85. vortex/nwp/tools/gribdiff.py +99 -0
  86. vortex/nwp/tools/ifstools.py +163 -0
  87. vortex/nwp/tools/igastuff.py +249 -0
  88. vortex/nwp/tools/mars.py +56 -0
  89. vortex/nwp/tools/odb.py +548 -0
  90. vortex/nwp/tools/partitioning.py +234 -0
  91. vortex/nwp/tools/satrad.py +56 -0
  92. vortex/nwp/util/__init__.py +6 -0
  93. vortex/nwp/util/async.py +184 -0
  94. vortex/nwp/util/beacon.py +40 -0
  95. vortex/nwp/util/diffpygram.py +359 -0
  96. vortex/nwp/util/ens.py +198 -0
  97. vortex/nwp/util/hooks.py +128 -0
  98. vortex/nwp/util/taskdeco.py +81 -0
  99. vortex/nwp/util/usepygram.py +591 -0
  100. vortex/nwp/util/usetnt.py +87 -0
  101. vortex/proxy.py +6 -0
  102. vortex/sessions.py +341 -0
  103. vortex/syntax/__init__.py +9 -0
  104. vortex/syntax/stdattrs.py +628 -0
  105. vortex/syntax/stddeco.py +176 -0
  106. vortex/toolbox.py +982 -0
  107. vortex/tools/__init__.py +11 -0
  108. vortex/tools/actions.py +457 -0
  109. vortex/tools/addons.py +297 -0
  110. vortex/tools/arm.py +76 -0
  111. vortex/tools/compression.py +322 -0
  112. vortex/tools/date.py +20 -0
  113. vortex/tools/ddhpack.py +10 -0
  114. vortex/tools/delayedactions.py +672 -0
  115. vortex/tools/env.py +513 -0
  116. vortex/tools/folder.py +663 -0
  117. vortex/tools/grib.py +559 -0
  118. vortex/tools/lfi.py +746 -0
  119. vortex/tools/listings.py +354 -0
  120. vortex/tools/names.py +575 -0
  121. vortex/tools/net.py +1790 -0
  122. vortex/tools/odb.py +10 -0
  123. vortex/tools/parallelism.py +336 -0
  124. vortex/tools/prestaging.py +186 -0
  125. vortex/tools/rawfiles.py +10 -0
  126. vortex/tools/schedulers.py +413 -0
  127. vortex/tools/services.py +871 -0
  128. vortex/tools/storage.py +1061 -0
  129. vortex/tools/surfex.py +61 -0
  130. vortex/tools/systems.py +3396 -0
  131. vortex/tools/targets.py +384 -0
  132. vortex/util/__init__.py +9 -0
  133. vortex/util/config.py +1071 -0
  134. vortex/util/empty.py +24 -0
  135. vortex/util/helpers.py +184 -0
  136. vortex/util/introspection.py +63 -0
  137. vortex/util/iosponge.py +76 -0
  138. vortex/util/roles.py +51 -0
  139. vortex/util/storefunctions.py +103 -0
  140. vortex/util/structs.py +26 -0
  141. vortex/util/worker.py +150 -0
  142. vortex_nwp-2.0.0b1.dist-info/LICENSE +517 -0
  143. vortex_nwp-2.0.0b1.dist-info/METADATA +50 -0
  144. vortex_nwp-2.0.0b1.dist-info/RECORD +146 -0
  145. vortex_nwp-2.0.0b1.dist-info/WHEEL +5 -0
  146. vortex_nwp-2.0.0b1.dist-info/top_level.txt +1 -0
vortex/algo/mpitools.py
@@ -0,0 +1,1648 @@
1
+ """
2
+ This package handles the MPI interface objects responsible for parallel executions.
3
+ :class:`MpiTool` and :class:`MpiBinaryDescription` objects use the
4
+ :mod:`footprints` mechanism.
5
+
6
+ A :class:`MpiTool` object is directly related to a concrete MPI implementation: it
7
+ builds the proper command line, updates the namelists with relevant MPI parameters
8
+ (for instance, the total number of tasks), updates the environment to fit the MPI
9
+ implementation's needs, and so on. It heavily relies on :class:`MpiBinaryDescription`
10
+ objects that describe the settings and behaviours associated with each of the
11
+ binaries that will be launched.
12
+
13
+ Here is a typical use of MpiTools:
14
+
15
+ .. code-block:: python
16
+
17
+ # We will assume that bin0 and bin1 are valid executables' Resource Handlers
18
+
19
+ from footprints import proxy as fpx
20
+ import vortex
21
+
22
+ t = vortex.ticket()
23
+
24
+ # Create the mpitool object for a given MPI implementation
25
+ mpitool = fpx.mpitool(sysname=t.system().sysname,
26
+ mpiname='mpirun', # To use Open-MPI's mpirun
27
+ )
28
+ # NB: mpiname='...' may be omitted. In such a case, the VORTEX_MPI_NAME
29
+ # environment variable is used
30
+
31
+ # Create the MPI binary descriptions
32
+ dbin0 = fpx.mpibinary(kind='basic', nodes=2, tasks=4, openmp=10)
33
+ dbin0.master = bin0.container.localpath()
34
+ dbin1 = fpx.mpibinary(kind='basic', nodes=1, tasks=8, openmp=5)
35
+ dbin1.master = bin1.container.localpath()
36
+
37
+ # Note: the number of nodes, tasks, ... can be overwritten at any time using:
38
+ # dbinX.options = dict(nn=M, nnp=N, openmp=P)
39
+
40
+ # Associate the MPI binary descriptions with the mpitool object
41
+ mpitool.binaries = [dbin0, dbin1]
42
+
43
+ dbin0.arguments = '-test bin0'  # Command line arguments for bin0
44
+ dbin1.arguments = '-test bin1'  # Command line arguments for bin1
45
+ # Build the MPI command line:
46
+ args = mpitool.mkcmdline()
47
+
48
+ # Set up various useful things (env, system, ...)
49
+ mpitool.import_basics(an_algo_component_object)
50
+
51
+ # Specific parallel settings (the namelists and environment may be modified here)
52
+ mpitool.setup(dict()) # The dictionary may contain additional options
53
+
54
+ # ...
55
+ # Here you may run the command contained in *args*
56
+ # ...
57
+
58
+ # Specific parallel cleaning
59
+ mpitool.clean(opts)
60
+
61
+ Actually, in real scripts, all of this is carried out by the
62
+ :class:`vortex.algo.components.Parallel` class which saves a lot of hassle.
63
+
64
+ Note: Namelist and environment changes are orchestrated as follows:
65
+ * Changes (if any) are applied by the :class:`MpiTool` object
66
+ * Changes (if any) are applied by each of the :class:`MpiBinaryDescription` objects
67
+ attached to the MpiTool object
68
+
69
+ """
70
+
71
+ import collections
72
+ import collections.abc
73
+ import itertools
74
+ import locale
75
+ import re
76
+ import shlex
77
+
78
+ import footprints
79
+ from bronx.fancies import loggers
80
+ from bronx.syntax.parsing import xlist_strings
81
+ from vortex.config import from_config
82
+ from vortex.tools import env
83
+ from vortex.tools.arm import ArmForgeTool
84
+ from vortex.tools.systems import ExecutionError
85
+ from vortex.util import config
86
+
87
+ #: No automatic export
88
+ __all__ = []
89
+
90
+ logger = loggers.getLogger(__name__)
91
+
92
+
93
+ class MpiException(Exception):
94
+ """Raise an exception in the parallel execution mode."""
95
+ pass
96
+
97
+
98
+ class MpiTool(footprints.FootprintBase):
99
+ """Root class for any :class:`MpiTool` subclass."""
100
+
101
+ _abstract = True
102
+ _collector = ('mpitool', )
103
+ _footprint = dict(
104
+ info = 'MpiTool class in charge of a particular MPI implementation',
105
+ attr = dict(
106
+ sysname = dict(
107
+ info = 'The current OS name (e.g. Linux)',
108
+ ),
109
+ mpiname = dict(
110
+ info = 'The MPI implementation one wishes to use',
111
+ ),
112
+ mpilauncher = dict(
113
+ info = 'The MPI launcher command to be used',
114
+ optional = True
115
+ ),
116
+ mpiopts = dict(
117
+ info = 'Extra arguments for the MPI command',
118
+ optional = True,
119
+ default = ''
120
+ ),
121
+ mpiwrapstd = dict(
122
+ info = "When using the Vortex' global wrapper redirect stderr/stdout",
123
+ type = bool,
124
+ optional = True,
125
+ default = False,
126
+ doc_visibility = footprints.doc.visibility.ADVANCED,
127
+ doc_zorder = -90,
128
+ ),
129
+ mpibind_topology = dict(
130
+ optional = True,
131
+ default = "numapacked",
132
+ doc_visibility = footprints.doc.visibility.ADVANCED,
133
+ doc_zorder = -90,
134
+ ),
135
+ optsep = dict(
136
+ info = 'Separator between MPI options and the program name',
137
+ optional = True,
138
+ default = '--'
139
+ ),
140
+ optprefix = dict(
141
+ info = 'MPI options prefix',
142
+ optional = True,
143
+ default = '--'
144
+ ),
145
+ optmap = dict(
146
+ info = ('Mapping between MpiBinaryDescription objects ' +
147
+ 'internal data and actual command line options'),
148
+ type = footprints.FPDict,
149
+ optional = True,
150
+ default = footprints.FPDict(nn='nn', nnp='nnp', openmp='openmp')
151
+ ),
152
+ binsep = dict(
153
+ info = 'Separator between multiple binary groups',
154
+ optional = True,
155
+ default = '--'
156
+ ),
157
+ basics = dict(
158
+ type = footprints.FPList,
159
+ optional = True,
160
+ default = footprints.FPList(['system', 'env', 'target', 'context', 'ticket', ])
161
+ ),
162
+ bindingmethod = dict(
163
+ info = 'How to bind the MPI processes',
164
+ values = ['vortex', ],
165
+ access = 'rwx',
166
+ optional = True,
167
+ doc_visibility = footprints.doc.visibility.ADVANCED,
168
+ doc_zorder = -90,
169
+ ),
170
+ )
171
+ )
172
+
173
+ _envelope_bit_kind = 'basicenvelopebit'
174
+ _envelope_wrapper_tpl = '@envelope_wrapper_default.tpl'
175
+ _wrapstd_wrapper_tpl = '@wrapstd_wrapper_default.tpl'
176
+ _envelope_wrapper_name = './global_envelope_wrapper.py'
177
+ _wrapstd_wrapper_name = './global_wrapstd_wrapper.py'
178
+ _envelope_rank_var = 'MPIRANK'
179
+ _supports_manual_ranks_mapping = False
180
+ _needs_mpilib_specific_mpienv = True
181
+
182
+ def __init__(self, *args, **kw):
183
+ """After parent initialization, set master, options and basics to undefined."""
184
+ logger.debug('Abstract mpi tool init %s', self.__class__)
185
+ super().__init__(*args, **kw)
186
+ self._launcher = self.mpilauncher or self.generic_mpiname
187
+ self._binaries = []
188
+ self._envelope = []
189
+ self._sources = []
190
+ self._mpilib_data_cache = None
191
+ self._mpilib_identification_cache = None
192
+ self._ranks_map_cache = None
193
+ self._complex_ranks_map = None
194
+ for k in self.basics:
195
+ self.__dict__['_' + k] = None
196
+
197
+ @property
198
+ def realkind(self):
199
+ return 'mpitool'
200
+
201
+ @property
202
+ def generic_mpiname(self):
203
+ return self.mpiname.split('-')[0]
204
+
205
+ def __getattr__(self, key):
206
+ """Have a look to basics values provided by some proxy."""
207
+ if key in self.basics:
208
+ return getattr(self, '_' + key)
209
+ else:
210
+ raise AttributeError('Attribute [%s] is not a basic mpitool attribute' % key)
211
+
212
+ def import_basics(self, obj, attrs=None):
213
+ """Import some current values such as system, env, target and context from provided ``obj``."""
214
+ if attrs is None:
215
+ attrs = self.basics
216
+ for k in [x for x in attrs if x in self.basics and hasattr(obj, x)]:
217
+ setattr(self, '_' + k, getattr(obj, k))
218
+ for bin_obj in self.binaries:
219
+ bin_obj.import_basics(obj, attrs=None)
220
+
221
+ def _get_launcher(self):
222
+ """
223
+ Returns the name of the mpi tool to be used, set from VORTEX_MPI_LAUNCHER
224
+ environment variable, current attribute :attr:`mpiname` or explicit setting.
225
+ """
226
+ return self._launcher
227
+
228
+ def _set_launcher(self, value):
229
+ """Set current launcher mpi name. Should be some special trick, so issue a warning."""
230
+ logger.warning('Setting a new value [%s] to mpi launcher [%s].', value, self)
231
+ self._launcher = value
232
+
233
+ launcher = property(_get_launcher, _set_launcher)
234
+
235
+ def _get_envelope(self):
236
+ """Returns the envelope description."""
237
+ return self._envelope
238
+
239
+ def _valid_envelope(self, value):
240
+ """Tweak the envelope description values."""
241
+ pass
242
+
243
+ def _set_envelope(self, value):
244
+ """Set the envelope description."""
245
+ if not (isinstance(value, collections.abc.Iterable) and
246
+ all([isinstance(b, dict) and
247
+ all([bk in ('nn', 'nnp', 'openmp', 'np') for bk in b.keys()])
248
+ for b in value])):
249
+ raise ValueError('This should be an Iterable of dictionaries.')
250
+ self._valid_envelope(value)
251
+ self._envelope = list()
252
+ for e in value:
253
+ e_bit = footprints.proxy.mpibinary(kind=self._envelope_bit_kind)
254
+ self._envelope_fix_envelope_bit(e_bit, e)
255
+ self._envelope.append(e_bit)
256
+
257
+ envelope = property(_get_envelope, _set_envelope)
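For reference, a hedged sketch of the kind of value the envelope setter above accepts (an iterable of dictionaries restricted to the nn/nnp/openmp/np keys; the numbers are illustrative):

    # Two envelope slices: 2 nodes x 16 tasks, then 1 node x 8 tasks
    mpitool.envelope = [dict(nn=2, nnp=16), dict(nn=1, nnp=8)]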
258
+
259
+ def _get_binaries(self):
260
+ """Returns the list of :class:`MpiBinaryDescription` objects associated with this instance."""
261
+ return self._binaries
262
+
263
+ def _set_envelope_from_binaries(self):
264
+ """Create an envelope from existing binaries."""
265
+ # Detect possible groups of binaries
266
+ groups = collections.defaultdict(list)
267
+ for a_bin in self.binaries:
268
+ if a_bin.group is not None:
269
+ groups[a_bin.group].append(a_bin)
270
+ new_envelope = list()
271
+ for a_bin in self.binaries:
272
+ if a_bin.group is None:
273
+ # The usual (and easy) case
274
+ new_envelope.append({k: v for k, v in a_bin.options.items()
275
+ if k in ('nn', 'nnp', 'openmp', 'np')})
276
+ elif a_bin.group in groups:
277
+ # Deal with group of binaries
278
+ group = groups.pop(a_bin.group)
279
+ n_nodes = {g_bin.options.get('nn', None) for g_bin in group}
280
+ if None in n_nodes:
281
+ raise ValueError('To build a proper envelope, ' +
282
+ '"nn" needs to be specified in all binaries')
283
+ done_nodes = 0
284
+ for n_node in sorted(n_nodes):
285
+ new_desc = {}
286
+ new_desc['nn'] = n_node - done_nodes
287
+ new_desc['nnp'] = 0
288
+ for g_bin in [g_bin for g_bin in group if g_bin.options['nn'] >= n_node]:
289
+ new_desc['nnp'] += g_bin.options['nnp']
290
+ new_envelope.append(new_desc)
291
+ done_nodes = n_node
292
+ self.envelope = new_envelope
293
+
294
+ def _set_binaries_hack(self, binaries):
295
+ """Perform any action right after the binaries have been setup."""
296
+ pass
297
+
298
+ def _set_binaries_envelope_hack(self, binaries):
299
+ """Tweak the envelope after binaries were setup."""
300
+ pass
301
+
302
+ def _set_binaries(self, value):
303
+ """Set the list of :class:`MpiBinaryDescription` objects associated with this instance."""
304
+ if not (isinstance(value, collections.abc.Iterable) and
305
+ all([isinstance(b, MpiBinary) for b in value])):
306
+ raise ValueError('This should be an Iterable of MpiBinary instances.')
307
+ has_bin_groups = not all([b.group is None for b in value])
308
+ if not (self._supports_manual_ranks_mapping or not has_bin_groups):
309
+ raise ValueError('Binary groups are not supported by this MpiTool class')
310
+ has_bin_distribution = not all([b.distribution is None for b in value])
311
+ if not (self._supports_manual_ranks_mapping or not has_bin_distribution):
312
+ raise ValueError('Binary distribution option is not supported by this MpiTool class')
313
+ self._binaries = value
314
+ if not self.envelope and self.bindingmethod == 'vortex':
315
+ self._set_envelope_from_binaries()
316
+ elif not self.envelope and (has_bin_groups or has_bin_distribution):
317
+ self._set_envelope_from_binaries()
318
+ self._set_binaries_hack(self._binaries)
319
+ if self.envelope:
320
+ self._set_binaries_envelope_hack(self._binaries)
321
+ self._mpilib_data_cache = None
322
+ self._mpilib_identification_cache = None
323
+ self._ranks_map_cache = None
324
+ self._complex_ranks_map = None
325
+
326
+ binaries = property(_get_binaries, _set_binaries)
327
+
328
+ def _mpilib_data(self):
329
+ """From the binaries, try to detect MPI library and mpirun paths."""
330
+ if self._mpilib_data_cache is None:
331
+ mpilib_guesses = ('libmpi.so', 'libmpi_mt.so',
332
+ 'libmpi_dbg.so', 'libmpi_dbg_mt.so')
333
+ shp = self.system.path
334
+ mpilib_data = set()
335
+ for binary in self.binaries:
336
+ # For each binary call ldd...
337
+ mpilib = None
338
+ try:
339
+ binlibs = self.system.ldd(binary.master)
340
+ except (RuntimeError, ValueError):
341
+ # May fail if the 'master' is not a binary
342
+ continue
343
+ for mpilib_guess in mpilib_guesses:
344
+ for l, lp in binlibs.items():
345
+ if l.startswith(mpilib_guess):
346
+ mpilib = lp
347
+ break
348
+ if mpilib:
349
+ break
350
+ if mpilib:
351
+ mpilib = shp.normpath(mpilib)
352
+ mpitoolsdir = None
353
+ mpidir = shp.dirname(shp.dirname(mpilib))
354
+ if shp.exists(shp.join(mpidir, 'bin', 'mpirun')):
355
+ mpitoolsdir = shp.join(mpidir, 'bin')
356
+ if not mpitoolsdir and shp.exists(shp.join(mpidir, '..', 'bin', 'mpirun')):
357
+ mpitoolsdir = shp.normpath(shp.join(mpidir, '..', 'bin'))
358
+ if mpilib and mpitoolsdir:
359
+ mpilib_data.add((shp.realpath(mpilib),
360
+ shp.realpath(mpitoolsdir)))
361
+ # All the binaries must use the same library!
362
+ if len(mpilib_data) == 0:
363
+ logger.info('No MPI library was detected.')
364
+ self._mpilib_data_cache = ()
365
+ elif len(mpilib_data) > 1:
366
+ logger.error('Multiple MPI libraries were detected.')
367
+ self._mpilib_data_cache = ()
368
+ else:
369
+ self._mpilib_data_cache = mpilib_data.pop()
370
+ return self._mpilib_data_cache if self._mpilib_data_cache else None
371
+
372
+ def _mpilib_match_result(self, regex, rclines, which):
373
+ for line in rclines:
374
+ matched = regex.match(line)
375
+ if matched:
376
+ logger.info('MPI implementation detected: %s (%s)',
377
+ which, ' '.join(matched.groups()))
378
+ return [which] + [int(res) for res in matched.groups()]
379
+ return False
380
+
381
+ def _mpilib_identification(self):
382
+ """Try to guess the name and version of the MPI library."""
383
+ if self._mpilib_data() is None:
384
+ return None
385
+ if self._mpilib_identification_cache is None:
386
+ mpi_lib, mpi_tools_dir = self._mpilib_data()
387
+ ld_libs_extra = set()
388
+ sh = self.system
389
+ mpirun_path = sh.path.join(mpi_tools_dir, 'mpirun')
390
+ if sh.path.exists(mpirun_path):
391
+ try:
392
+ libs = sh.ldd(mpirun_path)
393
+ except ExecutionError:
394
+ # This may happen if the mpirun binary is statically linked
395
+ libs = dict()
396
+ if any([libname is None for libname in libs.values()]):
397
+ libscache = dict()
398
+ for binary in self.binaries:
399
+ for lib, libpath in sh.ldd(binary.master).items():
400
+ if libpath:
401
+ libscache[lib] = sh.path.dirname(libpath)
402
+ for missing_lib in [lib for lib, libname in libs.items()
403
+ if libname is None]:
404
+ if missing_lib in libscache:
405
+ ld_libs_extra.add(libscache[missing_lib])
406
+ with self.env.clone() as localenv:
407
+ for libpath in ld_libs_extra:
408
+ localenv.setgenericpath('LD_LIBRARY_PATH', libpath)
409
+ rc = sh.spawn([mpirun_path, '--version'], output=True, fatal=False)
410
+ if rc:
411
+ id_res = self._mpilib_match_result(
412
+ re.compile(r'^.*Intel.*MPI.*Version\s+(\d+)\s+Update\s+(\d+)',
413
+ re.IGNORECASE),
414
+ rc, 'intelmpi')
415
+ id_res = id_res or self._mpilib_match_result(
416
+ re.compile(r'^.*Open\s*MPI.*\s+(\d+)\.(\d+)(?:\.(\d+))?',
417
+ re.IGNORECASE),
418
+ rc, 'openmpi')
419
+ if id_res:
420
+ ld_libs_extra = tuple(sorted(ld_libs_extra))
421
+ self._mpilib_identification_cache = tuple([mpi_lib, mpi_tools_dir, ld_libs_extra] +
422
+ id_res)
423
+ if self._mpilib_identification_cache is None:
424
+ ld_libs_extra = tuple(sorted(ld_libs_extra))
425
+ self._mpilib_identification_cache = (mpi_lib, mpi_tools_dir, ld_libs_extra, 'unknown')
426
+ return self._mpilib_identification_cache
427
+
428
+ def _get_sources(self):
429
+ """Returns a list of directories that may contain source files."""
430
+ return self._sources
431
+
432
+ def _set_sources(self, value):
433
+ """Set the list of of directories that may contain source files."""
434
+ if not isinstance(value, collections.abc.Iterable):
435
+ raise ValueError('This should be an Iterable.')
436
+ self._sources = value
437
+
438
+ sources = property(_get_sources, _set_sources)
439
+
440
+ def _actual_mpiopts(self):
441
+ """The mpiopts string."""
442
+ return self.mpiopts
443
+
444
+ def _reshaped_mpiopts(self):
445
+ """Raw list of mpi tool command line options."""
446
+ klast = None
447
+ options = collections.defaultdict(list)
448
+ for optdef in shlex.split(self._actual_mpiopts()):
449
+ if optdef.startswith('-'):
450
+ optdef = optdef.lstrip('-')
451
+ options[optdef].append([])
452
+ klast = optdef
453
+ elif klast is not None:
454
+ options[klast][-1].append(optdef)
455
+ else:
456
+ raise MpiException('Badly shaped mpi option around {!s}'.format(optdef))
457
+ return options
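As an illustration (annotation, not part of the packaged file), a standalone sketch of the parsing performed by _reshaped_mpiopts above, with made-up option values:

    import collections
    import shlex

    def reshape_mpiopts(raw):
        # Same algorithm as _reshaped_mpiopts: group values under their '-' option
        options = collections.defaultdict(list)
        klast = None
        for tok in shlex.split(raw):
            if tok.startswith('-'):
                klast = tok.lstrip('-')
                options[klast].append([])
            elif klast is not None:
                options[klast][-1].append(tok)
            else:
                raise ValueError('Badly shaped mpi option around {!s}'.format(tok))
        return options

    print(dict(reshape_mpiopts('-verbose -x VAR1 -x VAR2=3')))
    # -> {'verbose': [[]], 'x': [['VAR1'], ['VAR2=3']]}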
458
+
459
+ def _hook_binary_mpiopts(self, binary, options):
460
+ """A nasty hook to modify binaries' mpiopts on the fly."""
461
+ return options
462
+
463
+ @property
464
+ def _ranks_mapping(self):
465
+ """When group are defined, associate each MPI rank with a "real" slot."""
466
+ if self._ranks_map_cache is None:
467
+ self._complex_ranks_map = False
468
+ if not self.envelope:
469
+ raise RuntimeError('Ranks mapping should always be used within an envelope.')
470
+ # First deal with bingroups
471
+ ranks_map = dict()
472
+ has_bin_groups = not all([b.group is None for b in self.binaries])
473
+ cursor = 0 # The MPI rank we are currently processing
474
+ if has_bin_groups:
475
+ if not self._supports_manual_ranks_mapping:
476
+ raise RuntimeError('This MpiTool class does not support ranks mapping.')
477
+ self._complex_ranks_map = True
478
+ cursor0 = 0 # The first available "real" slot
479
+ group_cache = collections.defaultdict(list)
480
+ for a_bin in self.binaries:
481
+ if a_bin.group is None:
482
+ # Easy, the usual case
483
+ reserved = list(range(cursor0, cursor0 + a_bin.nprocs))
484
+ cursor0 += a_bin.nprocs
485
+ else:
486
+ reserved = group_cache.get(a_bin, [])
487
+ if not reserved:
488
+ # It is the first time this group of binaries is seen
489
+ # Find out what are the binaries in this group
490
+ bin_buddies = [bin_b for bin_b in self.binaries
491
+ if bin_b.group == a_bin.group]
492
+ if all(['nn' in bin_b.options for bin_b in bin_buddies]):
493
+ # Each of the binary descriptions should define the number of nodes
494
+ max_nn = max([bin_b.options['nn'] for bin_b in bin_buddies])
495
+ for i_node in range(max_nn):
496
+ for bin_b in bin_buddies:
497
+ if bin_b.options['nn'] > i_node:
498
+ group_cache[bin_b].extend(range(cursor0,
499
+ cursor0 +
500
+ bin_b.options['nnp']))
501
+ cursor0 += bin_b.options['nnp']
502
+ else:
503
+ # If the number of nodes is not defined, revert to the number of tasks.
504
+ # This will probably lead to strange results!
505
+ for bin_b in bin_buddies:
506
+ group_cache[bin_b].extend(range(cursor0,
507
+ cursor0 + bin_b.nprocs))
508
+ cursor0 += bin_b.nprocs
509
+ reserved = group_cache[a_bin]
510
+ for rank in range(a_bin.nprocs):
511
+ ranks_map[rank + cursor] = reserved[rank]
512
+ cursor += a_bin.nprocs
513
+ else:
514
+ # Just do nothing...
515
+ for a_bin in self.binaries:
516
+ for rank in range(a_bin.nprocs):
517
+ ranks_map[rank + cursor] = rank + cursor
518
+ cursor += a_bin.nprocs
519
+ # Then deal with distribution
520
+ do_bin_distribution = not all([b.distribution in (None, "continuous")
521
+ for b in self.binaries])
522
+ if self._complex_ranks_map or do_bin_distribution:
523
+ if not self.envelope:
524
+ raise RuntimeError('Ranks mapping should always be used within an envelope.')
525
+ if do_bin_distribution:
526
+ if not self._supports_manual_ranks_mapping:
527
+ raise RuntimeError('This MpiTool class does not support ranks mapping.')
528
+ self._complex_ranks_map = True
529
+ if all(['nn' in b.options and 'nnp' in b.options for b in self.envelope]):
530
+ # Extract node information
531
+ node_cursor = 0
532
+ nodes_id = list()
533
+ for e_bit in self.envelope:
534
+ for _ in range(e_bit.options['nn']):
535
+ nodes_id.extend([node_cursor, ] * e_bit.options['nnp'])
536
+ node_cursor += 1
537
+ # Re-order ranks given the distribution
538
+ cursor = 0
539
+ for a_bin in self.binaries:
540
+ if a_bin.distribution == "roundrobin":
541
+ # The current list of ranks
542
+ actual_ranks = [ranks_map[i]
543
+ for i in range(cursor, cursor + a_bin.nprocs)]
544
+ # Find the node number associated with each rank
545
+ nodes_dict = collections.defaultdict(collections.deque)
546
+ for rank in actual_ranks:
547
+ nodes_dict[nodes_id[rank]].append(rank)
548
+ # Create a new list of ranks in a round-robin manner
549
+ actual_ranks = list()
550
+ iter_nodes = itertools.cycle(sorted(nodes_dict.keys()))
551
+ for _ in range(a_bin.nprocs):
552
+ av_ranks = None
553
+ while not av_ranks:
554
+ av_ranks = nodes_dict[next(iter_nodes)]
555
+ actual_ranks.append(av_ranks.popleft())
556
+ # Inject the result back
557
+ for i in range(a_bin.nprocs):
558
+ ranks_map[cursor + i] = actual_ranks[i]
559
+ cursor += a_bin.nprocs
560
+ else:
561
+ logger.warning('Cannot enforce binary distribution if the envelope ' +
562
+ 'does not contain nn/nnp information')
563
+ # Cache the final result !
564
+ self._ranks_map_cache = ranks_map
565
+ return self._ranks_map_cache
566
+
567
+ @property
568
+ def _complex_ranks_mapping(self):
569
+ """Is it a complex ranks mapping (e.g not the identity)."""
570
+ if self._complex_ranks_map is None:
571
+ # To initialise everything...
572
+ self._ranks_mapping
573
+ return self._complex_ranks_map
574
+
575
+ def _wrapstd_mkwrapper(self):
576
+ """Generate the wrapper script used when wrapstd=True."""
577
+ if not self.mpiwrapstd:
578
+ return None
579
+ # Create the launchwrapper
580
+ wtpl = config.load_template(self.ticket,
581
+ self._wrapstd_wrapper_tpl,
582
+ encoding='utf-8')
583
+ with open(self._wrapstd_wrapper_name, 'w', encoding='utf-8') as fhw:
584
+ fhw.write(
585
+ wtpl.substitute(
586
+ python=self.system.executable,
587
+ mpirankvariable=self._envelope_rank_var,
588
+ )
589
+ )
590
+ self.system.xperm(self._wrapstd_wrapper_name, force=True)
591
+ return self._wrapstd_wrapper_name
592
+
593
+ def _simple_mkcmdline(self, cmdl):
594
+ """Builds the MPI command line when no envelope is used.
595
+
596
+ :param list[str] cmdl: the command line as a list
597
+ """
598
+ effective = 0
599
+ wrapstd = self._wrapstd_mkwrapper()
600
+ for bin_obj in self.binaries:
601
+ if bin_obj.master is None:
602
+ raise MpiException('No master defined before launching MPI')
603
+ # If there are no options, do not bother...
604
+ if len(bin_obj.expanded_options()):
605
+ if effective > 0 and self.binsep:
606
+ cmdl.append(self.binsep)
607
+ e_options = self._hook_binary_mpiopts(bin_obj, bin_obj.expanded_options())
608
+ for k in sorted(e_options.keys()):
609
+ if k in self.optmap:
610
+ cmdl.append(self.optprefix + str(self.optmap[k]))
611
+ if e_options[k] is not None:
612
+ cmdl.append(str(e_options[k]))
613
+ if self.optsep:
614
+ cmdl.append(self.optsep)
615
+ if wrapstd:
616
+ cmdl.append(wrapstd)
617
+ cmdl.append(bin_obj.master)
618
+ cmdl.extend(bin_obj.arguments)
619
+ effective += 1
620
+
621
+ def _envelope_fix_envelope_bit(self, e_bit, e_desc):
622
+ """Set the envelope fake binary options."""
623
+ e_bit.options = {k: v for k, v in e_desc.items()
624
+ if k not in ('openmp', 'np')}
625
+ e_bit.master = self._envelope_wrapper_name
626
+
627
+ def _envelope_mkwrapper_todostack(self):
628
+ ranksidx = 0
629
+ ranks_bsize = dict()
630
+ todostack = dict()
631
+ for bin_obj in self.binaries:
632
+ if bin_obj.master is None:
633
+ raise MpiException('No master defined before launching MPI')
634
+ # If there are no options, do not bother...
635
+ if bin_obj.options and bin_obj.nprocs != 0:
636
+ if not bin_obj.nprocs:
637
+ raise ValueError('nranks must be provided when using envelopes')
638
+ for mpirank in range(ranksidx, ranksidx + bin_obj.nprocs):
639
+ if bin_obj.allowbind:
640
+ ranks_bsize[mpirank] = bin_obj.options.get('openmp', 1)
641
+ else:
642
+ ranks_bsize[mpirank] = -1
643
+ todostack[mpirank] = (bin_obj.master, bin_obj.arguments,
644
+ bin_obj.options.get('openmp', None))
645
+ ranksidx += bin_obj.nprocs
646
+ return todostack, ranks_bsize
647
+
648
+ def _envelope_mkwrapper_cpu_dispensers(self):
649
+ # Dispensers map
650
+ totalnodes = 0
651
+ ranks_idx = 0
652
+ dispensers_map = dict()
653
+ for e_bit in self.envelope:
654
+ if 'nn' in e_bit.options and 'nnp' in e_bit.options:
655
+ for _ in range(e_bit.options['nn']):
656
+ cpu_disp = self.system.cpus_ids_dispenser(topology=self.mpibind_topology)
657
+ if not cpu_disp:
658
+ raise MpiException('Unable to detect the CPU layout with topology: {:s}'
659
+ .format(self.mpibind_topology, ))
660
+ for _ in range(e_bit.options['nnp']):
661
+ dispensers_map[ranks_idx] = (cpu_disp, totalnodes)
662
+ ranks_idx += 1
663
+ totalnodes += 1
664
+ else:
665
+ logger.error("Cannot compute a proper binding without nn/nnp information")
666
+ raise MpiException("Vortex binding error.")
667
+ return dispensers_map
668
+
669
+ def _envelope_mkwrapper_bindingstack(self, ranks_bsize):
670
+ binding_stack = dict()
671
+ binding_node = dict()
672
+ if self.bindingmethod:
673
+ dispensers_map = self._envelope_mkwrapper_cpu_dispensers()
674
+ # Actually generate the binding map
675
+ ranks_idx = 0
676
+ for e_bit in self.envelope:
677
+ for _ in range(e_bit.options['nn']):
678
+ for _ in range(e_bit.options['nnp']):
679
+ cpu_disp, i_node = dispensers_map[self._ranks_mapping[ranks_idx]]
680
+ if ranks_bsize.get(ranks_idx, 1) != -1:
681
+ try:
682
+ binding_stack[ranks_idx] = cpu_disp(ranks_bsize.get(ranks_idx, 1))
683
+ except (StopIteration, IndexError):
684
+ # When CPU dispensers are exhausted (it might happen if more tasks
685
+ # than available CPUs are requested).
686
+ dispensers_map = self._envelope_mkwrapper_cpu_dispensers()
687
+ cpu_disp, i_node = dispensers_map[self._ranks_mapping[ranks_idx]]
688
+ binding_stack[ranks_idx] = cpu_disp(ranks_bsize.get(ranks_idx, 1))
689
+ else:
690
+ binding_stack[ranks_idx] = set(self.system.cpus_info.cpus.keys())
691
+ binding_node[ranks_idx] = i_node
692
+ ranks_idx += 1
693
+ return binding_stack, binding_node
694
+
695
+ def _envelope_mkwrapper_tplsubs(self, todostack, bindingstack):
696
+ return dict(python=self.system.executable,
697
+ sitepath=self.system.path.join(self.ticket.glove.siteroot, 'site'),
698
+ mpirankvariable=self._envelope_rank_var,
699
+ todolist=("\n".join([" {:d}: ('{:s}', [{:s}], {:s}),".format(
700
+ mpi_r,
701
+ what[0],
702
+ ', '.join(["'{:s}'".format(a) for a in what[1]]),
703
+ str(what[2]))
704
+ for mpi_r, what in sorted(todostack.items())])),
705
+ bindinglist=("\n".join([" {:d}: [{:s}],".format(
706
+ mpi_r,
707
+ ', '.join(['{:d}'.format(a) for a in what]))
708
+ for mpi_r, what in sorted(bindingstack.items())])))
709
+
710
+ def _envelope_mkwrapper(self, cmdl):
711
+ """Generate the wrapper script used when an envelope is defined."""
712
+ # Generate the dictionary that associate rank numbers and programs
713
+ todostack, ranks_bsize = self._envelope_mkwrapper_todostack()
714
+ # Generate the binding stuff
715
+ bindingstack, bindingnode = self._envelope_mkwrapper_bindingstack(ranks_bsize)
716
+ # Print binding details
717
+ logger.debug('Vortex Envelope Mechanism is used' +
718
+ (' & vortex binding is on.' if bindingstack else '.'))
719
+ env_info_head = '{:5s} {:24s} {:4s}'.format('#rank', 'binary_name', '#OMP')
720
+ env_info_fmt = '{:5d} {:24s} {:4s}'
721
+ if bindingstack:
722
+ env_info_head += ' {:5s} {:s}'.format('#node', 'bindings_list')
723
+ env_info_fmt2 = ' {:5d} {:s}'
724
+ binding_str = [env_info_head]
725
+ for i_rank in sorted(todostack):
726
+ entry_str = env_info_fmt.format(i_rank,
727
+ self.system.path.basename(todostack[i_rank][0])[:24],
728
+ str(todostack[i_rank][2]))
729
+ if bindingstack:
730
+ entry_str += env_info_fmt2.format(bindingnode[i_rank],
731
+ ','.join([str(c)
732
+ for c in sorted(bindingstack[i_rank])]))
733
+ binding_str.append(entry_str)
734
+ logger.debug('Here are the envelope details:\n%s', '\n'.join(binding_str))
735
+ # Create the launchwrapper
736
+ wtpl = config.load_template(self.ticket,
737
+ self._envelope_wrapper_tpl,
738
+ encoding='utf-8')
739
+ with open(self._envelope_wrapper_name, 'w', encoding='utf-8') as fhw:
740
+ fhw.write(
741
+ wtpl.substitute(**self._envelope_mkwrapper_tplsubs(todostack,
742
+ bindingstack))
743
+ )
744
+ self.system.xperm(self._envelope_wrapper_name, force=True)
745
+ return self._envelope_wrapper_name
746
+
747
+ def _envelope_mkcmdline(self, cmdl):
748
+ """Builds the MPI command line when an envelope is used.
749
+
750
+ :param list[str] cmdl: the command line as a list
751
+ """
752
+ self._envelope_mkwrapper(cmdl)
753
+ wrapstd = self._wrapstd_mkwrapper()
754
+ for effective, e_bit in enumerate(self.envelope):
755
+ if effective > 0 and self.binsep:
756
+ cmdl.append(self.binsep)
757
+ e_options = self._hook_binary_mpiopts(e_bit, e_bit.expanded_options())
758
+ for k in sorted(e_options.keys()):
759
+ if k in self.optmap:
760
+ cmdl.append(self.optprefix + str(self.optmap[k]))
761
+ if e_options[k] is not None:
762
+ cmdl.append(str(e_options[k]))
763
+ self._envelope_mkcmdline_extra(cmdl)
764
+ if self.optsep:
765
+ cmdl.append(self.optsep)
766
+ if wrapstd:
767
+ cmdl.append(wrapstd)
768
+ cmdl.append(e_bit.master)
769
+
770
+ def _envelope_mkcmdline_extra(self, cmdl):
771
+ """Possibly add extra options when building the envelope."""
772
+ pass
773
+
774
+ def mkcmdline(self):
775
+ """Builds the MPI command line."""
776
+ cmdl = [self.launcher, ]
777
+ for k, instances in sorted(self._reshaped_mpiopts().items()):
778
+ for instance in instances:
779
+ cmdl.append(self.optprefix + str(k))
780
+ for a_value in instance:
781
+ cmdl.append(str(a_value))
782
+ if self.envelope:
783
+ self._envelope_mkcmdline(cmdl)
784
+ else:
785
+ self._simple_mkcmdline(cmdl)
786
+ return cmdl
787
+
788
+ def clean(self, opts=None):
789
+ """post-execution cleaning."""
790
+ if self.mpiwrapstd:
791
+ # Deal with standard output/error files
792
+ for outf in sorted(self.system.glob('vwrap_stdeo.*')):
793
+ rank = int(outf[12:])
794
+ with open(outf,
795
+ encoding=locale.getlocale()[1] or 'ascii',
796
+ errors='replace') as sfh:
797
+ for (i, l) in enumerate(sfh):
798
+ if i == 0:
799
+ self.system.highlight('rank {:d}: stdout/err'.format(rank))
800
+ print(l.rstrip('\n'))
801
+ self.system.remove(outf)
802
+ if self.envelope and self.system.path.exists(self._envelope_wrapper_name):
803
+ self.system.remove(self._envelope_wrapper_name)
804
+ if self.mpiwrapstd:
805
+ self.system.remove(self._wrapstd_wrapper_name)
806
+ # Call the dedicated method on registered MPI binaries
807
+ for bin_obj in self.binaries:
808
+ bin_obj.clean(opts)
809
+
810
+ def find_namelists(self, opts=None):
811
+ """Find any namelists candidates in actual context inputs."""
812
+ namcandidates = [x.rh for x in
813
+ self.context.sequence.effective_inputs(kind=('namelist', 'namelistfp'))]
814
+ if opts is not None and 'loop' in opts:
815
+ namcandidates = [
816
+ x for x in namcandidates
817
+ if (hasattr(x.resource, 'term') and x.resource.term == opts['loop'])
818
+ ]
819
+ else:
820
+ logger.info('No loop option in current parallel execution.')
821
+ self.system.highlight('Namelist candidates')
822
+ for nam in namcandidates:
823
+ nam.quickview()
824
+ return namcandidates
825
+
826
+ def setup_namelist_delta(self, namcontents, namlocal):
827
+ """Abstract method for applying a delta: return False."""
828
+ return False
829
+
830
+ def setup_namelists(self, opts=None):
831
+ """MPI information to be written in namelists."""
832
+ for namrh in self.find_namelists(opts):
833
+ namc = namrh.contents
834
+ changed = self.setup_namelist_delta(namc, namrh.container.actualpath())
835
+ # Call the dedicated method on registered MPI binaries
836
+ for bin_obj in self.binaries:
837
+ changed = bin_obj.setup_namelist_delta(namc, namrh.container.actualpath()) or changed
838
+ if changed:
839
+ if namc.dumps_needs_update:
840
+ logger.info('Rewriting the %s namelist file.', namrh.container.actualpath())
841
+ namc.rewrite(namrh.container)
842
+
843
+ def _logged_env_set(self, k, v):
844
+ """Set an environment variable *k* and emit a log message."""
845
+ logger.info('Setting the "%s" environment variable to "%s"', k.upper(), v)
846
+ self.env[k] = v
847
+
848
+ def _logged_env_del(self, k):
849
+ """Delete the environment variable *k* and emit a log message."""
850
+ logger.info('Deleting the "%s" environment variable', k.upper())
851
+ del self.env[k]
852
+
853
+ def _environment_substitution_dict(self):
854
+ """Things that may be substituted in environment variables."""
855
+ sdict = dict()
856
+ mpilib_data = self._mpilib_data()
857
+ if mpilib_data:
858
+ sdict.update(mpilib=mpilib_data[0], mpibindir=mpilib_data[1])
859
+ return sdict
860
+
861
+ def setup_environment(self, opts):
862
+ """MPI environment setup."""
863
+ confdata = from_config(section="mpienv")
864
+ envsub = self._environment_substitution_dict()
865
+ for k, v in confdata.items():
866
+ if k not in self.env:
867
+ try:
868
+ v = str(v).format(**envsub)
869
+ except KeyError:
870
+ logger.warning("Substitution failed for the environment " +
871
+ "variable %s. Ignoring it.", k)
872
+ else:
873
+ self._logged_env_set(k, v)
874
+ # Call the dedicated method on registered MPI binaries
875
+ for bin_obj in self.binaries:
876
+ bin_obj.setup_environment(opts)
877
+
878
+ def setup(self, opts=None):
879
+ """Specific MPI settings to be applied before run."""
880
+ self.setup_namelists(opts)
881
+ if self.target is not None:
882
+ self.setup_environment(opts)
883
+
884
+
885
+ class MpiBinaryDescription(footprints.FootprintBase):
886
+ """Root class for any :class:`MpiBinaryDescription` subclass."""
887
+
888
+ _collector = ('mpibinary',)
889
+ _abstract = True
890
+ _footprint = dict(
891
+ info = 'Holds information about a given MPI binary',
892
+ attr = dict(
893
+ kind = dict(
894
+ info = "A free form description of the binary's type",
895
+ values = ['basic', ],
896
+ ),
897
+ nodes = dict(
898
+ info = "The number of nodes for this MPI binary",
899
+ type = int,
900
+ optional = True,
901
+ access = 'rwx'
902
+ ),
903
+ tasks = dict(
904
+ info = "The number of tasks per node for this MPI binary",
905
+ type = int,
906
+ optional = True,
907
+ access = 'rwx'
908
+ ),
909
+ openmp = dict(
910
+ info = "The number of threads per task for this MPI binary",
911
+ type = int,
912
+ optional = True,
913
+ access = 'rwx'
914
+ ),
915
+ ranks = dict(
916
+ info = "The number of MPI ranks to use (only when working in an envelope)",
917
+ type = int,
918
+ optional = True,
919
+ access = 'rwx'
920
+ ),
921
+ allowbind = dict(
922
+ info = "Allow the MpiTool to bind this executable",
923
+ type = bool,
924
+ optional = True,
925
+ default = True,
926
+ ),
927
+ basics = dict(
928
+ type = footprints.FPList,
929
+ optional = True,
930
+ default = footprints.FPList(['system', 'env', 'target', 'context'])
931
+ )
932
+ )
933
+ )
934
+
935
+ def __init__(self, *args, **kw):
936
+ """After parent initialization, set master and options to undefined."""
937
+ logger.debug('Abstract mpi tool init %s', self.__class__)
938
+ super().__init__(*args, **kw)
939
+ self._master = None
940
+ self._arguments = ()
941
+ self._options = None
942
+ self._group = None
943
+
944
+ def __getattr__(self, key):
945
+ """Have a look to basics values provided by some proxy."""
946
+ if key in self.basics:
947
+ return getattr(self, '_' + key)
948
+ else:
949
+ raise AttributeError('Attribute [%s] is not a basic mpitool attribute' % key)
950
+
951
+ def import_basics(self, obj, attrs=None):
952
+ """Import some current values such as system, env, target and context from provided ``obj``."""
953
+ if attrs is None:
954
+ attrs = self.basics
955
+ for k in [x for x in attrs if x in self.basics and hasattr(obj, x)]:
956
+ setattr(self, '_' + k, getattr(obj, k))
957
+
958
+ def _get_options(self):
959
+ """Retrieve the current set of MPI options."""
960
+ if self._options is None:
961
+ self._set_options(None)
962
+ return self._options
963
+
964
+ def _set_options(self, value=None):
965
+ """Input a raw list of MPI options."""
966
+ self._options = dict()
967
+ if value is None:
968
+ value = dict()
969
+ if self.ranks is not None:
970
+ self._options['np'] = self.ranks
971
+ if self.nodes is not None or self.tasks is not None:
972
+ raise ValueError('Incompatible options provided.')
973
+ else:
974
+ if self.nodes is not None:
975
+ self._options['nn'] = self.nodes
976
+ if self.tasks is not None:
977
+ self._options['nnp'] = self.tasks
978
+ if self.openmp is not None:
979
+ self._options['openmp'] = self.openmp
980
+ for k, v in value.items():
981
+ self._options[k.lstrip('-').lower()] = v
982
+
983
+ options = property(_get_options, _set_options)
984
+
985
+ def expanded_options(self):
986
+ """The MPI options actually used by the :class:`MpiTool` object to generate the command line."""
987
+ options = self.options.copy()
988
+ options.setdefault('np', self.nprocs)
989
+ return options
990
+
991
+ def _get_group(self):
992
+ """The group the current binary belongs to (may be ``None``)."""
993
+ return self._group
994
+
995
+ def _set_group(self, value):
996
+ """Set the binary's group."""
997
+ self._group = value
998
+
999
+ group = property(_get_group, _set_group)
1000
+
1001
+ @property
1002
+ def nprocs(self):
1003
+ """Figure out what is the effective total number of tasks."""
1004
+ if 'np' in self.options:
1005
+ nbproc = int(self.options['np'])
1006
+ elif 'nnp' in self.options and 'nn' in self.options:
1007
+ nbproc = int(self.options.get('nnp')) * int(self.options.get('nn'))
1008
+ else:
1009
+ raise MpiException('Impossible to compute nprocs.')
1010
+ return nbproc
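A quick hedged check of the rule implemented above (values are made up): nodes/tasks translate to nn/nnp and multiply, while an explicit ranks count flows through the np option and takes precedence:

    def nprocs(options):
        # Mirrors MpiBinaryDescription.nprocs
        if 'np' in options:
            return int(options['np'])
        if 'nn' in options and 'nnp' in options:
            return int(options['nn']) * int(options['nnp'])
        raise ValueError('Impossible to compute nprocs.')

    assert nprocs({'nn': 2, 'nnp': 4}) == 8
    assert nprocs({'np': 16}) == 16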
1011
+
1012
+ def _get_master(self):
1013
+ """Retrieve the master binary name that should be used."""
1014
+ return self._master
1015
+
1016
+ def _set_master(self, master):
1017
+ """Keep a copy of the master binary pathname."""
1018
+ self._master = master
1019
+
1020
+ master = property(_get_master, _set_master)
1021
+
1022
+ def _get_arguments(self):
1023
+ """Retrieve the master's arguments list."""
1024
+ return self._arguments
1025
+
1026
+ def _set_arguments(self, args):
1027
+ """Keep a copy of the master binary pathname."""
1028
+ if isinstance(args, str):
1029
+ self._arguments = args.split()
1030
+ elif isinstance(args, collections.abc.Iterable):
1031
+ self._arguments = [str(a) for a in args]
1032
+ else:
1033
+ raise ValueError('Improper *args* argument provided.')
1034
+
1035
+ arguments = property(_get_arguments, _set_arguments)
1036
+
1037
+ def clean(self, opts=None):
1038
+ """Abstract method for post-execution cleaning."""
1039
+ pass
1040
+
1041
+ def setup_namelist_delta(self, namcontents, namlocal):
1042
+ """Abstract method for applying a delta: return False."""
1043
+ return False
1044
+
1045
+ def setup_environment(self, opts):
1046
+ """Abstract MPI environment setup."""
1047
+ pass
1048
+
1049
+
1050
+ class MpiEnvelopeBit(MpiBinaryDescription):
1051
+ """Set NPROC and NBPROC in namelists given the MPI distribution."""
1052
+
1053
+ _footprint = dict(
1054
+ attr = dict(
1055
+ kind = dict(
1056
+ values = ['basicenvelopebit', ],
1057
+ ),
1058
+ )
1059
+ )
1060
+
1061
+
1062
+ class MpiBinary(MpiBinaryDescription):
1063
+ _footprint = dict(
1064
+ attr = dict(
1065
+ distribution=dict(
1066
+ info="Describes how the various nodes are distributed accross nodes",
1067
+ values=['continuous', 'roundrobin'],
1068
+ optional=True,
1069
+ ),
1070
+ )
1071
+ )
1072
+
1073
+
1074
+ class MpiBinaryBasic(MpiBinary):
1075
+ """Set NPROC and NBPROC in namelists given the MPI distribution."""
1076
+
1077
+ _footprint = dict(
1078
+ attr = dict(
1079
+ kind = dict(
1080
+ values = ['basicsingle', ],
1081
+ ),
1082
+ )
1083
+ )
1084
+
1085
+ def setup_namelist_delta(self, namcontents, namlocal):
1086
+ """Applying MPI profile on local namelist ``namlocal`` with contents namcontents."""
1087
+ namw = False
1088
+ # List of macros actually used in the namelist
1089
+ nam_macros = set()
1090
+ for nam_block in namcontents.values():
1091
+ nam_macros.update(nam_block.macros())
1092
+ # Look for the relevant ones
1093
+ nprocs_macros = ('NPROC', 'NBPROC', 'NTASKS')
1094
+ if any([n in nam_macros for n in nprocs_macros]):
1095
+ for n in nprocs_macros:
1096
+ logger.info('Setup macro %s=%s in %s', n, self.nprocs, namlocal)
1097
+ namcontents.setmacro(n, self.nprocs)
1098
+ namw = True
1099
+ return namw
1100
+
1101
+
1102
+ class MpiBinaryIOServer(MpiBinary):
1103
+ """Standard binary description for IO Server binaries."""
1104
+
1105
+ _footprint = dict(
1106
+ attr = dict(
1107
+ kind = dict(
1108
+ values = ['ioserv', ],
1109
+ ),
1110
+ )
1111
+ )
1112
+
1113
+ def __init__(self, *args, **kw):
1114
+ """After parent initialization, set launcher value."""
1115
+ logger.debug('Abstract mpi tool init %s', self.__class__)
1116
+ super().__init__(*args, **kw)
1117
+ thisenv = env.current()
1118
+ if self.ranks is None:
1119
+ self.ranks = thisenv.VORTEX_IOSERVER_RANKS
1120
+ if self.nodes is None:
1121
+ self.nodes = thisenv.VORTEX_IOSERVER_NODES
1122
+ if self.tasks is None:
1123
+ self.tasks = thisenv.VORTEX_IOSERVER_TASKS
1124
+ if self.openmp is None:
1125
+ self.openmp = thisenv.VORTEX_IOSERVER_OPENMP
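A hedged sketch of how these defaults are usually driven (the variable names come straight from the code above; the values are illustrative):

    # In the job environment, before the binary description is created:
    #   export VORTEX_IOSERVER_NODES=1
    #   export VORTEX_IOSERVER_TASKS=8
    #   export VORTEX_IOSERVER_OPENMP=4
    from footprints import proxy as fpx
    io_bin = fpx.mpibinary(kind='ioserv')  # nodes/tasks/openmp default to the exported values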
1126
+
1127
+ def expanded_options(self):
1128
+ """The number of IO nodes may be 0: account for that."""
1129
+ if self.nprocs == 0:
1130
+ return dict()
1131
+ else:
1132
+ return super().expanded_options()
1133
+
1134
+
1135
+ class MpiRun(MpiTool):
1136
+ """Standard MPI launcher on most systems: `mpirun`."""
1137
+
1138
+ _footprint = dict(
1139
+ attr = dict(
1140
+ sysname = dict(
1141
+ values = ['Linux', 'Darwin', 'UnitTestLinux']
1142
+ ),
1143
+ mpiname = dict(
1144
+ values = ['mpirun', 'mpiperso', 'default'],
1145
+ remap = dict(
1146
+ default = 'mpirun'
1147
+ ),
1148
+ ),
1149
+ optsep = dict(
1150
+ default = '',
1151
+ ),
1152
+ optprefix = dict(
1153
+ default = '-',
1154
+ ),
1155
+ optmap = dict(
1156
+ default = footprints.FPDict(np='np', nnp='npernode')
1157
+ ),
1158
+ binsep = dict(
1159
+ default = ':',
1160
+ )
1161
+ )
1162
+ )
1163
+
1164
+
1165
+ class SRun(MpiTool):
1166
+ """SLURM's srun launcher."""
1167
+
1168
+ _footprint = dict(
1169
+ attr = dict(
1170
+ sysname = dict(
1171
+ values = ['Linux', 'UnitTestLinux']
1172
+ ),
1173
+ mpiname = dict(
1174
+ values = ['srun', ],
1175
+ ),
1176
+ optsep = dict(
1177
+ default = '',
1178
+ ),
1179
+ optprefix = dict(
1180
+ default = '--',
1181
+ ),
1182
+ optmap = dict(
1183
+ default = footprints.FPDict(nn='nodes', nnp='ntasks-per-node', np='ntasks')
1184
+ ),
1185
+ slurmversion = dict(
1186
+ type = int,
1187
+ optional = True
1188
+ ),
1189
+ mpiwrapstd = dict(
1190
+ default = True,
1191
+ ),
1192
+ bindingmethod = dict(
1193
+ info = 'How to bind the MPI processes',
1194
+ values = ['native', 'vortex', ],
1195
+ access = 'rwx',
1196
+ optional = True,
1197
+ doc_visibility = footprints.doc.visibility.ADVANCED,
1198
+ doc_zorder = -90,
1199
+ ),
1200
+ )
1201
+ )
1202
+
1203
+ _envelope_nodelist_name = './global_envelope_nodelist'
1204
+ _envelope_rank_var = 'SLURM_PROCID'
1205
+ _supports_manual_ranks_mapping = True
1206
+
1207
+ @property
1208
+ def _actual_slurmversion(self):
1209
+ """Return the slurm major version number."""
1210
+ slurmversion = (
1211
+ self.slurmversion or
1212
+ from_config(section="mpitool", key="slurmversion")
1213
+ )
1214
+ if not slurmversion:
1215
+ raise ValueError("No slurm version specified")
1216
+ return slurmversion
1217
+
1218
+ def _set_binaries_hack(self, binaries):
1219
+ """Set the list of :class:`MpiBinaryDescription` objects associated with this instance."""
1220
+ if not self.envelope and len([binary for binary in binaries if binary.expanded_options()]) > 1:
1221
+ self._set_envelope_from_binaries()
1222
+
1223
+ def _valid_envelope(self, value):
1224
+ """Tweak the envelope ddescription values."""
1225
+ for e in value:
1226
+ if not ('nn' in e and 'nnp' in e):
1227
+ raise MpiException("Srun needs a nn/nnp specification to build the envelope.")
1228
+
1229
+ def _set_envelope(self, value):
1230
+ """Set the envelope description."""
1231
+ super()._set_envelope(value)
1232
+ if len(self._envelope) > 1 and self.bindingmethod not in (None, 'vortex'):
1233
+ logger.warning("Resetting the binding method to 'Vortex'.")
1234
+ self.bindingmethod = 'vortex'
1235
+
1236
+ envelope = property(MpiTool._get_envelope, _set_envelope)
1237
+
1238
+ def _set_binaries_envelope_hack(self, binaries):
1239
+ """Tweak the envelope after binaries were setup."""
1240
+ if self.bindingmethod not in (None, 'vortex'):
1241
+ openmps = {b.options.get('openmp', None) for b in binaries}
1242
+ if len(openmps) > 1:
1243
+ logger.warning("Resetting the binding method to 'Vortex' because " +
1244
+ "the number of threads is not uniform.")
1245
+ self.bindingmethod = 'vortex'
1246
+
1247
+ @property
1248
+ def _cpubind_opt(self):
1249
+ return self.optprefix + ('cpu_bind' if self._actual_slurmversion < 18
1250
+ else 'cpu-bind')
1251
+
1252
+ def _build_cpumask(self, cmdl, what, bsize):
1253
+ """Add a --cpu-bind option if needed."""
1254
+ cmdl.append(self._cpubind_opt)
1255
+ if self.bindingmethod == 'native':
1256
+ assert len(what) == 1, "Only one item is allowed."
1257
+ if what[0].allowbind:
1258
+ ids = self.system.cpus_ids_per_blocks(blocksize=bsize,
1259
+ topology=self.mpibind_topology,
1260
+ hexmask=True)
1261
+ if not ids:
1262
+ raise MpiException('Unable to detect the CPU layout with topology: {:s}'
1263
+ .format(self.mpibind_topology, ))
1264
+ masklist = [m for _, m in zip(range(what[0].options['nnp']),
1265
+ itertools.cycle(ids))]
1266
+ cmdl.append('mask_cpu:' + ','.join(masklist))
1267
+ else:
1268
+ cmdl.append('none')
1269
+ else:
1270
+ cmdl.append('none')
1271
+
1272
+ def _simple_mkcmdline(self, cmdl):
1273
+ """Builds the MPI command line when no envelope is used.
1274
+
1275
+ :param list[str] cmdl: the command line as a list
1276
+ """
1277
+ target_bins = [binary for binary in self.binaries if len(binary.expanded_options())]
1278
+ self._build_cpumask(cmdl, target_bins, target_bins[0].options.get('openmp', 1))
1279
+ super()._simple_mkcmdline(cmdl)
1280
+
1281
+ def _envelope_mkcmdline(self, cmdl):
1282
+ """Builds the MPI command line when an envelope is used.
1283
+
1284
+ :param list[str] cmdl: the command line as a list
1285
+ """
1286
+ # Simple case, only one envelope description
1287
+ has_bin_groups = not all([b.group is None for b in self.binaries])
1288
+ openmps = {b.options.get('openmp', 1) for b in self.binaries}
1289
+ if len(self.envelope) == 1 and not self._complex_ranks_mapping and len(openmps) == 1:
1290
+ self._build_cpumask(cmdl, self.envelope, openmps.pop())
1291
+ super()._envelope_mkcmdline(cmdl)
1292
+ # Multiple entries... use the nodelist stuff :-(
1293
+ else:
1294
+ # Find all the available nodes and ranks
1295
+ base_nodelist = []
1296
+ totalnodes = 0
1297
+ totaltasks = 0
1298
+ availnodes = itertools.cycle(xlist_strings(self.env.SLURM_NODELIST
1299
+ if self._actual_slurmversion < 18
1300
+ else self.env.SLURM_JOB_NODELIST))
1301
+ for e_bit in self.envelope:
1302
+ totaltasks += e_bit.nprocs
1303
+ for _ in range(e_bit.options['nn']):
1304
+ availnode = next(availnodes)
1305
+ logger.debug('Node #%5d is: %s', totalnodes, availnode)
1306
+ base_nodelist.extend([availnode, ] * e_bit.options['nnp'])
1307
+ totalnodes += 1
1308
+ # Re-order the nodelist based on the binary groups
1309
+ nodelist = list()
1310
+ for i_rank in range(len(base_nodelist)):
1311
+ if i_rank < len(self._ranks_mapping):
1312
+ nodelist.append(base_nodelist[self._ranks_mapping[i_rank]])
1313
+ else:
1314
+ nodelist.append(base_nodelist[i_rank])
1315
+ # Write it to the nodefile
1316
+ with open(self._envelope_nodelist_name, 'w') as fhnl:
1317
+ fhnl.write("\n".join(nodelist))
1318
+ # Generate wrappers
1319
+ self._envelope_mkwrapper(cmdl)
1320
+ wrapstd = self._wrapstd_mkwrapper()
1321
+ # Update the command line
1322
+ cmdl.append(self.optprefix + 'nodelist')
1323
+ cmdl.append(self._envelope_nodelist_name)
1324
+ cmdl.append(self.optprefix + 'ntasks')
1325
+ cmdl.append(str(totaltasks))
1326
+ cmdl.append(self.optprefix + 'distribution')
1327
+ cmdl.append('arbitrary')
1328
+ cmdl.append(self._cpubind_opt)
1329
+ cmdl.append('none')
1330
+ if wrapstd:
1331
+ cmdl.append(wrapstd)
1332
+ cmdl.append(e_bit.master)
1333
+
1334
+ def clean(self, opts=None):
1335
+ """post-execution cleaning."""
1336
+ super().clean(opts)
1337
+ if self.envelope and len(self.envelope) > 1:
1338
+ self.system.remove(self._envelope_nodelist_name)
1339
+
1340
+ def _environment_substitution_dict(self): # @UnusedVariable
1341
+ """Things that may be substituted in environment variables."""
1342
+ sdict = super()._environment_substitution_dict()
1343
+ shp = self.system.path
1344
+ # Detect the path to the srun command
1345
+ actlauncher = self.launcher
1346
+ if not shp.exists(self.launcher):
1347
+ actlauncher = self.system.which(actlauncher)
1348
+ if not actlauncher:
1349
+ logger.error('The SRun launcher could not be found.')
1350
+ return sdict
1351
+ sdict['srunpath'] = actlauncher
1352
+ # Detect the path to the PMI library
1353
+ pmilib = shp.normpath(shp.join(shp.dirname(actlauncher),
1354
+ '..', 'lib64', 'libpmi.so'))
1355
+ if not shp.exists(pmilib):
1356
+ pmilib = shp.normpath(shp.join(shp.dirname(actlauncher),
1357
+ '..', 'lib', 'libpmi.so'))
1358
+ if not shp.exists(pmilib):
1359
+ logger.error('Could not find a PMI library')
1360
+ return sdict
1361
+ sdict['pmilib'] = pmilib
1362
+ return sdict
1363
+
1364
+ def setup_environment(self, opts):
1365
+ """Tweak the environment with some srun specific settings."""
1366
+ super().setup_environment(opts)
1367
+ if (self._complex_ranks_mapping and
1368
+ self._mpilib_identification() and
1369
+ self._mpilib_identification()[3] == 'intelmpi'):
1370
+ logger.info('(Sadly) with IntelMPI, I_MPI_SLURM_EXT=0 is needed when a complex arbitrary ' +
1371
+ 'ranks distribution is used. Exporting it !')
1372
+ self.env['I_MPI_SLURM_EXT'] = 0
1373
+ if len(self.binaries) == 1 and not self.envelope:
1374
+ omp = self.binaries[0].options.get('openmp', None)
1375
+ if omp is not None:
1376
+ self._logged_env_set('OMP_NUM_THREADS', omp)
1377
+ if self.bindingmethod == 'native' and 'OMP_PROC_BIND' not in self.env:
1378
+ self._logged_env_set('OMP_PROC_BIND', 'true')
1379
+ # cleaning unwanted environment stuff
1380
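+        # (e.g. SLURM_NTASKS, SLURM_NPROCS, SLURM_TASKS_PER_NODE, SLURM_CPUS_PER_TASK),
+        # presumably so that they do not conflict with the geometry requested above.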
+        unwanted = set()
+        for k in self.env:
+            if k.startswith('SLURM_'):
+                k = k[6:]
+                if (k in ('NTASKS', 'NPROCS') or
+                        re.match('N?TASKS_PER_', k) or
+                        re.match('N?CPUS_PER_', k)):
+                    unwanted.add(k)
+        for k in unwanted:
+            self.env.delvar('SLURM_{:s}'.format(k))
+
+
+class SRunDDT(SRun):
+    """SLURM's srun launcher with ARM's DDT."""
+
+    _footprint = dict(
+        attr = dict(
+            mpiname = dict(
+                values = ['srun-ddt', ],
+            ),
+        )
+    )
+
+    _conf_suffix = '-ddt'
+
+    def mkcmdline(self):
+        """Add the DDT prefix command to the command line"""
+        cmdl = super().mkcmdline()
+        armtool = ArmForgeTool(self.ticket)
+        for extra_c in reversed(armtool.ddt_prefix_cmd(
+                sources=self.sources,
+                workdir=self.system.path.dirname(self.binaries[0].master)
+        )):
+            cmdl.insert(0, extra_c)
+        return cmdl
+
+
+class OmpiMpiRun(MpiTool):
+    """OpenMPI's mpirun launcher."""
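+
+    # A non-envelope command line built by this class looks roughly like
+    # (illustrative only):
+    #   mpirun -np 64 -npernode 8 -x OMP_NUM_THREADS=4 ./bin_a : -np 8 ./bin_b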
+
+    _footprint = dict(
+        attr = dict(
+            sysname = dict(
+                values = ['Linux', 'UnitTestLinux']
+            ),
+            mpiname = dict(
+                values = ['openmpi', ],
+            ),
+            optsep = dict(
+                default = '',
+            ),
+            optprefix = dict(
+                default = '-',
+            ),
+            optmap = dict(
+                default = footprints.FPDict(np='np', nnp='npernode', xopenmp='x')
+            ),
+            binsep = dict(
+                default = ':',
+            ),
+            mpiwrapstd = dict(
+                default = True,
+            ),
+            bindingmethod = dict(
+                info = 'How to bind the MPI processes',
+                values = ['native', 'vortex', ],
+                optional = True,
+                doc_visibility = footprints.doc.visibility.ADVANCED,
+                doc_zorder = -90,
+            ),
+            preexistingenv = dict(
+                optional = True,
+                type = bool,
+                default = False,
+            ),
+        )
+    )
+
+    _envelope_rankfile_name = './global_envelope_rankfile'
+    _envelope_rank_var = 'OMPI_COMM_WORLD_RANK'
+    _supports_manual_ranks_mapping = True
+
+    def _get_launcher(self):
+        """Returns the name of the mpi tool to be used."""
+        if self.mpilauncher:
+            return self.mpilauncher
+        else:
+            mpi_data = self._mpilib_data()
+            if mpi_data:
+                return self.system.path.join(mpi_data[1], 'mpirun')
+            else:
+                return self._launcher
+
+    launcher = property(_get_launcher, MpiTool._set_launcher)
+
+    def _set_binaries_hack(self, binaries):
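+        """If native binding is requested without an explicit envelope, build one from the binaries."""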
+        if not self.envelope and self.bindingmethod == 'native':
+            self._set_envelope_from_binaries()
+
+    def _valid_envelope(self, value):
+        """Tweak the envelope description values."""
+        for e in value:
+            if not ('nn' in e and 'nnp' in e):
+                raise MpiException("OpenMPI/mpirun needs a nn/nnp specification " +
+                                   "to build the envelope.")
+
+    def _hook_binary_mpiopts(self, binary, options):
+        openmp = options.pop('openmp', None)
+        if openmp is not None:
+            options['xopenmp'] = 'OMP_NUM_THREADS={:d}'.format(openmp)
+        return options
+
+    def _simple_mkcmdline(self, cmdl):
+        """Builds the MPI command line when no envelope is used.
+
+        :param list[str] cmdl: the command line as a list
+        """
+        if self.bindingmethod is not None:
+            raise RuntimeError('If bindingmethod is set, an envelope should always be used.')
+        super()._simple_mkcmdline(cmdl)
+
+    def _create_rankfile(self, rankslist, nodeslist, slotslist):
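+        """Write the OpenMPI rankfile.
+
+        One ``rank N=node slot=...`` line per rank, e.g. ``rank 0=+n0 slot=0-7``.
+        """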
+        rf_strings = []
+
+        def _dump_slot_string(slot_strings, s_start, s_end):
+            if s_start == s_end:
+                slot_strings.append('{:d}'.format(s_start))
+            else:
+                slot_strings.append('{:d}-{:d}'.format(s_start, s_end))
+
+        for rank, node, slot in zip(rankslist, nodeslist, slotslist):
+            slot_strings = list()
+            if slot:
+                slot = sorted(slot)
+                s_end = s_start = slot[0]
+                for s in slot[1:]:
+                    if s_end + 1 == s:
+                        s_end = s
+                    else:
+                        _dump_slot_string(slot_strings, s_start, s_end)
+                        s_end = s_start = s
+                _dump_slot_string(slot_strings, s_start, s_end)
+            rf_strings.append(
+                'rank {:d}={:s} slot={:s}'.format(rank,
+                                                  node,
+                                                  ','.join(slot_strings))
+            )
+        logger.info('self.preexistingenv = {}'.format(self.preexistingenv))
+        if self.preexistingenv and self.system.path.exists(self._envelope_rankfile_name):
+            logger.info('envelope file found in the directory')
+        else:
+            if self.preexistingenv:
+                logger.info('preexistingenv set to true, but no envelope file found')
+                logger.info('Using the vortex-computed one')
+            logger.debug('Here is the rankfile content:\n%s', '\n'.join(rf_strings))
+            with open(self._envelope_rankfile_name, mode='w') as tmp_rf:
+                tmp_rf.write('\n'.join(rf_strings))
+        return self._envelope_rankfile_name
+
+    def _envelope_nodelist(self):
+        """Create the relative nodelist based on the envelope"""
+        base_nodelist = []
+        totalnodes = 0
+        for e_bit in self.envelope:
+            for i_node in range(e_bit.options['nn']):
+                logger.debug('Node #%5d is: +n%d', i_node, totalnodes)
+                base_nodelist.extend(['+n{:d}'.format(totalnodes), ] *
+                                     e_bit.options['nnp'])
+                totalnodes += 1
+        return base_nodelist
+
+    def _envelope_mkcmdline(self, cmdl):
+        """Builds the MPI command line when an envelope is used.
+
+        :param list[str] cmdl: the command line as a list
+        """
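+        # Oversubscription is requested up front, presumably to avoid "not enough
+        # slots" errors when the rankfile places more ranks on a node than the
+        # default slot count allows.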
+        cmdl.append(self.optprefix + 'oversubscribe')
+        if self.bindingmethod in (None, 'native'):
+            # Generate the dictionary that associates rank numbers and programs
+            todostack, ranks_bsize = self._envelope_mkwrapper_todostack()
+            # Generate the binding stuff
+            bindingstack, _ = self._envelope_mkwrapper_bindingstack(ranks_bsize)
+            # Generate a relative nodelist
+            base_nodelist = self._envelope_nodelist()
+            # Generate the rankfile
+            ranks = sorted(todostack)
+            nodes = [base_nodelist[self._ranks_mapping[r]] for r in ranks]
+            if bindingstack:
+                slots = [bindingstack[r] for r in ranks]
+            else:
+                slots = [sorted(self.system.cpus_info.cpus.keys()), ] * len(ranks)
+            rfile = self._create_rankfile(ranks, nodes, slots)
+            # Add the rankfile on the command line
+            cmdl.append(self.optprefix + 'rankfile')
+            cmdl.append(rfile)
+            # Add the "usual" call to binaries and setup OMP_NUM_THREADS values
+            wrapstd = self._wrapstd_mkwrapper()
+            for i_bin, a_bin in enumerate(self.binaries):
+                if i_bin > 0:
+                    cmdl.append(self.binsep)
+                openmp = a_bin.options.get('openmp', None)
+                if openmp:
+                    cmdl.append(self.optprefix + 'x')
+                    cmdl.append('OMP_NUM_THREADS={!s}'.format(openmp))
+                cmdl.append(self.optprefix + 'np')
+                cmdl.append(str(a_bin.nprocs))
+                if wrapstd:
+                    cmdl.append(wrapstd)
+                cmdl.append(a_bin.master)
+                cmdl.extend(a_bin.arguments)
+        else:
+            # Generate a host file but let vortex deal with the rest...
+            base_nodelist = self._envelope_nodelist()
+            ranks = list(range(len(base_nodelist)))
+            rfile = self._create_rankfile(ranks,
+                                          [base_nodelist[self._ranks_mapping[r]] for r in ranks],
+                                          [sorted(self.system.cpus_info.cpus.keys()), ]
+                                          * len(base_nodelist))
+            # Generate wrappers
+            self._envelope_mkwrapper(cmdl)
+            wrapstd = self._wrapstd_mkwrapper()
+            # Update the command line
+            cmdl.append(self.optprefix + 'rankfile')
+            cmdl.append(rfile)
+            cmdl.append(self.optprefix + 'np')
+            cmdl.append(str(len(base_nodelist)))
+            if wrapstd:
+                cmdl.append(wrapstd)
+            cmdl.append(self.envelope[0].master)
+
+    def clean(self, opts=None):
+        """post-execution cleaning."""
+        super().clean(opts)
+        if self.envelope:
+            self.system.remove(self._envelope_rankfile_name)
+
+    def setup_environment(self, opts):
+        """Tweak the environment with some OpenMPI-specific settings."""
+        super().setup_environment(opts)
+        if self.bindingmethod == 'native' and 'OMP_PROC_BIND' not in self.env:
+            self._logged_env_set('OMP_PROC_BIND', 'true')
+        for libpath in self._mpilib_identification()[2]:
+            logger.info('Adding "%s" to LD_LIBRARY_PATH', libpath)
+            self.env.setgenericpath('LD_LIBRARY_PATH', libpath)
+
+
+class OmpiMpiRunDDT(OmpiMpiRun):
+    """OpenMPI's mpirun launcher with ARM's DDT."""
+
+    _footprint = dict(
+        attr = dict(
+            mpiname = dict(
+                values = ['openmpi-ddt', ],
+            ),
+        )
+    )
+
+    _conf_suffix = '-ddt'
+
+    def mkcmdline(self):
+        """Add the DDT prefix command to the command line"""
+        cmdl = super(OmpiMpiRun, self).mkcmdline()
+        armtool = ArmForgeTool(self.ticket)
+        for extra_c in reversed(armtool.ddt_prefix_cmd(
+                sources=self.sources,
+                workdir=self.system.path.dirname(self.binaries[0].master)
+        )):
+            cmdl.insert(0, extra_c)
+        return cmdl