vortex-nwp 2.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (144)
  1. vortex/__init__.py +159 -0
  2. vortex/algo/__init__.py +13 -0
  3. vortex/algo/components.py +2462 -0
  4. vortex/algo/mpitools.py +1953 -0
  5. vortex/algo/mpitools_templates/__init__.py +1 -0
  6. vortex/algo/mpitools_templates/envelope_wrapper_default.tpl +27 -0
  7. vortex/algo/mpitools_templates/envelope_wrapper_mpiauto.tpl +29 -0
  8. vortex/algo/mpitools_templates/wrapstd_wrapper_default.tpl +18 -0
  9. vortex/algo/serversynctools.py +171 -0
  10. vortex/config.py +112 -0
  11. vortex/data/__init__.py +19 -0
  12. vortex/data/abstractstores.py +1510 -0
  13. vortex/data/containers.py +835 -0
  14. vortex/data/contents.py +622 -0
  15. vortex/data/executables.py +275 -0
  16. vortex/data/flow.py +119 -0
  17. vortex/data/geometries.ini +2689 -0
  18. vortex/data/geometries.py +799 -0
  19. vortex/data/handlers.py +1230 -0
  20. vortex/data/outflow.py +67 -0
  21. vortex/data/providers.py +487 -0
  22. vortex/data/resources.py +207 -0
  23. vortex/data/stores.py +1390 -0
  24. vortex/data/sync_templates/__init__.py +0 -0
  25. vortex/gloves.py +309 -0
  26. vortex/layout/__init__.py +20 -0
  27. vortex/layout/contexts.py +577 -0
  28. vortex/layout/dataflow.py +1220 -0
  29. vortex/layout/monitor.py +969 -0
  30. vortex/nwp/__init__.py +14 -0
  31. vortex/nwp/algo/__init__.py +21 -0
  32. vortex/nwp/algo/assim.py +537 -0
  33. vortex/nwp/algo/clim.py +1086 -0
  34. vortex/nwp/algo/coupling.py +831 -0
  35. vortex/nwp/algo/eda.py +840 -0
  36. vortex/nwp/algo/eps.py +785 -0
  37. vortex/nwp/algo/forecasts.py +886 -0
  38. vortex/nwp/algo/fpserver.py +1303 -0
  39. vortex/nwp/algo/ifsnaming.py +463 -0
  40. vortex/nwp/algo/ifsroot.py +404 -0
  41. vortex/nwp/algo/monitoring.py +263 -0
  42. vortex/nwp/algo/mpitools.py +694 -0
  43. vortex/nwp/algo/odbtools.py +1258 -0
  44. vortex/nwp/algo/oopsroot.py +916 -0
  45. vortex/nwp/algo/oopstests.py +220 -0
  46. vortex/nwp/algo/request.py +660 -0
  47. vortex/nwp/algo/stdpost.py +1641 -0
  48. vortex/nwp/data/__init__.py +30 -0
  49. vortex/nwp/data/assim.py +380 -0
  50. vortex/nwp/data/boundaries.py +314 -0
  51. vortex/nwp/data/climfiles.py +521 -0
  52. vortex/nwp/data/configfiles.py +153 -0
  53. vortex/nwp/data/consts.py +954 -0
  54. vortex/nwp/data/ctpini.py +149 -0
  55. vortex/nwp/data/diagnostics.py +209 -0
  56. vortex/nwp/data/eda.py +147 -0
  57. vortex/nwp/data/eps.py +432 -0
  58. vortex/nwp/data/executables.py +1045 -0
  59. vortex/nwp/data/fields.py +111 -0
  60. vortex/nwp/data/gridfiles.py +380 -0
  61. vortex/nwp/data/logs.py +584 -0
  62. vortex/nwp/data/modelstates.py +363 -0
  63. vortex/nwp/data/monitoring.py +193 -0
  64. vortex/nwp/data/namelists.py +696 -0
  65. vortex/nwp/data/obs.py +840 -0
  66. vortex/nwp/data/oopsexec.py +74 -0
  67. vortex/nwp/data/providers.py +207 -0
  68. vortex/nwp/data/query.py +206 -0
  69. vortex/nwp/data/stores.py +160 -0
  70. vortex/nwp/data/surfex.py +337 -0
  71. vortex/nwp/syntax/__init__.py +9 -0
  72. vortex/nwp/syntax/stdattrs.py +437 -0
  73. vortex/nwp/tools/__init__.py +10 -0
  74. vortex/nwp/tools/addons.py +40 -0
  75. vortex/nwp/tools/agt.py +67 -0
  76. vortex/nwp/tools/bdap.py +59 -0
  77. vortex/nwp/tools/bdcp.py +41 -0
  78. vortex/nwp/tools/bdm.py +24 -0
  79. vortex/nwp/tools/bdmp.py +54 -0
  80. vortex/nwp/tools/conftools.py +1661 -0
  81. vortex/nwp/tools/drhook.py +66 -0
  82. vortex/nwp/tools/grib.py +294 -0
  83. vortex/nwp/tools/gribdiff.py +104 -0
  84. vortex/nwp/tools/ifstools.py +203 -0
  85. vortex/nwp/tools/igastuff.py +273 -0
  86. vortex/nwp/tools/mars.py +68 -0
  87. vortex/nwp/tools/odb.py +657 -0
  88. vortex/nwp/tools/partitioning.py +258 -0
  89. vortex/nwp/tools/satrad.py +71 -0
  90. vortex/nwp/util/__init__.py +6 -0
  91. vortex/nwp/util/async.py +212 -0
  92. vortex/nwp/util/beacon.py +40 -0
  93. vortex/nwp/util/diffpygram.py +447 -0
  94. vortex/nwp/util/ens.py +279 -0
  95. vortex/nwp/util/hooks.py +139 -0
  96. vortex/nwp/util/taskdeco.py +85 -0
  97. vortex/nwp/util/usepygram.py +697 -0
  98. vortex/nwp/util/usetnt.py +101 -0
  99. vortex/proxy.py +6 -0
  100. vortex/sessions.py +374 -0
  101. vortex/syntax/__init__.py +9 -0
  102. vortex/syntax/stdattrs.py +867 -0
  103. vortex/syntax/stddeco.py +185 -0
  104. vortex/toolbox.py +1117 -0
  105. vortex/tools/__init__.py +20 -0
  106. vortex/tools/actions.py +523 -0
  107. vortex/tools/addons.py +316 -0
  108. vortex/tools/arm.py +96 -0
  109. vortex/tools/compression.py +325 -0
  110. vortex/tools/date.py +27 -0
  111. vortex/tools/ddhpack.py +10 -0
  112. vortex/tools/delayedactions.py +782 -0
  113. vortex/tools/env.py +541 -0
  114. vortex/tools/folder.py +834 -0
  115. vortex/tools/grib.py +738 -0
  116. vortex/tools/lfi.py +953 -0
  117. vortex/tools/listings.py +423 -0
  118. vortex/tools/names.py +637 -0
  119. vortex/tools/net.py +2124 -0
  120. vortex/tools/odb.py +10 -0
  121. vortex/tools/parallelism.py +368 -0
  122. vortex/tools/prestaging.py +210 -0
  123. vortex/tools/rawfiles.py +10 -0
  124. vortex/tools/schedulers.py +480 -0
  125. vortex/tools/services.py +940 -0
  126. vortex/tools/storage.py +996 -0
  127. vortex/tools/surfex.py +61 -0
  128. vortex/tools/systems.py +3976 -0
  129. vortex/tools/targets.py +440 -0
  130. vortex/util/__init__.py +9 -0
  131. vortex/util/config.py +1122 -0
  132. vortex/util/empty.py +24 -0
  133. vortex/util/helpers.py +216 -0
  134. vortex/util/introspection.py +69 -0
  135. vortex/util/iosponge.py +80 -0
  136. vortex/util/roles.py +49 -0
  137. vortex/util/storefunctions.py +129 -0
  138. vortex/util/structs.py +26 -0
  139. vortex/util/worker.py +162 -0
  140. vortex_nwp-2.0.0.dist-info/METADATA +67 -0
  141. vortex_nwp-2.0.0.dist-info/RECORD +144 -0
  142. vortex_nwp-2.0.0.dist-info/WHEEL +5 -0
  143. vortex_nwp-2.0.0.dist-info/licenses/LICENSE +517 -0
  144. vortex_nwp-2.0.0.dist-info/top_level.txt +1 -0
vortex/nwp/algo/mpitools.py
@@ -0,0 +1,694 @@
+"""
+General interest and NWP specific MPI launchers.
+"""
+
+import collections
+import re
+import math
+
+from bronx.fancies import loggers
+from bronx.syntax.iterators import interleave
+import footprints
+
+from vortex.algo import mpitools
+from vortex.syntax.stdattrs import DelayedEnvValue
+from vortex.tools.arm import ArmForgeTool
+from ..tools.partitioning import setup_partitioning_in_namelist
+
+#: No automatic export
+__all__ = []
+
+logger = loggers.getLogger(__name__)
+
+
+class MpiAuto(mpitools.MpiTool):
+    """MpiTool that uses mpiauto as a proxy to several MPI implementations."""
+
+    _footprint = dict(
+        attr=dict(
+            mpiname=dict(
+                values=[
+                    "mpiauto",
+                ],
+            ),
+            mpiopts=dict(default=None),
+            optprefix=dict(default="--"),
+            optmap=dict(
+                default=footprints.FPDict(
+                    nn="nn",
+                    nnp="nnp",
+                    openmp="openmp",
+                    np="np",
+                    prefixcommand="prefix-command",
+                    allowodddist="mpi-allow-odd-dist",
+                )
+            ),
+            timeoutrestart=dict(
+                info="The number of attempts made by mpiauto",
+                optional=True,
+                default=DelayedEnvValue("MPI_INIT_TIMEOUT_RESTART", 2),
+                doc_visibility=footprints.doc.visibility.ADVANCED,
+                doc_zorder=-90,
+            ),
+            sublauncher=dict(
+                info="How to actually launch the MPI program",
+                values=["srun", "libspecific"],
+                optional=True,
+                doc_visibility=footprints.doc.visibility.ADVANCED,
+                doc_zorder=-90,
+            ),
+            mpiwrapstd=dict(
+                values=[
+                    False,
+                ],
+            ),
+            bindingmethod=dict(
+                info="How to bind the MPI processes",
+                values=["vortex", "arch", "launcherspecific"],
+                optional=True,
+                doc_visibility=footprints.doc.visibility.ADVANCED,
+                doc_zorder=-90,
+            ),
+            mplbased=dict(
+                info="Is the executable based on MPL?",
+                type=bool,
+                optional=True,
+                default=False,
+            ),
+        )
+    )
+
+    _envelope_wrapper_tpl = "envelope_wrapper_mpiauto.tpl"
+    _envelope_rank_var = "MPIAUTORANK"
+    _needs_mpilib_specific_mpienv = False
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.bindingmethod = "arch" if self.mplbased else "vortex"
+
+    def _reshaped_mpiopts(self):
+        """Raw list of mpi tool command line options."""
+        options = super()._reshaped_mpiopts()
+        options["init-timeout-restart"] = [(self.timeoutrestart,)]
+        if self.sublauncher == "srun":
+            options["use-slurm-mpi"] = [()]
+        elif self.sublauncher == "libspecific":
+            options["no-use-slurm-mpi"] = [()]
+        if self.bindingmethod:
+            for k in [
+                "{:s}use-{:s}-bind".format(p, t)
+                for p in ("", "no-")
+                for t in ("arch", "slurm", "intelmpi", "openmpi")
+            ]:
+                options.pop(k, None)
+            if self.bindingmethod == "arch":
+                options["use-arch-bind"] = [()]
+            elif (
+                self.bindingmethod == "launcherspecific"
+                and self.sublauncher == "srun"
+            ):
+                options["no-use-arch-bind"] = [()]
+                options["use-slurm-bind"] = [()]
+            elif self.bindingmethod == "launcherspecific":
+                options["no-use-arch-bind"] = [()]
+                for k in [
+                    "use-{:s}-bind".format(t)
+                    for t in ("slurm", "intelmpi", "openmpi")
+                ]:
+                    options[k] = [()]
+            elif self.bindingmethod == "vortex":
+                options["no-use-arch-bind"] = [()]
+        return options
+
+    def _envelope_fix_envelope_bit(self, e_bit, e_desc):
+        """Set the envelope fake binary options."""
+        e_bit.options = {
+            k: v for k, v in e_desc.items() if k not in ("openmp",)
+        }
+        e_bit.options["prefixcommand"] = self._envelope_wrapper_name
+        if self.binaries:
+            e_bit.master = self.binaries[0].master
+
+    def _set_binaries_hack(self, binaries):
+        """Set the list of :class:`MpiBinaryDescription` objects associated with this instance."""
+        if len(binaries) > 1 and self.bindingmethod not in (
+            None,
+            "arch",
+            "vortex",
+        ):
+            logger.info(
+                "The '{:s}' binding method is not working properly with multiple binaries.".format(
+                    self.bindingmethod
+                )
+            )
+            logger.warning("Resetting the binding method to 'vortex'.")
+            self.bindingmethod = "vortex"
+
+    def _set_binaries_envelope_hack(self, binaries):
+        """Tweak the envelope after binaries were setup."""
+        super()._set_binaries_envelope_hack(binaries)
+        for e_bit in self.envelope:
+            e_bit.master = binaries[0].master
+
+    def _set_envelope(self, value):
+        """Set the envelope description."""
+        super()._set_envelope(value)
+        if len(self._envelope) > 1 and self.bindingmethod not in (
+            None,
+            "arch",
+            "vortex",
+        ):
+            logger.info(
+                "The '{:s}' binding method is not working properly with complex envelopes.".format(
+                    self.bindingmethod
+                )
+            )
+            logger.warning("Resetting the binding method to 'vortex'.")
+            self.bindingmethod = "vortex"
+
+    envelope = property(mpitools.MpiTool._get_envelope, _set_envelope)
+
+    def _hook_binary_mpiopts(self, binary, options):
+        tuned = options.copy()
+        # Regular MPI tasks count (the usual...)
+        if "nnp" in options and "nn" in options:
+            if options["nn"] * options["nnp"] == options["np"]:
+                # Remove harmful options
+                del tuned["np"]
+                tuned.pop("allowodddist", None)
+            # That's the strange MPI distribution...
+            else:
+                tuned["allowodddist"] = (
+                    None  # With this, let mpiauto determine its own partitioning
+                )
+        else:
+            msg = "The provided mpiopts are insufficient to build the command line: {!s}".format(
+                options
+            )
+            raise mpitools.MpiException(msg)
+        return tuned
+
+    def _envelope_mkwrapper_todostack(self):
+        ranksidx = 0
+        todostack, ranks_bsize = super()._envelope_mkwrapper_todostack()
+        for bin_obj in self.binaries:
+            if bin_obj.options:
+                for mpirank in range(ranksidx, ranksidx + bin_obj.nprocs):
+                    prefix_c = bin_obj.options.get("prefixcommand", None)
+                    if prefix_c:
+                        todostack[mpirank] = (
+                            prefix_c,
+                            [
+                                todostack[mpirank][0],
+                            ]
+                            + todostack[mpirank][1],
+                            todostack[mpirank][2],
+                        )
+            ranksidx += bin_obj.nprocs
+        return todostack, ranks_bsize
+
+    def _envelope_mkcmdline_extra(self, cmdl):
+        """If possible, add an openmp option when the arch binding method is used."""
+
+        if self.bindingmethod != "vortex":
+            openmps = {b.options.get("openmp", None) for b in self.binaries}
+            if len(openmps) > 1:
+                if self.bindingmethod is not None:
+                    logger.warning(
+                        "Non-uniform OpenMP threads number... Not specifying anything."
+                    )
+            else:
+                openmp = openmps.pop() or 1
+                cmdl.append(self.optprefix + self.optmap["openmp"])
+                cmdl.append(str(openmp))
+
+    def setup_environment(self, opts):
+        """Last minute fixups."""
+        super().setup_environment(opts)
+        if self.bindingmethod in ("arch", "vortex"):
+            # Make sure srun does nothing!
+            self._logged_env_set("SLURM_CPU_BIND", "none")
+
+    def setup(self, opts=None):
+        """Ensure that the prefixcommand has the execution rights."""
+        for bin_obj in self.binaries:
+            prefix_c = bin_obj.options.get("prefixcommand", None)
+            if prefix_c is not None:
+                if self.system.path.exists(prefix_c):
+                    self.system.xperm(prefix_c, force=True)
+                else:
+                    raise OSError("The prefixcommand does not exist.")
+        super().setup(opts)
+
+
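The binding translation in _reshaped_mpiopts above is the heart of this class. As a standalone illustration (binding_flags is an invented helper, independent of the vortex runtime; the real code stores options in a dict of argument tuples rather than a flat list):

def binding_flags(bindingmethod, sublauncher=None):
    # Mirrors the flag selection performed by MpiAuto._reshaped_mpiopts.
    if bindingmethod == "arch":
        return ["--use-arch-bind"]
    if bindingmethod == "launcherspecific" and sublauncher == "srun":
        return ["--no-use-arch-bind", "--use-slurm-bind"]
    if bindingmethod == "launcherspecific":
        return ["--no-use-arch-bind", "--use-slurm-bind",
                "--use-intelmpi-bind", "--use-openmpi-bind"]
    if bindingmethod == "vortex":
        return ["--no-use-arch-bind"]
    return []

assert binding_flags("vortex") == ["--no-use-arch-bind"]
assert "--use-slurm-bind" in binding_flags("launcherspecific", "srun")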
+class MpiAutoDDT(MpiAuto):
+    """
+    MpiTool that uses mpiauto as a proxy to several MPI implementations,
+    with DDT support.
+    """
+
+    _footprint = dict(
+        attr=dict(
+            mpiname=dict(
+                values=[
+                    "mpiauto-ddt",
+                ],
+            ),
+        )
+    )
+
+    _conf_suffix = "-ddt"
+
+    def _reshaped_mpiopts(self):
+        options = super()._reshaped_mpiopts()
+        if "prefix-mpirun" in options:
+            raise mpitools.MpiException(
+                "It is not allowed to start DDT with another "
+                + 'prefix_mpirun command defined: "{!s}"'.format(options)
+            )
+        armtool = ArmForgeTool(self.ticket)
+        options["prefix-mpirun"] = [
+            (
+                " ".join(
+                    armtool.ddt_prefix_cmd(
+                        sources=self.sources,
+                        workdir=self.system.path.dirname(
+                            self.binaries[0].master
+                        )
+                    )
+                ),
+            )
+        ]
+        return options
+
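In both classes above, the reshaped option mapping is a dict from mpiauto flag names to lists of argument tuples, an empty tuple standing for a bare flag. A sketch of how such a mapping could be rendered to a command line (render_opts is hypothetical; the real rendering lives in the vortex.algo.mpitools machinery):

def render_opts(options, prefix="--"):
    # Hypothetical renderer: one flag occurrence per recorded argument tuple.
    cmdl = []
    for flag, calls in sorted(options.items()):
        for args in calls:
            cmdl.append(prefix + flag)
            cmdl.extend(str(a) for a in args)
    return cmdl

opts = {"init-timeout-restart": [(2,)], "no-use-arch-bind": [()]}
assert render_opts(opts) == ["--init-timeout-restart", "2", "--no-use-arch-bind"]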
+
+# Some IFS/Arpege specific things:
+
+
+def arpifs_obsort_nprocab_binarydeco(cls):
+    """Handle the usual IFS/Arpege environment tweaking for OBSORT (nproca & nprocb).
+
+    Note: this is a class decorator for classes somehow based on MpiBinaryDescription.
+    """
+    orig_setup_env = getattr(cls, "setup_environment")
+
+    def setup_environment(self, opts):
+        orig_setup_env(self, opts)
+        self.env.NPROCA = int(self.env.NPROCA or self.nprocs)
+        self.env.NPROCB = int(
+            self.env.NPROCB or self.nprocs // self.env.NPROCA
+        )
+        logger.info(
+            "MPI Setup NPROCA=%d and NPROCB=%d",
+            self.env.NPROCA,
+            self.env.NPROCB,
+        )
+
+    if hasattr(orig_setup_env, "__doc__"):
+        setup_environment.__doc__ = orig_setup_env.__doc__
+
+    setattr(cls, "setup_environment", setup_environment)
+    return cls
+
+
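The decorator pattern used by arpifs_obsort_nprocab_binarydeco (swap in a wrapped setup_environment at class-creation time) can be exercised without any vortex machinery. A reduced, self-contained sketch (wrap_setup_environment and Dummy are invented for illustration):

def wrap_setup_environment(cls):
    # Same shape as arpifs_obsort_nprocab_binarydeco, minus the NPROCA/NPROCB logic.
    orig = cls.setup_environment

    def setup_environment(self, opts):
        orig(self, opts)
        print("post-setup tweak")

    setup_environment.__doc__ = orig.__doc__
    cls.setup_environment = setup_environment
    return cls


@wrap_setup_environment
class Dummy:
    def setup_environment(self, opts):
        print("original setup")


Dummy().setup_environment(None)  # prints "original setup", then "post-setup tweak"

With the real decorator, a binary running on 8 tasks with neither NPROCA nor NPROCB in the environment ends up with NPROCA=8 and NPROCB=1.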
+class _NWPIoServerMixin:
+    _NWP_IOSERV_PATTERNS = ("io_serv.*.d",)
+
+    def _nwp_ioserv_setup_namelist(
+        self, namcontents, namlocal, total_iotasks, computed_iodist_value=None
+    ):
+        """Apply the IO server profile on local namelist ``namlocal`` with contents ``namcontents``."""
+        if "NAMIO_SERV" in namcontents:
+            namio = namcontents["NAMIO_SERV"]
+        else:
+            namio = namcontents.newblock("NAMIO_SERV")
+
+        namio.nproc_io = total_iotasks
+        if computed_iodist_value is not None:
+            namio.idistio = computed_iodist_value
+
+        if "VORTEX_IOSERVER_METHOD" in self.env:
+            namio.nio_serv_method = self.env.VORTEX_IOSERVER_METHOD
+
+        if "VORTEX_IOSERVER_BUFMAX" in self.env:
+            namio.nio_serv_buf_maxsize = self.env.VORTEX_IOSERVER_BUFMAX
+
+        if "VORTEX_IOSERVER_MLSERVER" in self.env:
+            namio.nmsg_level_server = self.env.VORTEX_IOSERVER_MLSERVER
+
+        if "VORTEX_IOSERVER_MLCLIENT" in self.env:
+            namio.nmsg_level_client = self.env.VORTEX_IOSERVER_MLCLIENT
+
+        if "VORTEX_IOSERVER_PROCESS" in self.env:
+            namio.nprocess_level = self.env.VORTEX_IOSERVER_PROCESS
+
+        if "VORTEX_IOSERVER_PIOMODEL" in self.env:
+            namio.pioprocr_MDL = self.env.VORTEX_IOSERVER_PIOMODEL
+
+        self.system.highlight(
+            "Parallel io server namelist for {:s}".format(namlocal)
+        )
+        print(namio.dumps())
+
+        return True
+
+    def _nwp_ioserv_iodirs(self):
+        """Return an ordered list of directories matching ``_NWP_IOSERV_PATTERNS``."""
+        found = []
+        for pattern in self._NWP_IOSERV_PATTERNS:
+            found.extend(self.system.glob(pattern))
+        return sorted(found)
+
+    def _nwp_ioserv_clean(self):
+        """Post-execution cleaning for the IO server."""
+
+        # Old-fashioned way to make clear that some polling is needed.
+        self.system.touch("io_poll.todo")
+
+        # Take a look inside the IO server output directories, according to their own pattern
+        ioserv_filelist = set()
+        ioserv_prefixes = set()
+        iofile_re = re.compile(
+            r"((ICMSH|PF|GRIBPF).*\+\d+(?::\d+)?(?:\.sfx)?)(?:\..+)?$"
+        )
+        self.system.highlight("Dealing with IO directories")
+        iodirs = self._nwp_ioserv_iodirs()
+        if iodirs:
+            logger.info("List of IO directories: %s", ",".join(iodirs))
+            f_summary = collections.defaultdict(lambda: [" "] * len(iodirs))
+            for i, iodir in enumerate(iodirs):
+                for iofile in self.system.listdir(iodir):
+                    zf = iofile_re.match(iofile)
+                    if zf:
+                        f_summary[zf.group(1)][i] = "+"
+                        ioserv_filelist.add((zf.group(1), zf.group(2)))
+                        ioserv_prefixes.add(zf.group(2))
+                    else:
+                        f_summary[iofile][i] = "?"
+            max_names_len = max([len(iofile) for iofile in f_summary.keys()])
+            fmt_names = "{:" + str(max_names_len) + "s}"
+            logger.info(
+                "Data location across the various IO server directories:\n%s",
+                "\n".join(
+                    [
+                        (fmt_names + " |{:s}|").format(iofile, "".join(where))
+                        for iofile, where in sorted(f_summary.items())
+                    ]
+                ),
+            )
+        else:
+            logger.info("No IO directories were found")
+
+        if "GRIBPF" in ioserv_prefixes:
+            # If GRIB files are requested, do not bother with old FA PF files
+            ioserv_prefixes.discard("PF")
+            ioserv_filelist = {(f, p) for f, p in ioserv_filelist if p != "PF"}
+
+        # Touch the output files
+        for tgfile, _ in ioserv_filelist:
+            self.system.touch(tgfile)
+
+        # Touch the io_poll.todo.PREFIX files
+        for prefix in ioserv_prefixes:
+            self.system.touch("io_poll.todo.{:s}".format(prefix))
+
+
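The iofile_re pattern above is what decides which IO server outputs get gathered; a quick standalone check of its behaviour (the file names are invented examples):

import re

iofile_re = re.compile(
    r"((ICMSH|PF|GRIBPF).*\+\d+(?::\d+)?(?:\.sfx)?)(?:\..+)?$"
)

# A surface model state with an IO-server chunk suffix: the suffix is dropped.
m = iofile_re.match("ICMSHFCST+0006.sfx.000001")
assert m.group(1) == "ICMSHFCST+0006.sfx" and m.group(2) == "ICMSH"

# A GRIB post-processing file with minutes in its term.
m = iofile_re.match("GRIBPFFCST+0006:30")
assert m.group(1) == "GRIBPFFCST+0006:30" and m.group(2) == "GRIBPF"

# Unrelated files do not match and are reported with a "?" in the summary table.
assert iofile_re.match("stdout.0") is None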
+class _AbstractMpiNWP(mpitools.MpiBinaryBasic, _NWPIoServerMixin):
+    """The kind of binaries used in IFS/Arpege."""
+
+    _abstract = True
+
+    def __init__(self, *kargs, **kwargs):
+        super().__init__(*kargs, **kwargs)
+        self._incore_iotasks = None
+        self._effective_incore_iotasks = None
+        self._incore_iotasks_fixer = None
+        self._incore_iodist = None
+
+    @property
+    def incore_iotasks(self):
+        """The number of tasks dedicated to the IO server."""
+        return self._incore_iotasks
+
+    @incore_iotasks.setter
+    def incore_iotasks(self, value):
+        """The number of tasks dedicated to the IO server."""
+        if isinstance(value, str) and value.endswith("%"):
+            value = math.ceil(self.nprocs * float(value[:-1]) / 100)
+        self._incore_iotasks = int(value)
+        self._effective_incore_iotasks = None
+
+    @property
+    def incore_iotasks_fixer(self):
+        """Tweak the number of IO tasks in order to respect a given constraint."""
+        return self._incore_iotasks_fixer
+
+    @incore_iotasks_fixer.setter
+    def incore_iotasks_fixer(self, value):
+        """Tweak the number of IO tasks in order to respect a given constraint."""
+        if not isinstance(value, str):
+            raise ValueError("A string is expected")
+        if value.startswith("nproc_multiple_of_"):
+            self._incore_iotasks_fixer = (
+                "nproc_multiple_of",
+                [int(i) for i in value[18:].split(",")],
+            )
+        else:
+            raise ValueError('The "{:s}" value is incorrect'.format(value))
+
+    @property
+    def effective_incore_iotasks(self):
+        """Apply fixers to incore_iotasks and return the resulting value.
+
+        e.g. "nproc_multiple_of_15,16,17" ensures that the number of processes
+        dedicated to computations (i.e. the total number of processes minus the
+        IO processes) is a multiple of 15, 16 or 17.
+        """
+        if self.incore_iotasks is not None:
+            if self._effective_incore_iotasks is None:
+                if self.incore_iotasks_fixer is not None:
+                    if self.incore_iotasks_fixer[0] == "nproc_multiple_of":
+                        # Allow for 5% less, or add some tasks
+                        for candidate in interleave(
+                            range(self.incore_iotasks, self.nprocs + 1),
+                            range(
+                                self.incore_iotasks - 1,
+                                int(math.ceil(0.95 * self.incore_iotasks)) - 1,
+                                -1,
+                            ),
+                        ):
+                            if any(
+                                [
+                                    (self.nprocs - candidate) % multiple == 0
+                                    for multiple in self.incore_iotasks_fixer[
+                                        1
+                                    ]
+                                ]
+                            ):
+                                self._effective_incore_iotasks = candidate
+                                break
+                    else:
+                        raise RuntimeError("Unsupported fixer")
+                    if self._effective_incore_iotasks != self.incore_iotasks:
+                        logger.info(
+                            "The number of IO tasks was updated from %d to %d "
+                            + 'because of the "%s" fixer',
+                            self.incore_iotasks,
+                            self._effective_incore_iotasks,
+                            self.incore_iotasks_fixer[0],
+                        )
+                else:
+                    self._effective_incore_iotasks = self.incore_iotasks
+            return self._effective_incore_iotasks
+        else:
+            return None
+
+    @property
+    def incore_iodist(self):
+        """How to distribute IO server tasks within model tasks."""
+        return self._incore_iodist
+
+    @incore_iodist.setter
+    def incore_iodist(self, value):
+        """How to distribute IO server tasks within model tasks."""
+        allowed = (
+            "begining",
+            "end",
+            "scattered",
+        )
+        if not (isinstance(value, str) and value in allowed):
+            raise ValueError(
+                "'{!s}' is not an allowed value ('{:s}')".format(
+                    value, ", ".join(allowed)
+                )
+            )
+        self._incore_iodist = value
+
+    def _set_nam_macro(self, namcontents, namlocal, macro, value):
+        """Set a namelist macro and log it!"""
+        namcontents.setmacro(macro, value)
+        logger.info("Setup macro %s=%s in %s", macro, str(value), namlocal)
+
+    def setup_namelist_delta(self, namcontents, namlocal):
+        """Apply the MPI profile on local namelist ``namlocal`` with contents ``namcontents``."""
+        namw = False
+        # List of macros actually used in the namelist
+        nam_macros = set()
+        for nam_block in namcontents.values():
+            nam_macros.update(nam_block.macros())
+        # The actual number of tasks involved in computations
+        effective_nprocs = self.nprocs
+        if self.effective_incore_iotasks is not None:
+            effective_nprocs -= self.effective_incore_iotasks
+        # Set up the effective_nprocs related macros
+        nprocs_macros = ("NPROC", "NBPROC", "NTASKS")
+        if any([n in nam_macros for n in nprocs_macros]):
+            for n in nprocs_macros:
+                self._set_nam_macro(namcontents, namlocal, n, effective_nprocs)
+            namw = True
+        if any([n in nam_macros for n in ("NCPROC", "NDPROC")]):
+            self._set_nam_macro(
+                namcontents,
+                namlocal,
+                "NCPROC",
+                int(self.env.VORTEX_NPRGPNS or effective_nprocs),
+            )
+            self._set_nam_macro(
+                namcontents,
+                namlocal,
+                "NDPROC",
+                int(self.env.VORTEX_NPRGPEW or 1),
+            )
+            namw = True
+        if "NAMPAR1" in namcontents:
+            np1 = namcontents["NAMPAR1"]
+            for nstr in [x for x in ("NSTRIN", "NSTROUT") if x in np1]:
+                if (
+                    isinstance(np1[nstr], (int, float))
+                    and np1[nstr] > effective_nprocs
+                ):
+                    logger.info(
+                        "Setup %s=%s in NAMPAR1 %s",
+                        nstr,
+                        effective_nprocs,
+                        namlocal,
+                    )
+                    np1[nstr] = effective_nprocs
+                    namw = True
+        # Deal with partitioning macros
+        namw_p = setup_partitioning_in_namelist(
+            namcontents,
+            effective_nprocs,
+            self.options.get("openmp", 1),
+            namlocal,
+        )
+        namw = namw or namw_p
+        # Incore IO tasks
+        if self.effective_incore_iotasks is not None:
+            c_iodist = None
+            if self.incore_iodist is not None:
+                if self.incore_iodist == "begining":
+                    c_iodist = -1
+                elif self.incore_iodist == "end":
+                    c_iodist = 0
+                elif self.incore_iodist == "scattered":
+                    # Ensure that there is at least one task on the first node
+                    c_iodist = min(
+                        self.nprocs // self.effective_incore_iotasks,
+                        self.options.get("nnp", self.nprocs),
+                    )
+                else:
+                    raise RuntimeError(
+                        "incore_iodist '{!s}' is not supported: check your code".format(
+                            self.incore_iodist
+                        )
+                    )
+            namw_io = self._nwp_ioserv_setup_namelist(
+                namcontents,
+                namlocal,
+                self.effective_incore_iotasks,
+                computed_iodist_value=c_iodist,
+            )
+            namw = namw or namw_io
+        return namw
+
+    def clean(self, opts=None):
+        """Finalise the IO server run."""
+        super().clean(opts=opts)
+        if self.incore_iotasks:
+            self._nwp_ioserv_clean()
+
+
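Two pieces of arithmetic above are worth seeing in isolation: the percent form accepted by the incore_iotasks setter, and the nproc_multiple_of search in effective_incore_iotasks. A standalone sketch (find_effective_iotasks re-implements the search with itertools in place of bronx's interleave):

import itertools
import math


def find_effective_iotasks(nprocs, iotasks, multiples):
    # Scan upwards from the requested count, and at most 5% below it,
    # until the number of compute tasks divides by one of `multiples`.
    up = range(iotasks, nprocs + 1)
    down = range(iotasks - 1, int(math.ceil(0.95 * iotasks)) - 1, -1)
    candidates = (c for pair in itertools.zip_longest(up, down)
                  for c in pair if c is not None)
    for candidate in candidates:
        if any((nprocs - candidate) % m == 0 for m in multiples):
            return candidate
    return None


# incore_iotasks = "10%" with 512 total tasks rounds up to 52 IO tasks:
assert math.ceil(512 * 10.0 / 100) == 52
# With 128 tasks and compute counts that must divide by 16, a request
# for 10 IO tasks is bumped to 16 (128 - 16 = 112 = 7 * 16):
assert find_effective_iotasks(128, 10, [16]) == 16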
+class MpiNWP(_AbstractMpiNWP):
+    """The kind of binaries used in IFS/Arpege."""
+
+    _footprint = dict(
+        attr=dict(
+            kind=dict(
+                values=[
+                    "basicnwp",
+                ]
+            ),
+        ),
+    )
+
+
+@arpifs_obsort_nprocab_binarydeco
+class MpiNWPObsort(_AbstractMpiNWP):
+    """The kind of binaries used in IFS/Arpege when the ODB OBSSORT code needs to be run."""
+
+    _footprint = dict(
+        attr=dict(
+            kind=dict(
+                values=[
+                    "basicnwpobsort",
+                ]
+            ),
+        ),
+    )
+
+
+@arpifs_obsort_nprocab_binarydeco
+class MpiObsort(mpitools.MpiBinaryBasic):
+    """The kind of binaries used when the ODB OBSSORT code needs to be run."""
+
+    _footprint = dict(
+        attr=dict(
+            kind=dict(
+                values=[
+                    "basicobsort",
+                ]
+            ),
+        ),
+    )
+
+
+class MpiNWPIO(mpitools.MpiBinaryIOServer, _NWPIoServerMixin):
+    """Standard IFS/Arpege NWP IO server."""
+
+    _footprint = dict(
+        attr=dict(
+            kind=dict(
+                values=[
+                    "nwpioserv",
+                ]
+            ),
+            iolocation=dict(
+                values=[-1, 0], default=0, optional=True, type=int
+            ),
+        )
+    )
+
+    def setup_namelist_delta(self, namcontents, namlocal):
+        """Setup the IO Server."""
+        self._nwp_ioserv_setup_namelist(
+            namcontents,
+            namlocal,
+            self.nprocs,
+            computed_iodist_value=(-1 if self.iolocation == 0 else None),
+        )
+
+    def clean(self, opts=None):
+        """Finalise the IO server run."""
+        super().clean(opts=opts)
+        self._nwp_ioserv_clean()
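Across the classes above, the idistio value written to NAMIO_SERV boils down to a small mapping: -1 for IO tasks at the beginning, 0 for IO tasks at the end, and a computed stride for the scattered layout. A sketch of the scattered case (scattered_iodist is an invented name):

def scattered_iodist(nprocs, iotasks, nnp):
    # One IO task every `stride` ranks, capped at the per-node task count
    # so that at least one IO task lands on the first node.
    return min(nprocs // iotasks, nnp)


# 1024 tasks, 16 of them IO, 128 tasks per node: one IO task every 64 ranks.
assert scattered_iodist(1024, 16, 128) == 64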