vortex-nwp 2.0.0b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vortex/__init__.py +135 -0
- vortex/algo/__init__.py +12 -0
- vortex/algo/components.py +2136 -0
- vortex/algo/mpitools.py +1648 -0
- vortex/algo/mpitools_templates/envelope_wrapper_default.tpl +27 -0
- vortex/algo/mpitools_templates/envelope_wrapper_mpiauto.tpl +29 -0
- vortex/algo/mpitools_templates/wrapstd_wrapper_default.tpl +18 -0
- vortex/algo/serversynctools.py +170 -0
- vortex/config.py +115 -0
- vortex/data/__init__.py +13 -0
- vortex/data/abstractstores.py +1572 -0
- vortex/data/containers.py +780 -0
- vortex/data/contents.py +596 -0
- vortex/data/executables.py +284 -0
- vortex/data/flow.py +113 -0
- vortex/data/geometries.ini +2689 -0
- vortex/data/geometries.py +703 -0
- vortex/data/handlers.py +1021 -0
- vortex/data/outflow.py +67 -0
- vortex/data/providers.py +465 -0
- vortex/data/resources.py +201 -0
- vortex/data/stores.py +1271 -0
- vortex/gloves.py +282 -0
- vortex/layout/__init__.py +27 -0
- vortex/layout/appconf.py +109 -0
- vortex/layout/contexts.py +511 -0
- vortex/layout/dataflow.py +1069 -0
- vortex/layout/jobs.py +1276 -0
- vortex/layout/monitor.py +833 -0
- vortex/layout/nodes.py +1424 -0
- vortex/layout/subjobs.py +464 -0
- vortex/nwp/__init__.py +11 -0
- vortex/nwp/algo/__init__.py +12 -0
- vortex/nwp/algo/assim.py +483 -0
- vortex/nwp/algo/clim.py +920 -0
- vortex/nwp/algo/coupling.py +609 -0
- vortex/nwp/algo/eda.py +632 -0
- vortex/nwp/algo/eps.py +613 -0
- vortex/nwp/algo/forecasts.py +745 -0
- vortex/nwp/algo/fpserver.py +927 -0
- vortex/nwp/algo/ifsnaming.py +403 -0
- vortex/nwp/algo/ifsroot.py +311 -0
- vortex/nwp/algo/monitoring.py +202 -0
- vortex/nwp/algo/mpitools.py +554 -0
- vortex/nwp/algo/odbtools.py +974 -0
- vortex/nwp/algo/oopsroot.py +735 -0
- vortex/nwp/algo/oopstests.py +186 -0
- vortex/nwp/algo/request.py +579 -0
- vortex/nwp/algo/stdpost.py +1285 -0
- vortex/nwp/data/__init__.py +12 -0
- vortex/nwp/data/assim.py +392 -0
- vortex/nwp/data/boundaries.py +261 -0
- vortex/nwp/data/climfiles.py +539 -0
- vortex/nwp/data/configfiles.py +149 -0
- vortex/nwp/data/consts.py +929 -0
- vortex/nwp/data/ctpini.py +133 -0
- vortex/nwp/data/diagnostics.py +181 -0
- vortex/nwp/data/eda.py +148 -0
- vortex/nwp/data/eps.py +383 -0
- vortex/nwp/data/executables.py +1039 -0
- vortex/nwp/data/fields.py +96 -0
- vortex/nwp/data/gridfiles.py +308 -0
- vortex/nwp/data/logs.py +551 -0
- vortex/nwp/data/modelstates.py +334 -0
- vortex/nwp/data/monitoring.py +220 -0
- vortex/nwp/data/namelists.py +644 -0
- vortex/nwp/data/obs.py +748 -0
- vortex/nwp/data/oopsexec.py +72 -0
- vortex/nwp/data/providers.py +182 -0
- vortex/nwp/data/query.py +217 -0
- vortex/nwp/data/stores.py +147 -0
- vortex/nwp/data/surfex.py +338 -0
- vortex/nwp/syntax/__init__.py +9 -0
- vortex/nwp/syntax/stdattrs.py +375 -0
- vortex/nwp/tools/__init__.py +10 -0
- vortex/nwp/tools/addons.py +35 -0
- vortex/nwp/tools/agt.py +55 -0
- vortex/nwp/tools/bdap.py +48 -0
- vortex/nwp/tools/bdcp.py +38 -0
- vortex/nwp/tools/bdm.py +21 -0
- vortex/nwp/tools/bdmp.py +49 -0
- vortex/nwp/tools/conftools.py +1311 -0
- vortex/nwp/tools/drhook.py +62 -0
- vortex/nwp/tools/grib.py +268 -0
- vortex/nwp/tools/gribdiff.py +99 -0
- vortex/nwp/tools/ifstools.py +163 -0
- vortex/nwp/tools/igastuff.py +249 -0
- vortex/nwp/tools/mars.py +56 -0
- vortex/nwp/tools/odb.py +548 -0
- vortex/nwp/tools/partitioning.py +234 -0
- vortex/nwp/tools/satrad.py +56 -0
- vortex/nwp/util/__init__.py +6 -0
- vortex/nwp/util/async.py +184 -0
- vortex/nwp/util/beacon.py +40 -0
- vortex/nwp/util/diffpygram.py +359 -0
- vortex/nwp/util/ens.py +198 -0
- vortex/nwp/util/hooks.py +128 -0
- vortex/nwp/util/taskdeco.py +81 -0
- vortex/nwp/util/usepygram.py +591 -0
- vortex/nwp/util/usetnt.py +87 -0
- vortex/proxy.py +6 -0
- vortex/sessions.py +341 -0
- vortex/syntax/__init__.py +9 -0
- vortex/syntax/stdattrs.py +628 -0
- vortex/syntax/stddeco.py +176 -0
- vortex/toolbox.py +982 -0
- vortex/tools/__init__.py +11 -0
- vortex/tools/actions.py +457 -0
- vortex/tools/addons.py +297 -0
- vortex/tools/arm.py +76 -0
- vortex/tools/compression.py +322 -0
- vortex/tools/date.py +20 -0
- vortex/tools/ddhpack.py +10 -0
- vortex/tools/delayedactions.py +672 -0
- vortex/tools/env.py +513 -0
- vortex/tools/folder.py +663 -0
- vortex/tools/grib.py +559 -0
- vortex/tools/lfi.py +746 -0
- vortex/tools/listings.py +354 -0
- vortex/tools/names.py +575 -0
- vortex/tools/net.py +1790 -0
- vortex/tools/odb.py +10 -0
- vortex/tools/parallelism.py +336 -0
- vortex/tools/prestaging.py +186 -0
- vortex/tools/rawfiles.py +10 -0
- vortex/tools/schedulers.py +413 -0
- vortex/tools/services.py +871 -0
- vortex/tools/storage.py +1061 -0
- vortex/tools/surfex.py +61 -0
- vortex/tools/systems.py +3396 -0
- vortex/tools/targets.py +384 -0
- vortex/util/__init__.py +9 -0
- vortex/util/config.py +1071 -0
- vortex/util/empty.py +24 -0
- vortex/util/helpers.py +184 -0
- vortex/util/introspection.py +63 -0
- vortex/util/iosponge.py +76 -0
- vortex/util/roles.py +51 -0
- vortex/util/storefunctions.py +103 -0
- vortex/util/structs.py +26 -0
- vortex/util/worker.py +150 -0
- vortex_nwp-2.0.0b1.dist-info/LICENSE +517 -0
- vortex_nwp-2.0.0b1.dist-info/METADATA +50 -0
- vortex_nwp-2.0.0b1.dist-info/RECORD +146 -0
- vortex_nwp-2.0.0b1.dist-info/WHEEL +5 -0
- vortex_nwp-2.0.0b1.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,234 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Compute simple domain partitionings.
|
|
3
|
+
|
|
4
|
+
The partitioning classes can be used on their own. Alternatively, the
|
|
5
|
+
:meth:`setup_partitioning_in_namelist` method can be used to interact with
|
|
6
|
+
namelist's Content objects.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import functools
|
|
10
|
+
import math
|
|
11
|
+
import re
|
|
12
|
+
|
|
13
|
+
from bronx.syntax import iterators as b_iter
|
|
14
|
+
from bronx.fancies import loggers
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
logger = loggers.getLogger(__name__)
|
|
18
|
+
|
|
19
|
+
__all__ = ['PartitioningError',
|
|
20
|
+
'Rectangular2DPartitioner',
|
|
21
|
+
'setup_partitioning_in_namelist']
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class PartitioningError(ValueError):
    """Any error raised during domain partitionings.

    Subclasses :class:`ValueError` so that existing callers catching
    :class:`ValueError` keep working.
    """
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class AbstratctPartitioner:
    """The base class for any concrete partitioning class.

    Provides the mechanism to filter the partitioning methods and
    cache the results.
    """

    # Sequence of (method_name, number_of_integer_args) pairs. Concrete
    # subclasses override this to advertise their partitioning methods.
    _REGISTERED_METHODS = ()

    def __init__(self, p_method_specification):
        """
        :param p_method_specification: The partitioning method definition.
            It is the method name optionally followed by underscore-separated
            integer arguments (e.g. ``"square"`` or ``"xcloseto_16"``).
        :raises PartitioningError: if the method name is unknown or the
            number of integer arguments does not match the registration.
        """
        # Process the partitioning method specification string
        p_method_parts = p_method_specification.lower().split('_')
        self.p_method_name = None
        self.p_method_args = ()
        for a_method, a_method_n_args in self._REGISTERED_METHODS:
            if p_method_parts[0] == a_method:
                self.p_method_name = p_method_parts[0]
                if len(p_method_parts) - 1 != a_method_n_args:
                    # PartitioningError is a ValueError subclass, hence this
                    # stays compatible with callers catching ValueError.
                    raise PartitioningError('Erroneous number of integer args ' +
                                            'for the {:s} p_method ({:d} required)'
                                            .format(a_method, a_method_n_args))
                self.p_method_args = tuple(int(s) for s in p_method_parts[1:])
                # Method names are unique: no need to look any further
                break
        # Unknown method -> crash
        if self.p_method_name is None:
            raise PartitioningError("Unknown partitioning method ({:s})."
                                    .format(p_method_specification))
        # The actual class' method that will be used to compute the partitioning
        self.p_method = functools.partial(getattr(self, '_' + self.p_method_name),
                                          *self.p_method_args)
        # Implement a caching mechanism (results only depend on ntasks)
        self._p_cache = dict()

    def __call__(self, ntasks):
        """Return the appropriate partitioning given **ntasks**."""
        if ntasks not in self._p_cache:
            self._p_cache[ntasks] = self.p_method(ntasks)
        return self._p_cache[ntasks]
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
class Rectangular2DPartitioner(AbstratctPartitioner):
    """Find an adequate 2D simple partitioning given the number of tasks.

    Here is a description of the problem:

    * Let ``D`` be a 2D array/field;
    * Let ``N`` be the number of partitions you want to create;
    * What are ``x`` and ``y`` that satisfy ``x * y = N`` (so that the
      ``D`` array can be partitioned in ``x`` (resp. ``y``) chunks
      in its first (resp. second) dimension)?

    For example, with N=16, an array can be split in 4 chunks in each
    dimension. It can also be partitioned in 2 chunks in the first
    dimension and 8 in the second... There is no unique solution.
    Consequently, the user needs to provide a partitioning method.

    Example::

        # Look for a partitioning around a given fixed value
        # e.g with xcloseto_16, the x value will be close to 16
        >>> Rectangular2DPartitioner('xcloseto_16')(128)
        (16, 8)
        >>> Rectangular2DPartitioner('xcloseto_16')(990)
        (15, 66)
        >>> Rectangular2DPartitioner('xcloseto_16')(500)
        (20, 25)

        # e.g with ycloseto_16, the y value will be close to 16
        >>> Rectangular2DPartitioner('ycloseto_16')(128)
        (8, 16)
        >>> Rectangular2DPartitioner('ycloseto_16')(990)
        (66, 15)
        >>> Rectangular2DPartitioner('ycloseto_16')(500)
        (25, 20)

        # Squarest partition of the domain: x and y as close
        # as possible
        >>> Rectangular2DPartitioner('square')(16)
        (4, 4)
        >>> Rectangular2DPartitioner('square')(12)
        (3, 4)
        >>> Rectangular2DPartitioner('square')(7)
        (1, 7)

        # Try to find x and y so that a given aspect ratio is preserved
        # e.g with aspect_16_9, x / y should roughly be equal to 16 / 9
        >>> Rectangular2DPartitioner('aspect_2_1')(32)
        (8, 4)
        >>> Rectangular2DPartitioner('aspect_2_1')(27)
        (9, 3)
        >>> Rectangular2DPartitioner('aspect_16_9')(28) # roughly 16/9e like a TV...
        (7, 4)

    """

    # Methods available to the user, with the number of integer
    # arguments each of them expects (see AbstratctPartitioner).
    _REGISTERED_METHODS = (
        ('xcloseto', 1),
        ('ycloseto', 1),
        ('square', 0),
        ('aspect', 2)
    )

    @staticmethod
    def _test_and_return(ntasks, guesses):
        """Return ``(g, ntasks // g)`` for the first ``g`` in **guesses** that divides **ntasks**.

        Falls back to ``(1, ntasks)`` when no guess divides **ntasks** evenly.
        """
        found = 1
        for i_guess in guesses:
            if ntasks % i_guess == 0:
                found = i_guess
                break
        return found, ntasks // found

    def _xcloseto(self, close_to_what, ntasks):
        """Find ``x`` as the closest possible value to **close_to_what**."""
        # Alternate guesses just below and just above the target so that the
        # first divisor found is also the nearest one.
        guesses = b_iter.interleave(
            range(close_to_what, 0, -1),
            range(close_to_what + 1, min(close_to_what * 2,
                                         ntasks))
        )
        return self._test_and_return(ntasks, guesses)

    def _ycloseto(self, close_to_what, ntasks):
        """Find ``y`` as the closest possible value to **close_to_what**."""
        # Same search as _xcloseto, with the two dimensions swapped.
        y_value, x_value = self._xcloseto(close_to_what, ntasks)
        return x_value, y_value

    def _square(self, ntasks):
        """Find ``x`` and ``y`` so that they are close to the square root of ``N``.

        With this method, ``x`` is always the smallest value.
        """
        guesses = range(int(math.sqrt(ntasks)), 0, -1)
        return self._test_and_return(ntasks, guesses)

    def _aspect(self, x_spec, y_spec, ntasks):
        """Find ``x`` and ``y`` so that ``x / y =~ x_spec / y_spec``."""
        # If x / y == ratio and x * y == N, then x == sqrt(N * ratio).
        aspect_ratio = x_spec / y_spec
        return self._xcloseto(int(math.sqrt(ntasks * aspect_ratio)),
                              ntasks)
|
|
169
|
+
|
|
170
|
+
|
|
171
|
+
# Module-level cache of partitioner objects, keyed by
# (partitioner class tag, method specification string).
_PARTITIONERS_CACHE = dict()
|
|
172
|
+
|
|
173
|
+
|
|
174
|
+
def setup_partitioning_in_namelist(namcontents,
                                   effective_tasks,
                                   effective_threads,
                                   namlocal=None):
    """Look in a namelist Content object and replace the macros related to partitioning.

    :param nwp.data.namelists.NamelistContent namcontents: The namelist's Content
                                                           object to work with
    :param int effective_tasks: The number of tasks that will be used when computing
                                the partitioning
    :param int effective_threads: The number of threads that will be used when computing
                                  the partitioning
    :param str namlocal: The namelist's file name
    :return: ``True`` if the namelist's Contents object has been modified
    :rtype: bool

    This function will detect namelist macros like ``PART_TASKS2D_X_SQUARE`` where:

    * ``TASKS`` tells that **effective_tasks** will be used to compute the
      decomposition (alternatively, ``THREADS`` can be used).
    * ``2D`` tells that the :class:`Rectangular2DPartitioner` class will be used
      to compute the partitioning. For now, ``2D`` is the only available option.
    * ``X`` tells that the user wants to get the X value of the computed partitioning.
      Alternatively, ``Y`` can be used.
    * ``SQUARE`` refers to the partitioning method that will be used by the
      partitioning class. Any value that is accepted by the partitioning class is
      fine.
    """
    macro_re = re.compile('PART_' +
                          '(?P<what>TASKS|THREADS)(?P<cls>2D)_' +
                          '(?P<dim>[XY])_(?P<def>.*)$')
    known_classes = {'2D': Rectangular2DPartitioner}
    modified = False
    # Gather every macro used anywhere in the namelist's contents
    detected_macros = {a_macro
                       for nam in namcontents.values()
                       for a_macro in nam.macros()}
    # Consider only relevant macros
    for macroname in detected_macros:
        m_desc = macro_re.match(macroname)
        if m_desc is None:
            continue
        # Partitioner objects are cached at the module level since they only
        # depend on the class tag and on the method specification
        cache_key = (m_desc.group('cls'), m_desc.group('def'))
        if cache_key not in _PARTITIONERS_CACHE:
            a_class = known_classes[m_desc.group('cls')]
            _PARTITIONERS_CACHE[cache_key] = a_class(m_desc.group('def'))
        # Pick the relevant problem size depending on the TASKS/THREADS tag
        if m_desc.group('what') == 'TASKS':
            effective_n = effective_tasks
        else:
            effective_n = effective_threads
        part_x, part_y = _PARTITIONERS_CACHE[cache_key](effective_n)
        final_result = part_y if m_desc.group('dim') == 'Y' else part_x
        if namlocal:
            logger.info('Setup macro %s=%s in %s', macroname, final_result, namlocal)
        else:
            logger.info('Setup macro %s=%s', macroname, final_result)
        namcontents.setmacro(macroname, final_result)
        modified = True
    return modified
|
|
230
|
+
|
|
231
|
+
|
|
232
|
+
if __name__ == '__main__':
    # Run the doctests embedded in this module's docstrings (e.g. the
    # Rectangular2DPartitioner examples) when executed directly.
    import doctest
    doctest.testmod()
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Common interest classes to help setup the RTTOV/IFS environment.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import re
|
|
6
|
+
|
|
7
|
+
from bronx.fancies import loggers
|
|
8
|
+
|
|
9
|
+
from vortex.algo.components import AlgoComponentDecoMixin, AlgoComponentError
|
|
10
|
+
from vortex.algo.components import algo_component_deco_mixin_autodoc
|
|
11
|
+
|
|
12
|
+
#: No automatic export
|
|
13
|
+
__all__ = []
|
|
14
|
+
|
|
15
|
+
logger = loggers.getLogger(__name__)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
@algo_component_deco_mixin_autodoc
class SatRadDecoMixin(AlgoComponentDecoMixin):
    """RTTOV settings + Satellites related stuffs.

    This mixin class is intended to be used with AlgoComponent classes. It will
    automatically set up the path to RTTOV coefficient files
    (:meth:`_satrad_coeffdir_setup`).

    In addition it provides the :meth:`setchannels` utility method (that have to
    be called manually if needed).
    """

    def _satrad_coeffdir_setup(self, rh, opts):  # @UnusedVariable
        """Look for RTTOV coefficient files and act on it.

        All effective inputs with role ``RtCoef`` must live in a single
        directory; its path is exported through the ``RTTOV_COEFDIR``
        environment variable.

        :raises AlgoComponentError: if the coefficient files are spread
            across several directories.
        """
        rtcoefs = self.context.sequence.effective_inputs(role='RtCoef', kind='rtcoef')
        if rtcoefs:
            sh = self.system
            # Resolve symlinks so that equivalent locations compare equal
            rtpaths = {sh.path.dirname(sh.path.realpath(rtcoef.rh.container.localpath()))
                       for rtcoef in rtcoefs}
            if len(rtpaths) != 1:
                raise AlgoComponentError('The Radiative Transfer Coefficients are scattered in ' +
                                         'several directories: {!s}'.format(rtpaths))
            rtpath = rtpaths.pop()
            logger.info('Setting %s = %s', 'RTTOV_COEFDIR', rtpath)
            self.env['RTTOV_COEFDIR'] = rtpath

    # Hook automatically triggered by the AlgoComponent's prepare step
    _MIXIN_PREPARE_HOOKS = (_satrad_coeffdir_setup, )

    def setchannels(self):
        """Look up for channels namelists in effective inputs.

        For each namelist input carrying a ``channel`` option, create a
        ``...channels`` symlink named after the channel identifier (with its
        trailing digits stripped) that points to the namelist's local file.
        """
        namchan = [
            x.rh for x in self.context.sequence.effective_inputs(kind='namelist')
            if 'channel' in x.rh.options
        ]
        for thisnam in namchan:
            thisloc = re.sub(r'\d+$', '', thisnam.options['channel']) + 'channels'
            if thisloc != thisnam.container.localpath():
                logger.info('Linking < %s > to < %s >', thisnam.container.localpath(), thisloc)
                self.system.softlink(thisnam.container.localpath(), thisloc)
|
vortex/nwp/util/async.py
ADDED
|
@@ -0,0 +1,184 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Callback functions for Jeeves.
|
|
3
|
+
If needed, VORTEX must be loaded via a VortexWorker in this context.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from vortex.tools import compression, systems
|
|
7
|
+
from vortex.util.worker import VortexWorker
|
|
8
|
+
|
|
9
|
+
#: No automatic export
|
|
10
|
+
__all__ = []
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def _double_ssh(sh, loginnode, transfernode):
|
|
14
|
+
"""Applies a double ssh to retrieve the effective name of a machine.
|
|
15
|
+
|
|
16
|
+
This trick enables the load balancing and node crash recovery
|
|
17
|
+
capabilities handled by the network teams through DNS names.
|
|
18
|
+
|
|
19
|
+
May return None when network problems occur.
|
|
20
|
+
"""
|
|
21
|
+
cmd = ['ssh', '-x', loginnode, 'ssh', '-x', transfernode, 'hostname', '-s']
|
|
22
|
+
rc = sh.spawn(cmd, shell=False, output=True, fatal=False)
|
|
23
|
+
if not rc:
|
|
24
|
+
return None
|
|
25
|
+
return rc[0]
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def system_ftput(pnum, ask, config, logger, **opts):
    """Ftp transfer to some archive host.

    Removes the source on success.
    In phase mode, raw ftp is not allowed, and the hostname is dynamically
    obtained by a double ssh.

    :param pnum: The request's processing number (passed back in the result).
    :param ask: The jeeves request object; its dataset describes the transfer.
    :param config: The jeeves configuration (the ``driver`` section may
        provide a VORTEX ``profile``).
    :param logger: A structured logger (keyword-argument style calls).
    :param opts: ``phasemode``, ``attempts``, ``rawftput`` tuning flags.
    :return: A ``(pnum, rc, value)`` tuple; ``value['rpool']`` tells jeeves
        how to re-queue the request ('retry'/'error'), while ``value['clear']``
        reports the source-file removal status on success.
    """

    logger.info('System', todo=ask.todo, pnum=pnum, opts=opts)
    # Default outcome: ask jeeves to retry later
    value = dict(rpool='retry')

    phasemode = opts.get('phasemode', False)
    nbtries = opts.get('attempts', 1)
    if phasemode:
        # Raw ftp is not allowed in phase mode
        rawftput = False
    else:
        rawftput = opts.get('rawftput', False)
    trynum = 0

    profile = config['driver'].get('profile', None)
    with VortexWorker(logger=logger, profile=profile) as vwork:
        sh = vwork.session.sh
        sh.trace = 'log'
        sh.ftpflavour = systems.FTP_FLAVOUR.STD  # Because errors are handled directly by jeeves

        data = vwork.get_dataset(ask)
        logger.info('ftput', source=data.source, destination=data.destination)
        if not sh.path.exists(data.source):
            logger.error('The source file is missing - sorry')
            return pnum, False, dict(rpool='error')

        if phasemode:
            # Resolve the effective target host through a double ssh
            data.hostname = _double_ssh(sh, data.phase_loginnode, data.phase_transfernode)
            if data.hostname is None:
                return pnum, False, dict(rpool='retry')

        # Optional on-the-fly compression of the transferred data
        cpipeline = (None if not hasattr(data, 'cpipeline') or not data.cpipeline
                     else compression.CompressionPipeline(sh, data.cpipeline))

        logger.info('FTPut host', hostname=data.hostname, logname=data.logname)
        logger.info('FTPut data', source=data.source, destination=data.destination)
        while trynum < nbtries:
            trynum += 1
            if nbtries > 1:
                logger.info('FTPut loop', attempt=trynum)
            try:
                if rawftput:
                    putrc = sh.rawftput(data.source, data.destination, hostname=data.hostname,
                                        logname=data.logname, cpipeline=cpipeline,
                                        fmt=data.fmt)
                else:
                    putrc = sh.ftput(data.source, data.destination, hostname=data.hostname,
                                     logname=data.logname, cpipeline=cpipeline,
                                     fmt=data.fmt)
            except Exception as e:
                # A failed attempt is not fatal: loop until attempts run out
                logger.warning('FTPut failed', attempt=trynum, error=e)
                putrc = False
            if putrc:
                # Success: remove the source and report the removal status
                value = dict(clear=sh.rm(data.source, fmt=data.fmt))
                break

    # vwork.rc reflects whether the VortexWorker context exited cleanly
    return pnum, putrc and vwork.rc, value
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
def system_cp(pnum, ask, config, logger, **opts):
    """Local transfers (between filesystems) on a given host."""

    logger.info('System', todo=ask.todo, pnum=pnum, opts=opts)
    # Default outcome: ask jeeves to retry later
    value = dict(rpool='retry')

    profile = config['driver'].get('profile', None)
    with VortexWorker(logger=logger, profile=profile) as vwork:
        shell = vwork.session.sh
        shell.trace = 'log'
        data = vwork.get_dataset(ask)
        logger.info('cp', source=data.source, destination=data.destination)
        if not shell.path.exists(data.source):
            # Nothing to copy: flag the request as erroneous
            logger.error('The source file is missing - sorry')
            return pnum, False, dict(rpool='error')

        try:
            copy_ok = shell.cp(data.source, data.destination, fmt=data.fmt)
        except Exception as trouble:
            logger.warning('cp failed', error=trouble)
            copy_ok = False
        if copy_ok:
            # Success: remove the source file and report the removal status
            value = dict(clear=shell.rm(data.source, fmt=data.fmt))

    return pnum, copy_ok and vwork.rc, value
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
def system_scp(pnum, ask, config, logger, **opts):
    """Scp transfer to some archive host.

    Removes the source on success.
    In phase mode, raw ftp is not allowed, and the hostname is dynamically
    obtained by a double ssh.
    """
    logger.info('System', todo=ask.todo, pnum=pnum, opts=opts)
    # Default outcome: ask jeeves to retry later
    value = dict(rpool='retry')

    phasemode = opts.get('phasemode', False)

    profile = config['driver'].get('profile', None)
    with VortexWorker(logger=logger, profile=profile) as vwork:
        shell = vwork.session.sh
        shell.trace = 'log'

        data = vwork.get_dataset(ask)
        logger.info('scp', source=data.source, destination=data.destination)
        if not shell.path.exists(data.source):
            # Nothing to transfer: flag the request as erroneous
            logger.error('The source file is missing - sorry')
            return pnum, False, dict(rpool='error')

        if phasemode:
            # Resolve the effective target host through a double ssh
            data.hostname = _double_ssh(shell, data.phase_loginnode, data.phase_transfernode)
            if data.hostname is None:
                return pnum, False, value
        logger.info('scp host', hostname=data.hostname, logname=data.logname)
        logger.info('scp data', source=data.source, destination=data.destination)
        try:
            put_ok = shell.scpput(data.source, data.destination, hostname=data.hostname,
                                  logname=data.logname, fmt=data.fmt)
        except Exception as trouble:
            logger.warning('scp failed', error=trouble)
            put_ok = False
        if put_ok:
            # Success: remove the source file and report the removal status
            value = dict(clear=shell.rm(data.source, fmt=data.fmt))

    return pnum, put_ok and vwork.rc, value
|
|
158
|
+
|
|
159
|
+
|
|
160
|
+
def system_noop(pnum, ask, config, logger, **opts):
    """A callback able to do nothing, but cleanly.

    Used to deactivate jeeves when mirroring the operational suite.
    """
    logger.info('Noop', todo=ask.todo, pnum=pnum, opts=opts)

    profile = config['driver'].get('profile', None)
    with VortexWorker(logger=logger, profile=profile) as vwork:
        shell = vwork.session.sh
        shell.trace = 'log'
        data = vwork.get_dataset(ask)
        # Just discard the source file and report the removal status
        value = dict(clear=shell.rm(data.source, fmt=data.fmt))

    return pnum, vwork.rc, value
|
|
175
|
+
|
|
176
|
+
|
|
177
|
+
if __name__ == '__main__':
    # Quick manual check: resolve the effective transfer-node hostname
    # through a double ssh (requires network access to the target nodes).
    import vortex

    t = vortex.ticket()
    main_sh = t.sh
    main_sh.trace = True
    main_sh.verbose = True
    print(_double_ssh(main_sh, 'beaufixoper', 'beaufixtransfert-agt'))
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Functions to create and write a few information in a file using Vortex
|
|
3
|
+
(FunctionStore).
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import io
|
|
7
|
+
import json
|
|
8
|
+
|
|
9
|
+
#: No automatic export
|
|
10
|
+
__all__ = []
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def beaconfunction(options):
    """Function to create a file and write information in it.

    The returned file-like object holds a JSON document describing the
    resource handler found in **options** (key ``rhandler``):

    - model
    - date
    - cutoff
    - vapp
    - vconf
    - member (optional)
    """
    # Find out if a resource handler is present and load the elements to be written
    rhdict = options.get('rhandler', None)
    if rhdict:
        provider = rhdict.get('provider', {})
        resource = rhdict.get('resource', {})
        payload = dict(
            vapp=provider.get('vapp', ''),
            vconf=provider.get('vconf', ''),
            model=resource.get('model', ''),
            date=resource.get('date', ''),
            cutoff=resource.get('cutoff', ''),
        )
        member = provider.get('member', None)
        if member is not None:
            payload['member'] = member
    else:
        payload = dict(error='No resource handler here')
    # The caller expects a file-like object, hence the BytesIO wrapping
    return io.BytesIO(json.dumps(payload).encode(encoding='utf_8'))
|