vortex-nwp 2.0.0b1__py3-none-any.whl → 2.0.0b2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vortex/__init__.py +59 -45
- vortex/algo/__init__.py +3 -2
- vortex/algo/components.py +940 -614
- vortex/algo/mpitools.py +802 -497
- vortex/algo/serversynctools.py +34 -33
- vortex/config.py +19 -22
- vortex/data/__init__.py +9 -3
- vortex/data/abstractstores.py +593 -655
- vortex/data/containers.py +217 -162
- vortex/data/contents.py +65 -39
- vortex/data/executables.py +93 -102
- vortex/data/flow.py +40 -34
- vortex/data/geometries.py +228 -132
- vortex/data/handlers.py +428 -225
- vortex/data/outflow.py +15 -15
- vortex/data/providers.py +185 -163
- vortex/data/resources.py +48 -42
- vortex/data/stores.py +544 -413
- vortex/gloves.py +114 -87
- vortex/layout/__init__.py +1 -8
- vortex/layout/contexts.py +150 -84
- vortex/layout/dataflow.py +353 -202
- vortex/layout/monitor.py +264 -128
- vortex/nwp/__init__.py +5 -2
- vortex/nwp/algo/__init__.py +14 -5
- vortex/nwp/algo/assim.py +205 -151
- vortex/nwp/algo/clim.py +683 -517
- vortex/nwp/algo/coupling.py +447 -225
- vortex/nwp/algo/eda.py +437 -229
- vortex/nwp/algo/eps.py +403 -231
- vortex/nwp/algo/forecasts.py +420 -271
- vortex/nwp/algo/fpserver.py +683 -307
- vortex/nwp/algo/ifsnaming.py +205 -145
- vortex/nwp/algo/ifsroot.py +210 -122
- vortex/nwp/algo/monitoring.py +132 -76
- vortex/nwp/algo/mpitools.py +321 -191
- vortex/nwp/algo/odbtools.py +617 -353
- vortex/nwp/algo/oopsroot.py +449 -273
- vortex/nwp/algo/oopstests.py +90 -56
- vortex/nwp/algo/request.py +287 -206
- vortex/nwp/algo/stdpost.py +878 -522
- vortex/nwp/data/__init__.py +22 -4
- vortex/nwp/data/assim.py +125 -137
- vortex/nwp/data/boundaries.py +121 -68
- vortex/nwp/data/climfiles.py +193 -211
- vortex/nwp/data/configfiles.py +73 -69
- vortex/nwp/data/consts.py +426 -401
- vortex/nwp/data/ctpini.py +59 -43
- vortex/nwp/data/diagnostics.py +94 -66
- vortex/nwp/data/eda.py +50 -51
- vortex/nwp/data/eps.py +195 -146
- vortex/nwp/data/executables.py +440 -434
- vortex/nwp/data/fields.py +63 -48
- vortex/nwp/data/gridfiles.py +183 -111
- vortex/nwp/data/logs.py +250 -217
- vortex/nwp/data/modelstates.py +180 -151
- vortex/nwp/data/monitoring.py +72 -99
- vortex/nwp/data/namelists.py +254 -202
- vortex/nwp/data/obs.py +400 -308
- vortex/nwp/data/oopsexec.py +22 -20
- vortex/nwp/data/providers.py +90 -65
- vortex/nwp/data/query.py +71 -82
- vortex/nwp/data/stores.py +49 -36
- vortex/nwp/data/surfex.py +136 -137
- vortex/nwp/syntax/__init__.py +1 -1
- vortex/nwp/syntax/stdattrs.py +173 -111
- vortex/nwp/tools/__init__.py +2 -2
- vortex/nwp/tools/addons.py +22 -17
- vortex/nwp/tools/agt.py +24 -12
- vortex/nwp/tools/bdap.py +16 -5
- vortex/nwp/tools/bdcp.py +4 -1
- vortex/nwp/tools/bdm.py +3 -0
- vortex/nwp/tools/bdmp.py +14 -9
- vortex/nwp/tools/conftools.py +728 -378
- vortex/nwp/tools/drhook.py +12 -8
- vortex/nwp/tools/grib.py +65 -39
- vortex/nwp/tools/gribdiff.py +22 -17
- vortex/nwp/tools/ifstools.py +82 -42
- vortex/nwp/tools/igastuff.py +167 -143
- vortex/nwp/tools/mars.py +14 -2
- vortex/nwp/tools/odb.py +234 -125
- vortex/nwp/tools/partitioning.py +61 -37
- vortex/nwp/tools/satrad.py +27 -12
- vortex/nwp/util/async.py +83 -55
- vortex/nwp/util/beacon.py +10 -10
- vortex/nwp/util/diffpygram.py +174 -86
- vortex/nwp/util/ens.py +144 -63
- vortex/nwp/util/hooks.py +30 -19
- vortex/nwp/util/taskdeco.py +28 -24
- vortex/nwp/util/usepygram.py +278 -172
- vortex/nwp/util/usetnt.py +31 -17
- vortex/sessions.py +72 -39
- vortex/syntax/__init__.py +1 -1
- vortex/syntax/stdattrs.py +410 -171
- vortex/syntax/stddeco.py +31 -22
- vortex/toolbox.py +327 -192
- vortex/tools/__init__.py +11 -2
- vortex/tools/actions.py +125 -59
- vortex/tools/addons.py +111 -92
- vortex/tools/arm.py +42 -22
- vortex/tools/compression.py +72 -69
- vortex/tools/date.py +11 -4
- vortex/tools/delayedactions.py +242 -132
- vortex/tools/env.py +75 -47
- vortex/tools/folder.py +342 -171
- vortex/tools/grib.py +311 -149
- vortex/tools/lfi.py +423 -216
- vortex/tools/listings.py +109 -40
- vortex/tools/names.py +218 -156
- vortex/tools/net.py +632 -298
- vortex/tools/parallelism.py +93 -61
- vortex/tools/prestaging.py +55 -31
- vortex/tools/schedulers.py +172 -105
- vortex/tools/services.py +402 -333
- vortex/tools/storage.py +293 -358
- vortex/tools/surfex.py +24 -24
- vortex/tools/systems.py +1211 -631
- vortex/tools/targets.py +156 -100
- vortex/util/__init__.py +1 -1
- vortex/util/config.py +377 -327
- vortex/util/empty.py +2 -2
- vortex/util/helpers.py +56 -24
- vortex/util/introspection.py +18 -12
- vortex/util/iosponge.py +8 -4
- vortex/util/roles.py +4 -6
- vortex/util/storefunctions.py +39 -13
- vortex/util/structs.py +3 -3
- vortex/util/worker.py +29 -17
- vortex_nwp-2.0.0b2.dist-info/METADATA +66 -0
- vortex_nwp-2.0.0b2.dist-info/RECORD +142 -0
- {vortex_nwp-2.0.0b1.dist-info → vortex_nwp-2.0.0b2.dist-info}/WHEEL +1 -1
- vortex/layout/appconf.py +0 -109
- vortex/layout/jobs.py +0 -1276
- vortex/layout/nodes.py +0 -1424
- vortex/layout/subjobs.py +0 -464
- vortex_nwp-2.0.0b1.dist-info/METADATA +0 -50
- vortex_nwp-2.0.0b1.dist-info/RECORD +0 -146
- {vortex_nwp-2.0.0b1.dist-info → vortex_nwp-2.0.0b2.dist-info}/LICENSE +0 -0
- {vortex_nwp-2.0.0b1.dist-info → vortex_nwp-2.0.0b2.dist-info}/top_level.txt +0 -0
vortex/layout/jobs.py
DELETED
|
@@ -1,1276 +0,0 @@
|
|
|
1
|
-
"""
|
|
2
|
-
This modules defines helpers to build job's scripts.
|
|
3
|
-
"""
|
|
4
|
-
|
|
5
|
-
import ast
|
|
6
|
-
import collections
|
|
7
|
-
import functools
|
|
8
|
-
import importlib
|
|
9
|
-
import re
|
|
10
|
-
import sys
|
|
11
|
-
import tempfile
|
|
12
|
-
import traceback
|
|
13
|
-
|
|
14
|
-
from bronx.fancies import loggers
|
|
15
|
-
from bronx.stdtypes import date
|
|
16
|
-
from bronx.syntax.decorators import nicedeco
|
|
17
|
-
import footprints
|
|
18
|
-
from footprints import proxy as fpx
|
|
19
|
-
from footprints.stdtypes import FPSet
|
|
20
|
-
|
|
21
|
-
import vortex
|
|
22
|
-
from vortex.layout import subjobs
|
|
23
|
-
from vortex.layout.appconf import ConfigSet
|
|
24
|
-
from vortex.tools.actions import actiond as ad
|
|
25
|
-
from vortex.tools.actions import FlowSchedulerGateway
|
|
26
|
-
from vortex.tools.systems import istruedef
|
|
27
|
-
from vortex.util.config import GenericConfigParser, ExtendedReadOnlyConfigParser, AppConfigStringDecoder
|
|
28
|
-
from vortex.util.config import load_template
|
|
29
|
-
|
|
30
|
-
#: Export nothing
|
|
31
|
-
__all__ = []
|
|
32
|
-
|
|
33
|
-
logger = loggers.getLogger(__name__)
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
_RE_VORTEXDATE = re.compile(r'_(?P<date>\d{8})T(?P<hh>\d{2})(?P<mm>\d{2})(?P<cutoff>[AP])',
|
|
37
|
-
re.IGNORECASE)
|
|
38
|
-
_RE_OPTIME = re.compile(r'_t?(?P<hh>\d{2})(?:[:h-]?(?P<mm>\d{2})?)', re.IGNORECASE)
|
|
39
|
-
_RE_MEMBER = re.compile(r'_mb(?P<member>\d+)', re.IGNORECASE)
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
_JobBasicConf = collections.namedtuple('_JobBasicConf', ['appbase', 'xpid', 'vapp', 'vconf'])
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
def _guess_vapp_vconf_xpid(t, path=None):
|
|
46
|
-
"""
|
|
47
|
-
Extract from specified or current ``path`` what could be actual
|
|
48
|
-
``xpid``, ``vapp`` and ``vconf`` values.
|
|
49
|
-
"""
|
|
50
|
-
if path is None:
|
|
51
|
-
path = t.sh.pwd()
|
|
52
|
-
lpath = path.split('/')
|
|
53
|
-
if lpath[-1] in ('demo', 'gco', 'genv', 'jobs', 'logs', 'src', 'tasks', 'vortex'):
|
|
54
|
-
lpath.pop()
|
|
55
|
-
if re.match('jobs_[^' + t.sh.path.sep + ']+', lpath[-1]):
|
|
56
|
-
lpath.pop()
|
|
57
|
-
return _JobBasicConf('/'.join(lpath), *lpath[-3:])
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
def _mkjob_opts_detect_1(t, ** opts):
|
|
61
|
-
"""Detect options that does not depend on the configuration file."""
|
|
62
|
-
tr_opts = dict()
|
|
63
|
-
auto_opts = dict()
|
|
64
|
-
|
|
65
|
-
# Things guessed from the directory name
|
|
66
|
-
opset = _guess_vapp_vconf_xpid(t)
|
|
67
|
-
appbase = opts.pop('appbase', opset.appbase)
|
|
68
|
-
target_appbase = opts.get('target_appbase', opset.appbase)
|
|
69
|
-
xpid = opts.get('xpid', opset.xpid)
|
|
70
|
-
vapp = opts.pop('vapp', opset.vapp)
|
|
71
|
-
vconf = opts.pop('vconf', opset.vconf)
|
|
72
|
-
|
|
73
|
-
taskconf = opts.pop('taskconf', None)
|
|
74
|
-
if taskconf:
|
|
75
|
-
jobconf = '{:s}/conf/{:s}_{:s}_{:s}.ini'.format(appbase, vapp, vconf, taskconf)
|
|
76
|
-
taskconf = '_' + taskconf
|
|
77
|
-
else:
|
|
78
|
-
jobconf = '{:s}/conf/{:s}_{:s}.ini'.format(appbase, vapp, vconf)
|
|
79
|
-
taskconf = ''
|
|
80
|
-
|
|
81
|
-
# Other pre-calculated stuff
|
|
82
|
-
tr_opts['appbase'] = appbase
|
|
83
|
-
tr_opts['target_appbase'] = target_appbase
|
|
84
|
-
tr_opts['xpid'] = xpid
|
|
85
|
-
tr_opts['vapp'] = vapp
|
|
86
|
-
tr_opts['vconf'] = vconf
|
|
87
|
-
tr_opts['jobconf'] = jobconf
|
|
88
|
-
tr_opts['taskconf'] = taskconf
|
|
89
|
-
|
|
90
|
-
return tr_opts, auto_opts, opts
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
def _mkjob_opts_detect_2(t, tplconf, jobconf, jobconf_defaults, tr_opts, auto_opts, ** opts):
|
|
94
|
-
"""Detect options that depend on the configuration file."""
|
|
95
|
-
|
|
96
|
-
# Fix the task's name
|
|
97
|
-
name = re.sub(r'\.py$', '', opts.pop('name', 'autojob'))
|
|
98
|
-
|
|
99
|
-
# Try to find default rundate/runtime according to the jobname
|
|
100
|
-
runtime = opts.pop('runtime', None)
|
|
101
|
-
rundate = opts.pop('rundate', None)
|
|
102
|
-
cutoff = opts.pop('cutoff', None)
|
|
103
|
-
if runtime is None and rundate is None:
|
|
104
|
-
vtxdate = _RE_VORTEXDATE.search(name)
|
|
105
|
-
if vtxdate:
|
|
106
|
-
rundate = date.Date(vtxdate.group('date') +
|
|
107
|
-
vtxdate.group('hh') + vtxdate.group('mm'))
|
|
108
|
-
runtime = date.Time('{:s}:{:s}'.format(vtxdate.group('hh'),
|
|
109
|
-
vtxdate.group('mm')))
|
|
110
|
-
if cutoff is None:
|
|
111
|
-
cutoff = dict(A='assim', P='production').get(vtxdate.group('cutoff'))
|
|
112
|
-
name = _RE_VORTEXDATE.sub('', name)
|
|
113
|
-
else:
|
|
114
|
-
optime = _RE_OPTIME.search(name)
|
|
115
|
-
if optime:
|
|
116
|
-
runtime = date.Time('{:s}:{:s}'.format(optime.group('hh'), optime.group('mm')))
|
|
117
|
-
name = _RE_OPTIME.sub('', name)
|
|
118
|
-
|
|
119
|
-
# Try to find default member number according to the jobname
|
|
120
|
-
member = opts.pop('member', None)
|
|
121
|
-
if member is None:
|
|
122
|
-
mblookup = _RE_MEMBER.search(name)
|
|
123
|
-
if mblookup:
|
|
124
|
-
member = int(mblookup.group('member'))
|
|
125
|
-
name = _RE_MEMBER.sub('', name)
|
|
126
|
-
|
|
127
|
-
# Get the job's configuration
|
|
128
|
-
p_jobconf = jobconf.get(name, None)
|
|
129
|
-
if p_jobconf is None:
|
|
130
|
-
logger.warning('No job configuration for job name=%s', name)
|
|
131
|
-
logger.info('The job configuration build from the [DEFAULT] section... This may be a bad idea !')
|
|
132
|
-
p_jobconf = jobconf_defaults
|
|
133
|
-
|
|
134
|
-
# The mkjob profile and associated conf
|
|
135
|
-
profile = opts.pop('profile',
|
|
136
|
-
p_jobconf.get('profile_mkjob', 'test'))
|
|
137
|
-
|
|
138
|
-
# Find the appropriate config given the template
|
|
139
|
-
p_tplconf = tplconf.get(profile, None)
|
|
140
|
-
if p_tplconf is None:
|
|
141
|
-
emsg = "Job's profile << {:s} >> not found.".format(profile)
|
|
142
|
-
logger.critical(emsg)
|
|
143
|
-
raise ValueError(emsg)
|
|
144
|
-
|
|
145
|
-
def opts_plus_job(what, default):
|
|
146
|
-
"""Function that look up in command line options, then in job's conf."""
|
|
147
|
-
return opts.pop(what, p_jobconf.get(what, default))
|
|
148
|
-
|
|
149
|
-
def opts_plus_job_plus_tpl(what, default):
|
|
150
|
-
"""
|
|
151
|
-
Function that look up in command line options, then in job's conf,
|
|
152
|
-
then in template's conf.
|
|
153
|
-
"""
|
|
154
|
-
return opts.pop(what, p_jobconf.get(what, p_tplconf.get(what, default)))
|
|
155
|
-
|
|
156
|
-
# A last chance for these super-stars : they may be set in job's conf...
|
|
157
|
-
if rundate is None:
|
|
158
|
-
rundate = p_jobconf.get('rundate', None)
|
|
159
|
-
if runtime is None:
|
|
160
|
-
runtime = p_jobconf.get('runtime', None)
|
|
161
|
-
if cutoff is None:
|
|
162
|
-
cutoff = p_jobconf.get('cutoff', None)
|
|
163
|
-
if member is None:
|
|
164
|
-
member = p_jobconf.get('member', None)
|
|
165
|
-
|
|
166
|
-
if member is not None:
|
|
167
|
-
try:
|
|
168
|
-
member = int(member)
|
|
169
|
-
except ValueError:
|
|
170
|
-
pass
|
|
171
|
-
|
|
172
|
-
# Special treatment for xpid and target_appbase (they may be in jobconf but
|
|
173
|
-
# command line value remain the preferred value)
|
|
174
|
-
for stuff in ('xpid', 'target_appbase'):
|
|
175
|
-
if stuff not in opts:
|
|
176
|
-
if stuff in p_jobconf:
|
|
177
|
-
tr_opts[stuff] = p_jobconf[stuff]
|
|
178
|
-
else:
|
|
179
|
-
del opts[stuff]
|
|
180
|
-
|
|
181
|
-
# Switch verbosity from boolean to plain string
|
|
182
|
-
verb = opts_plus_job_plus_tpl('verbose', True)
|
|
183
|
-
if isinstance(verb, bool):
|
|
184
|
-
verb = 'verbose' if verb else 'noverbose'
|
|
185
|
-
|
|
186
|
-
# Adapt the partition name if refill is on
|
|
187
|
-
refill = opts_plus_job_plus_tpl('refill', False)
|
|
188
|
-
if not isinstance(refill, bool):
|
|
189
|
-
refill = bool(istruedef.match(refill))
|
|
190
|
-
warmstart = opts_plus_job_plus_tpl('warmstart', False)
|
|
191
|
-
if not isinstance(warmstart, bool):
|
|
192
|
-
warmstart = bool(istruedef.match(warmstart))
|
|
193
|
-
partition = opts_plus_job_plus_tpl('partition', None)
|
|
194
|
-
if refill or warmstart:
|
|
195
|
-
partition = opts_plus_job_plus_tpl('refill_partition', None)
|
|
196
|
-
|
|
197
|
-
# SuiteBg
|
|
198
|
-
suitebg = opts_plus_job_plus_tpl('suitebg', None)
|
|
199
|
-
|
|
200
|
-
# Rundates
|
|
201
|
-
rundates = opts_plus_job_plus_tpl('rundates', None)
|
|
202
|
-
|
|
203
|
-
# Lists...
|
|
204
|
-
for explist in ('loadedmods', 'loadedaddons', 'loadedjaplugins',
|
|
205
|
-
'ldlibs', 'extrapythonpath'):
|
|
206
|
-
val = opts_plus_job_plus_tpl(explist, None)
|
|
207
|
-
if val:
|
|
208
|
-
tr_opts[explist] = ','.join(["'{:s}'".format(x)
|
|
209
|
-
for x in re.split(r'\s*,\s*', val)
|
|
210
|
-
if len(x)])
|
|
211
|
-
if tr_opts[explist]:
|
|
212
|
-
tr_opts[explist] += ',' # Always ends with a ,
|
|
213
|
-
|
|
214
|
-
# A lot of basic stuffs...
|
|
215
|
-
tr_opts['create'] = opts.pop('create', date.at_second().iso8601())
|
|
216
|
-
tr_opts['mkuser'] = opts.pop('mkuser', t.glove.user)
|
|
217
|
-
tr_opts['mkhost'] = opts.pop('mkhost', t.sh.hostname)
|
|
218
|
-
tr_opts['mkopts'] = opts.pop('mkopts')
|
|
219
|
-
tr_opts['pwd'] = opts.pop('pwd', t.sh.getcwd())
|
|
220
|
-
tr_opts['home'] = opts_plus_job('home', t.env.HOME)
|
|
221
|
-
|
|
222
|
-
tr_opts['python_mkjob'] = t.sh.which('python')
|
|
223
|
-
tr_opts['python'] = opts_plus_job_plus_tpl('python', tr_opts['python_mkjob'])
|
|
224
|
-
tr_opts['pyopts'] = opts_plus_job_plus_tpl('pyopts', '-u')
|
|
225
|
-
|
|
226
|
-
tr_opts['task'] = opts_plus_job_plus_tpl('task', 'void')
|
|
227
|
-
|
|
228
|
-
# Other pre-calculated stuff
|
|
229
|
-
tr_opts['verbose'] = verb
|
|
230
|
-
tr_opts['name'] = name
|
|
231
|
-
tr_opts['file'] = opts.pop('file', name + '.py')
|
|
232
|
-
if rundate is None:
|
|
233
|
-
tr_opts['rundate'] = None
|
|
234
|
-
else:
|
|
235
|
-
try:
|
|
236
|
-
rundate = date.Date(rundate).ymdh
|
|
237
|
-
except (ValueError, TypeError):
|
|
238
|
-
pass
|
|
239
|
-
tr_opts['rundate'] = "'" + str(rundate) + "'" # Ugly, but that's history
|
|
240
|
-
if runtime is None:
|
|
241
|
-
tr_opts['runtime'] = None
|
|
242
|
-
else:
|
|
243
|
-
try:
|
|
244
|
-
runtime = date.Time(runtime)
|
|
245
|
-
except (ValueError, TypeError):
|
|
246
|
-
pass
|
|
247
|
-
tr_opts['runtime'] = "'" + str(runtime) + "'" # Ugly, but that's history
|
|
248
|
-
if cutoff is not None:
|
|
249
|
-
tr_opts['cutoff'] = cutoff
|
|
250
|
-
tr_opts['member'] = member
|
|
251
|
-
auto_opts['member'] = member
|
|
252
|
-
if suitebg is None:
|
|
253
|
-
tr_opts['suitebg'] = suitebg
|
|
254
|
-
else:
|
|
255
|
-
tr_opts['suitebg'] = "'" + suitebg + "'" # Ugly, but that's history
|
|
256
|
-
auto_opts['suitebg'] = suitebg
|
|
257
|
-
tr_opts['refill'] = refill
|
|
258
|
-
tr_opts['warmstart'] = warmstart
|
|
259
|
-
if partition is not None:
|
|
260
|
-
tr_opts['partition'] = partition
|
|
261
|
-
if rundates:
|
|
262
|
-
tr_opts['rundates'] = rundates
|
|
263
|
-
auto_opts['rundates'] = rundates
|
|
264
|
-
else:
|
|
265
|
-
tr_opts['rundates'] = ''
|
|
266
|
-
|
|
267
|
-
# The list of auto command-line options to ignore
|
|
268
|
-
auto_options_filter_opts = opts.pop('auto_options_filter', ())
|
|
269
|
-
auto_options_filter = (opts_plus_job_plus_tpl('auto_options_filter', '').split(',') +
|
|
270
|
-
list(auto_options_filter_opts))
|
|
271
|
-
# All the remaining stuff...
|
|
272
|
-
for k, v in opts.items():
|
|
273
|
-
tr_opts.setdefault(k, v)
|
|
274
|
-
if k not in auto_options_filter:
|
|
275
|
-
auto_opts.setdefault(k, v)
|
|
276
|
-
for k, v in p_jobconf.items():
|
|
277
|
-
tr_opts.setdefault(k, v)
|
|
278
|
-
for k, v in p_tplconf.items():
|
|
279
|
-
tr_opts.setdefault(k, v)
|
|
280
|
-
return tr_opts, auto_opts
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
def _mkjob_type_translate(k, v):
|
|
284
|
-
"""Dump values as strings for auto_options export..."""
|
|
285
|
-
if 'dates' in k:
|
|
286
|
-
return "bronx.stdtypes.date.daterangex('{:s}')".format(v)
|
|
287
|
-
elif 'date' in k:
|
|
288
|
-
return "bronx.stdtypes.date.Date('{:s}')".format(v)
|
|
289
|
-
else:
|
|
290
|
-
if isinstance(v, str):
|
|
291
|
-
return "'{:s}'".format(v)
|
|
292
|
-
else:
|
|
293
|
-
return str(v)
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
def _mkjob_opts_autoexport(auto_opts):
|
|
297
|
-
return ',\n'.join([' ' + k + '=' + _mkjob_type_translate(k, v)
|
|
298
|
-
for k, v in sorted(auto_opts.items())])
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
def mkjob(t, **kw):
|
|
302
|
-
"""Build a complete job file according to a template and some parameters."""
|
|
303
|
-
opts = dict(
|
|
304
|
-
inifile='@job-default.ini',
|
|
305
|
-
wrap=False,
|
|
306
|
-
)
|
|
307
|
-
opts.update(kw)
|
|
308
|
-
|
|
309
|
-
# Detect some basic options that do not depend on the configuration files
|
|
310
|
-
tr_opts, auto_opts, r_kw = _mkjob_opts_detect_1(t, mkopts=str(kw), **kw)
|
|
311
|
-
|
|
312
|
-
# Read the configuration files
|
|
313
|
-
try:
|
|
314
|
-
iniparser = ExtendedReadOnlyConfigParser(inifile=opts['inifile'])
|
|
315
|
-
tplconf = iniparser.as_dict()
|
|
316
|
-
except Exception as pb:
|
|
317
|
-
emsg = 'Could not read the << {:s} >> config file: {!s}'.format(opts['inifile'], pb)
|
|
318
|
-
logger.critical(emsg)
|
|
319
|
-
raise ValueError(emsg)
|
|
320
|
-
|
|
321
|
-
if t.sh.path.exists(tr_opts['jobconf']):
|
|
322
|
-
t.sh.header('Reading ' + tr_opts['jobconf'])
|
|
323
|
-
try:
|
|
324
|
-
jobparser = ExtendedReadOnlyConfigParser(inifile=tr_opts['jobconf'])
|
|
325
|
-
jobconf = jobparser.as_dict()
|
|
326
|
-
jobconf_default = jobparser.defaults()
|
|
327
|
-
except Exception as pb:
|
|
328
|
-
emsg = 'Could not read the << {:s} >> config file: {!s}'.format(tr_opts['jobconf'], pb)
|
|
329
|
-
logger.critical(emsg)
|
|
330
|
-
raise ValueError(emsg)
|
|
331
|
-
else:
|
|
332
|
-
emsg = 'Could not find the << {:s} >> config file.'.format(tr_opts['jobconf'])
|
|
333
|
-
logger.error(emsg)
|
|
334
|
-
raise ValueError(emsg)
|
|
335
|
-
|
|
336
|
-
# Detect most of the options that depend on the configuration file
|
|
337
|
-
tr_opts, auto_opts = _mkjob_opts_detect_2(t, tplconf, jobconf, jobconf_default,
|
|
338
|
-
tr_opts, auto_opts, ** r_kw)
|
|
339
|
-
|
|
340
|
-
# Dump auto_exported options
|
|
341
|
-
tr_opts['auto_options'] = _mkjob_opts_autoexport(auto_opts)
|
|
342
|
-
|
|
343
|
-
# Generate the job
|
|
344
|
-
corejob = load_template(t,
|
|
345
|
-
tr_opts['template'],
|
|
346
|
-
encoding="script",
|
|
347
|
-
default_templating='twopasslegacy')
|
|
348
|
-
tr_opts['tplfile'] = corejob.srcfile
|
|
349
|
-
|
|
350
|
-
# Variable starting with j2_ are dealt with using the AppConfigStringDecoder.
|
|
351
|
-
# It allows fancier things when jinja2 templates are used
|
|
352
|
-
j2_activated = corejob.KIND == 'jinja2'
|
|
353
|
-
if j2_activated:
|
|
354
|
-
csd = AppConfigStringDecoder(substitution_cb=lambda k: tr_opts[k])
|
|
355
|
-
for k in [k for k in tr_opts.keys() if k.startswith('j2_')]:
|
|
356
|
-
tr_opts[k] = csd(tr_opts[k])
|
|
357
|
-
|
|
358
|
-
pycode = corejob(** tr_opts)
|
|
359
|
-
|
|
360
|
-
if opts['wrap']:
|
|
361
|
-
def autojob():
|
|
362
|
-
eval(compile(pycode, 'compile.mkjob.log', 'exec'))
|
|
363
|
-
objcode = autojob
|
|
364
|
-
else:
|
|
365
|
-
# Using ast ensures that a valid python script was generated
|
|
366
|
-
try:
|
|
367
|
-
ast.parse(pycode, 'compile.mkjob.log', 'exec')
|
|
368
|
-
except SyntaxError as e:
|
|
369
|
-
logger.error("Error while attempting to parse the following script:\n%s",
|
|
370
|
-
pycode)
|
|
371
|
-
raise
|
|
372
|
-
objcode = pycode
|
|
373
|
-
|
|
374
|
-
return objcode, tr_opts
|
|
375
|
-
|
|
376
|
-
|
|
377
|
-
@nicedeco
|
|
378
|
-
def _extendable(func):
|
|
379
|
-
"""Decorator for some of the JobAssistant method
|
|
380
|
-
|
|
381
|
-
The added behaviour is to look into the plugins list and call appropriate
|
|
382
|
-
methods upon them.
|
|
383
|
-
"""
|
|
384
|
-
def new_me(self, *kargs, **kw):
|
|
385
|
-
# Call the original function, save the result
|
|
386
|
-
res = func(self, *kargs, **kw)
|
|
387
|
-
# Automatically add the session (if missing)
|
|
388
|
-
dargs = list(kargs)
|
|
389
|
-
if not (dargs and isinstance(dargs[0], vortex.sessions.Ticket)):
|
|
390
|
-
dargs.insert(0, vortex.sessions.current())
|
|
391
|
-
# The method we are looking for
|
|
392
|
-
plugable_n = 'plugable_' + func.__name__.lstrip('_')
|
|
393
|
-
# Go through the plugins and look for available methods
|
|
394
|
-
for p in [p for p in self.plugins if hasattr(p, plugable_n)]:
|
|
395
|
-
# If the previous result was a session, use it...
|
|
396
|
-
if isinstance(res, vortex.sessions.Ticket):
|
|
397
|
-
dargs[0] = res
|
|
398
|
-
res = getattr(p, plugable_n)(*dargs, **kw)
|
|
399
|
-
# Look into the session's default target
|
|
400
|
-
tg_callback = getattr(dargs[0].sh.default_target, plugable_n, None)
|
|
401
|
-
if tg_callback is not None:
|
|
402
|
-
# If the previous result was a session, use it...
|
|
403
|
-
if isinstance(res, vortex.sessions.Ticket):
|
|
404
|
-
dargs[0] = res
|
|
405
|
-
res = tg_callback(self, *dargs, **kw)
|
|
406
|
-
return res
|
|
407
|
-
return new_me
|
|
408
|
-
|
|
409
|
-
|
|
410
|
-
class JobAssistant(footprints.FootprintBase):
|
|
411
|
-
"""Class in charge of setting various session and environment settings for a Vortex job."""
|
|
412
|
-
|
|
413
|
-
_collector = ('jobassistant',)
|
|
414
|
-
_footprint = dict(
|
|
415
|
-
info = 'Abstract JobAssistant',
|
|
416
|
-
attr = dict(
|
|
417
|
-
kind = dict(
|
|
418
|
-
values = ['generic', 'minimal']
|
|
419
|
-
),
|
|
420
|
-
modules = dict(
|
|
421
|
-
info = 'A set of Python modules/packages to be imported.',
|
|
422
|
-
type = FPSet,
|
|
423
|
-
optional = True,
|
|
424
|
-
default = FPSet(()),
|
|
425
|
-
),
|
|
426
|
-
addons = dict(
|
|
427
|
-
info = 'A set of Vortex shell addons to load in the main System object',
|
|
428
|
-
type = FPSet,
|
|
429
|
-
optional = True,
|
|
430
|
-
default = FPSet(()),
|
|
431
|
-
),
|
|
432
|
-
ldlibs = dict(
|
|
433
|
-
info = 'A set of paths to prepend to the LD_LIBRARY_PATH variable.',
|
|
434
|
-
type = FPSet,
|
|
435
|
-
optional = True,
|
|
436
|
-
default = FPSet(()),
|
|
437
|
-
),
|
|
438
|
-
special_prefix = dict(
|
|
439
|
-
info = 'The prefix of environment variable with a special meaning.',
|
|
440
|
-
optional = True,
|
|
441
|
-
default = 'op_',
|
|
442
|
-
)
|
|
443
|
-
),
|
|
444
|
-
)
|
|
445
|
-
|
|
446
|
-
_P_SESSION_INFO_FMT = '+ {0:14s} = {1!s}'
|
|
447
|
-
_P_ENVVAR_FMT = '+ {0:s} = {1!s}'
|
|
448
|
-
_P_MODULES_FMT = '+ {0:s}'
|
|
449
|
-
_P_ADDON_FMT = '+ Add-on {0:10s} = {1!r}'
|
|
450
|
-
|
|
451
|
-
def __init__(self, *args, **kw):
|
|
452
|
-
super().__init__(*args, **kw)
|
|
453
|
-
self.subjob_allowed = True
|
|
454
|
-
self.subjob_tag = None
|
|
455
|
-
self.subjob_fsid = None
|
|
456
|
-
# By default, no error code is thrown away
|
|
457
|
-
self.unix_exit_code = 0
|
|
458
|
-
self._plugins = list()
|
|
459
|
-
self._conf = None
|
|
460
|
-
self._special_variables = None
|
|
461
|
-
|
|
462
|
-
@property
|
|
463
|
-
def plugins(self):
|
|
464
|
-
return self._plugins
|
|
465
|
-
|
|
466
|
-
def add_plugin(self, kind, **kwargs):
|
|
467
|
-
self._plugins.append(fpx.jobassistant_plugin(kind=kind, masterja=self,
|
|
468
|
-
**kwargs))
|
|
469
|
-
|
|
470
|
-
@property
|
|
471
|
-
def conf(self):
|
|
472
|
-
if self._conf is None:
|
|
473
|
-
raise RuntimeError('It is too soon to access the JobAssisant configuration')
|
|
474
|
-
return self._conf
|
|
475
|
-
|
|
476
|
-
@property
|
|
477
|
-
def special_variables(self):
|
|
478
|
-
if self._special_variables is None:
|
|
479
|
-
raise RuntimeError('It is too soon to access the JobAssisant special variables')
|
|
480
|
-
return self._special_variables
|
|
481
|
-
|
|
482
|
-
def __getattr__(self, name):
|
|
483
|
-
"""Search the plugins for unknown methods."""
|
|
484
|
-
if not (name.startswith('_') or name.startswith('plugable')):
|
|
485
|
-
for plugin in self.plugins:
|
|
486
|
-
if hasattr(plugin, name):
|
|
487
|
-
return getattr(plugin, name)
|
|
488
|
-
raise AttributeError('Attribute not found.')
|
|
489
|
-
|
|
490
|
-
@_extendable
|
|
491
|
-
def _init_special_variables(self, prefix=None, **kw):
|
|
492
|
-
"""Print some of the environment variables."""
|
|
493
|
-
prefix = prefix or self.special_prefix
|
|
494
|
-
# Suffixed variables
|
|
495
|
-
specials = kw.get('actual', dict())
|
|
496
|
-
self._special_variables = {k[len(prefix):].lower(): v
|
|
497
|
-
for k, v in specials.items() if k.startswith(prefix)}
|
|
498
|
-
# Auto variables
|
|
499
|
-
auto = kw.get('auto_options', dict())
|
|
500
|
-
for k, v in auto.items():
|
|
501
|
-
self._special_variables.setdefault(k.lower(), v)
|
|
502
|
-
|
|
503
|
-
def _kw_and_specials_get(self, what, default, **kw):
|
|
504
|
-
"""Look for name in **kw** and **self.special_variables**."""
|
|
505
|
-
return kw.get(what, self.special_variables.get(what, default))
|
|
506
|
-
|
|
507
|
-
def _init_conf(self, **kw):
|
|
508
|
-
"""Read the application's configuration file."""
|
|
509
|
-
jobname = self._kw_and_specials_get('jobname', None)
|
|
510
|
-
iniconf = self._kw_and_specials_get('iniconf', None)
|
|
511
|
-
iniencoding = self._kw_and_specials_get('inienconding', None)
|
|
512
|
-
self._conf = ConfigSet()
|
|
513
|
-
if iniconf:
|
|
514
|
-
try:
|
|
515
|
-
iniparser = GenericConfigParser(iniconf, encoding=iniencoding)
|
|
516
|
-
except Exception:
|
|
517
|
-
logger.critical('Could not read config %s', iniconf)
|
|
518
|
-
raise
|
|
519
|
-
thisconf = iniparser.as_dict(merged=False)
|
|
520
|
-
# Conf defaults
|
|
521
|
-
self._conf.update(thisconf.get('defaults', dict()))
|
|
522
|
-
if jobname is not None:
|
|
523
|
-
# Job specific conf
|
|
524
|
-
self._conf.update(thisconf.get(jobname, dict()))
|
|
525
|
-
# Stuff from the script and command-line
|
|
526
|
-
self._conf.update(self.special_variables)
|
|
527
|
-
|
|
528
|
-
@staticmethod
|
|
529
|
-
def _printfmt(fmt, *kargs, **kwargs):
|
|
530
|
-
print(fmt.format(*kargs, **kwargs))
|
|
531
|
-
|
|
532
|
-
@_extendable
|
|
533
|
-
def _print_session_info(self, t):
|
|
534
|
-
"""Display informations about the current session."""
|
|
535
|
-
|
|
536
|
-
locprint = functools.partial(self._printfmt, self._P_SESSION_INFO_FMT)
|
|
537
|
-
|
|
538
|
-
t.sh.header('Toolbox description')
|
|
539
|
-
|
|
540
|
-
locprint('Root directory', t.glove.siteroot)
|
|
541
|
-
locprint('Path directory', t.glove.sitesrc)
|
|
542
|
-
locprint('Conf directory', t.glove.siteconf)
|
|
543
|
-
|
|
544
|
-
t.sh.header('Session & Target description')
|
|
545
|
-
|
|
546
|
-
locprint('Session Ticket', t)
|
|
547
|
-
locprint('Session Glove', t.glove)
|
|
548
|
-
locprint('Session System', t.sh)
|
|
549
|
-
locprint('Session Env', t.env)
|
|
550
|
-
tg = t.sh.default_target
|
|
551
|
-
locprint('Target name', tg.hostname)
|
|
552
|
-
locprint('Target system', tg.sysname)
|
|
553
|
-
locprint('Target inifile', tg.inifile)
|
|
554
|
-
|
|
555
|
-
@_extendable
|
|
556
|
-
def _print_toolbox_settings(self, t):
|
|
557
|
-
"""Display the toolbox settings."""
|
|
558
|
-
vortex.toolbox.show_toolbox_settings()
|
|
559
|
-
|
|
560
|
-
@classmethod
|
|
561
|
-
def print_somevariables(cls, t, prefix=''):
|
|
562
|
-
"""Print some of the environment variables."""
|
|
563
|
-
prefix = prefix.upper()
|
|
564
|
-
filtered = sorted([x for x in t.env.keys() if x.startswith(prefix)])
|
|
565
|
-
if filtered:
|
|
566
|
-
t.sh.highlight('{:s} environment variables'.format(prefix if prefix else 'All'))
|
|
567
|
-
maxlen = max([len(x) for x in filtered])
|
|
568
|
-
for var_name in filtered:
|
|
569
|
-
cls._printfmt(cls._P_ENVVAR_FMT,
|
|
570
|
-
var_name.ljust(maxlen), t.env.native(var_name))
|
|
571
|
-
return len(filtered)
|
|
572
|
-
|
|
573
|
-
@_extendable
|
|
574
|
-
def _add_specials(self, t, prefix=None, **kw):
|
|
575
|
-
"""Print some of the environment variables."""
|
|
576
|
-
prefix = prefix or self.special_prefix
|
|
577
|
-
if self.special_variables:
|
|
578
|
-
filtered = {prefix + k: v for k, v in self.special_variables.items()}
|
|
579
|
-
self._printfmt('Copying actual {:s} variables to the environment', prefix)
|
|
580
|
-
t.env.update(filtered)
|
|
581
|
-
self.print_somevariables(t, prefix=prefix)
|
|
582
|
-
|
|
583
|
-
@_extendable
|
|
584
|
-
def _modules_preload(self, t):
|
|
585
|
-
"""Import all the modules listed in the footprint."""
|
|
586
|
-
t.sh.header('External imports')
|
|
587
|
-
for module in sorted(self.modules):
|
|
588
|
-
importlib.import_module(module)
|
|
589
|
-
self._printfmt(self._P_MODULES_FMT, module)
|
|
590
|
-
|
|
591
|
-
@_extendable
|
|
592
|
-
def _addons_preload(self, t):
|
|
593
|
-
"""Load shell addons."""
|
|
594
|
-
t.sh.header('Add-ons to the shell')
|
|
595
|
-
for addon in self.addons:
|
|
596
|
-
shadd = footprints.proxy.addon(kind=addon, shell=t.sh)
|
|
597
|
-
self._printfmt(self._P_ADDON_FMT, addon.upper(), shadd)
|
|
598
|
-
|
|
599
|
-
@_extendable
|
|
600
|
-
def _system_setup(self, t, **kw):
|
|
601
|
-
"""Set usual settings for the system shell."""
|
|
602
|
-
t.sh.header("Session and system basic setup")
|
|
603
|
-
self._printfmt('+ Setting "stack" and "memlock" limits to unlimited.')
|
|
604
|
-
t.sh.setulimit('stack')
|
|
605
|
-
t.sh.setulimit('memlock')
|
|
606
|
-
for ldlib in self.ldlibs:
|
|
607
|
-
self._printfmt('+ Prepending "{}" to the LD_LIBRARY_PATH.', ldlib)
|
|
608
|
-
t.env.setgenericpath('LD_LIBRARY_PATH', ldlib, pos=0)
|
|
609
|
-
|
|
610
|
-
@_extendable
|
|
611
|
-
def _early_session_setup(self, t, **kw):
|
|
612
|
-
"""Create a now session, set important things, ..."""
|
|
613
|
-
t.sh.header("Session's early setup")
|
|
614
|
-
t.glove.vapp = self._kw_and_specials_get('vapp', None)
|
|
615
|
-
t.glove.vconf = self._kw_and_specials_get('vconf', None)
|
|
616
|
-
# Ensure that the script's path is an absolute path
|
|
617
|
-
sys.argv[0] = t.sh.path.abspath(sys.argv[0])
|
|
618
|
-
return t
|
|
619
|
-
|
|
620
|
-
@_extendable
def _extra_session_setup(self, t, **kw):
    """Additional setup for the session.

    Performs three independent tasks:

    * reload the session's datastore from disk when running as a subjob;
    * configure the default/per-destination file-transfer user names from
      the ``ftuser`` configuration entry (a string or a dict);
    * configure the default file-transfer hostname from ``fthost``.
    """
    t.sh.header("Session's final setup")
    # Handle session's datastore for subjobs: the parent job dumped it to
    # disk before spawning us (see :meth:`finalise` for the reverse path).
    if self.subjob_tag is not None:
        t.datastore.pickle_load(subjobs._DSTORE_IN.format(self.subjob_fsid))
        self._printfmt('+ The datastore was read from disk: ' + subjobs._DSTORE_IN,
                       self.subjob_fsid)
    # Possibly setup the default user names for file-transfers.
    # ``ftuser`` may be a plain string (default user) or a mapping of
    # destination -> logname; the special 'default' key sets the fallback.
    ftuser = self.conf.get('ftuser', None)
    if ftuser is not None:
        if isinstance(ftuser, dict):
            for dest, d_ftuser in ftuser.items():
                if not (isinstance(dest, str) and isinstance(d_ftuser, str)):
                    # Malformed entries are reported but do not abort the setup.
                    logger.error('Improper ftuser configuration (Destination=%s, Logname=%s)',
                                 dest, d_ftuser)
                    continue
                if dest.lower() == 'default':
                    self._printfmt('+ Setting the default file-transfer user to: {:s}', d_ftuser)
                    t.glove.setftuser(d_ftuser)
                else:
                    self._printfmt('+ Setting the {:s} file-transfer user to: {:s}', dest, d_ftuser)
                    t.glove.setftuser(d_ftuser, dest)
        elif isinstance(ftuser, str):
            self._printfmt('+ Setting the default file-transfer user to: {:s}', ftuser)
            t.glove.setftuser(ftuser)
        else:
            logger.error('Improper ftuser value %s', ftuser)
    # Possibly setup the default hostname for file-transfers
    fthost = self.conf.get('fthost', None)
    if fthost is not None:
        t.glove.default_fthost = fthost
        self._printfmt('+ Setting the default file-transfer hostname to: {:s}', fthost)
|
|
654
|
-
|
|
655
|
-
@_extendable
def _env_setup(self, t, **kw):
    """Configure the session's environment.

    Turns on verbose environment tracking and exports the special
    variables into the session's environment.
    """
    t.sh.header("Environment variables setup")
    t.env.verbose(True, t.sh)
    self._add_specials(t, **kw)
|
|
661
|
-
|
|
662
|
-
@_extendable
def _toolbox_setup(self, t, **kw):
    """Apply the default toolbox settings (verbose / now / clear all active)."""
    t.sh.header('Toolbox module settings')
    for toolbox_flag in ('active_verbose', 'active_now', 'active_clear'):
        setattr(vortex.toolbox, toolbox_flag, True)
|
|
669
|
-
|
|
670
|
-
@_extendable
def _actions_setup(self, t, **kw):
    """Setup the action dispatcher.

    The base implementation only prints a header; plugins extend this hook
    to register actual actions.
    """
    t.sh.header('Actions setup')
|
|
674
|
-
|
|
675
|
-
@_extendable
def _job_final_init(self, t, **kw):
    """Final initialisations for a job.

    The base implementation only prints a header; plugins extend this hook.
    """
    t.sh.header("Job's final init")
|
|
679
|
-
|
|
680
|
-
def _subjob_detect(self, t):
    """Detect whether this process runs as a subjob.

    When the ``VORTEX_SUBJOB_ACTIVATED`` environment variable is present
    (format ``tag:fsid``), record the subjob tag and filesystem id.
    """
    if 'VORTEX_SUBJOB_ACTIVATED' in t.env:
        self.subjob_tag, self.subjob_fsid = (
            t.env['VORTEX_SUBJOB_ACTIVATED'].split(':', 1))
|
|
685
|
-
|
|
686
|
-
def setup(self, **kw):
    """This is the main method. it setups everything in the session.

    The various ``_*_setup``/``_*_preload`` hooks are called in a fixed
    order; the order matters (e.g. the environment must be ready before
    modules and addons are loaded).

    :return: a ``(ticket, environment, shell)`` tuple for the session.
    """
    # We need the root session
    t = vortex.ticket()
    t.system().prompt = t.prompt
    t.sh.subtitle("Starting JobAssistant's setup")
    # Am I a subjob ?
    self._subjob_detect(t)
    # JA object setup
    self._init_special_variables(**kw)
    self._init_conf(**kw)
    # A new session can be created here (hence the re-binding of ``t``)
    t = self._early_session_setup(t, **kw)
    # Then, go on with initialisations...
    self._system_setup(t)  # Tweak the session's System object
    self._print_session_info(t)  # Print some info about the session
    self._env_setup(t, **kw)  # Setup the session's Environment object
    self._modules_preload(t)  # Load a few modules
    self._addons_preload(t)  # Active some shell addons
    self._extra_session_setup(t, **kw)  # Some extra configuration on the session
    self._toolbox_setup(t, **kw)  # Setup toolbox settings
    self._print_toolbox_settings(t)  # Print a summary of the toolbox settings
    self._actions_setup(t, **kw)  # Setup the actionDispatcher
    # Begin signal handling (switched off again in :meth:`close`)
    t.sh.signal_intercept_on()
    # A last word ?
    self._job_final_init(t, **kw)
    self._printfmt('')
    return t, t.env, t.sh
|
|
715
|
-
|
|
716
|
-
@_extendable
def add_extra_traces(self, t):
    """Turn on the system shell's verbose/trace mode."""
    t.sh.trace = True
|
|
720
|
-
|
|
721
|
-
@_extendable
def register_cycle(self, cycle):
    """A callback to register GCO cycles.

    If **cycle** does not look like a valid GgetId, registration is
    deferred (it will auto-register whenever necessary); otherwise the
    genv database is filled in unless the cycle is already known.
    """
    from vortex.nwp.syntax.stdattrs import GgetId
    try:
        cycle = GgetId(cycle)
    except ValueError:
        # Not a proper GgetId: defer the registration.
        self._printfmt('** Cycle << {!s} >> will auto-register whenever necessary **', cycle)
        return
    from vortex_gco.tools import genv
    if cycle in genv.cycles():
        self._printfmt('** Cycle << {!s} >> already registered **', cycle)
        return
    self._printfmt('\n** Cycle << {!s} >> is to be registered **', cycle)
    genv.autofill(cycle)
    print(genv.as_rawstr(cycle=cycle))
|
|
737
|
-
|
|
738
|
-
@_extendable
def complete(self):
    """Should be called when a job finishes successfully"""
    session = vortex.ticket()
    session.sh.subtitle("Executing JobAssistant's complete actions")
|
|
743
|
-
|
|
744
|
-
@_extendable
def fulltraceback(self, latest_error=None):
    """Produce some nice traceback at the point of failure.

    :param Exception latest_error: The latest caught exception.
    """
    session = vortex.ticket()
    session.sh.subtitle('Handling exception')
    exc_type, _, exc_traceback = sys.exc_info()
    self._printfmt('Exception type: {!s}', exc_type)
    self._printfmt('Exception info: {!s}', latest_error)
    session.sh.header('Traceback Error / BEGIN')
    print("\n".join(traceback.format_tb(exc_traceback)))
    session.sh.header('Traceback Error / END')
|
|
758
|
-
|
|
759
|
-
@_extendable
def rescue(self):
    """Called at the end of a job when something went wrong."""
    session = vortex.ticket()
    session.sh.subtitle("Executing JobAssistant's rescue actions")
    # Mark the whole job as failed for the final exit code.
    self.unix_exit_code = 1
|
|
765
|
-
|
|
766
|
-
@_extendable
def finalise(self):
    """Called whenever a job finishes (either successfully or badly)."""
    session = vortex.ticket()
    session.sh.subtitle("Executing JobAssistant's finalise actions")
    if self.subjob_tag is not None:
        # Hand the datastore back to the parent job through the disk.
        session.datastore.pickle_dump(
            subjobs._DSTORE_OUT.format(self.subjob_fsid, self.subjob_tag))
        self._printfmt('+ The datastore was written to disk: ' + subjobs._DSTORE_OUT,
                       self.subjob_fsid, self.subjob_tag)
|
|
775
|
-
|
|
776
|
-
def close(self):
    """This must be the last called method whenever a job finishes.

    Switches signal interception off, closes the session, and terminates
    the interpreter with the recorded exit code (non-zero on failure, 0
    for a subjob's fast exit).
    """
    t = vortex.ticket()
    t.sh.subtitle("Executing JobAssistant's close")
    t.sh.signal_intercept_off()
    t.exit()
    if self.unix_exit_code:
        self._printfmt('Something went wrong :-(')
        # Use sys.exit instead of the site-module ``exit`` helper: ``exit``
        # is meant for interactive use and may be absent in some setups.
        sys.exit(self.unix_exit_code)
    if self.subjob_tag:
        self._printfmt('Subjob fast exit :-)')
        sys.exit(0)
|
|
788
|
-
|
|
789
|
-
|
|
790
|
-
class JobAssistantPlugin(footprints.FootprintBase):
    """Abstract base class for all JobAssistant plugins.

    Concrete plugins declare a unique ``kind`` and may list incompatible
    plugin kinds in :attr:`_conflicts`.
    """

    _conflicts = []
    _abstract = True
    _collector = ('jobassistant_plugin',)
    _footprint = dict(
        info = 'Abstract JobAssistant Plugin',
        attr = dict(
            kind = dict(),
            masterja = dict(
                type=JobAssistant,
            ),
        ),
    )

    def __init__(self, *kargs, **kwargs):
        super().__init__(*kargs, **kwargs)
        # Check for potential conflicts. The set of already-plugged kinds is
        # computed once (the original rebuilt the list on every iteration).
        plugged_kinds = {p.kind for p in self.masterja.plugins}
        for conflicting in self._conflicts:
            if conflicting in plugged_kinds:
                raise RuntimeError('"{:s}" conflicts with "{:s}"'.format(self.kind, conflicting))

    @staticmethod
    def _printfmt(fmt, *kargs, **kwargs):
        """Delegate formatted printing to the JobAssistant class."""
        JobAssistant._printfmt(fmt, *kargs, **kwargs)
|
|
815
|
-
|
|
816
|
-
|
|
817
|
-
class JobAssistantTmpdirPlugin(JobAssistantPlugin):
    """JobAssistant plugin that sets the session's rundir from ``$TMPDIR``."""

    _conflicts = ['mtool', 'autodir']
    _footprint = dict(
        info = 'JobAssistant TMPDIR Plugin',
        attr = dict(
            kind = dict(
                values = ['tmpdir', ]
            ),
        ),
    )

    def plugable_extra_session_setup(self, t, **kw):
        """Set the rundir according to the TMPDIR variable."""
        # An explicit (truthy) 'rundir' entry in **kw wins over $TMPDIR.
        myrundir = kw.get('rundir', None) or t.env.TMPDIR
        if myrundir:
            # Bugfix: the original re-read kw['rundir'] here, which could
            # clobber the already-selected fallback with a falsy value.
            t.rundir = myrundir
            self._printfmt('+ Current rundir < {:s} >', t.rundir)
|
|
835
|
-
|
|
836
|
-
|
|
837
|
-
class JobAssistantAutodirPlugin(JobAssistantPlugin):
    """JobAssistant plugin that creates a unique run directory automatically.

    The run directory lives under ``<appbase>/run/tmp`` and is labelled with
    the job name plus a timestamp; it is removed on success (when
    **cleanup** is True) and backed up under ``<appbase>/run/abort`` on
    failure.
    """

    _conflicts = ['mtool', 'tmpdir']
    _footprint = dict(
        info = 'JobAssistant Automatic Directory Plugin',
        attr = dict(
            kind = dict(
                values = ['autodir', ]
            ),
            appbase = dict(
                info="The directory where the application lies.",
            ),
            jobname = dict(
                info="The current job name.",
            ),
            cleanup = dict(
                # Typo fix: "workind" -> "working" in the user-facing info.
                info = "Remove the working directory when the job is done.",
                type = bool,
                optional = True,
                default = True,
            ),
        ),
    )

    def __init__(self, *kargs, **kwargs):
        super().__init__(*kargs, **kwargs)
        # Unique per-job label, created lazily on first use.
        self._joblabel = None

    def _autodir_tmpdir(self, t):
        """Return (creating it on first call) the job's unique run directory."""
        tmpbase = t.sh.path.join(self.appbase, 'run', 'tmp')
        if self._joblabel is None:
            with t.sh.cdcontext(tmpbase, create=True):
                # mkdtemp guarantees uniqueness even for concurrent jobs.
                self._joblabel = t.sh.path.basename(tempfile.mkdtemp(
                    prefix='{:s}_{:s}_'.format(self.jobname,
                                               date.now().strftime('%Y%m%d_%H%M%S')),
                    dir='.'
                ))
        return t.sh.path.join(tmpbase, self._joblabel)

    def _autodir_abort(self, t):
        """Return (creating it if needed) the abort/backup directory."""
        abortbase = t.sh.path.join(self.appbase, 'run', 'abort')
        if self._joblabel is None:
            # Make sure a job label exists (side effect of _autodir_tmpdir).
            self._autodir_tmpdir(t)
        abortdir = t.sh.path.join(abortbase, self._joblabel)
        t.sh.mkdir(abortdir)
        return abortdir

    def plugable_extra_session_setup(self, t, **kw):
        """Set the rundir to an automatically created unique directory."""
        # Docstring fix: the original wrongly claimed TMPDIR-based behaviour.
        t.rundir = self._autodir_tmpdir(t)
        self._printfmt('+ Current rundir < {:s} >', t.rundir)

    def plugable_finalise(self, t):
        """Should be called when a job finishes successfully"""
        if self.cleanup:
            self._printfmt('+ Removing the rundir < {:s} >', t.rundir)
            # Leave the directory before deleting it.
            t.sh.cd(t.env.HOME)
            t.sh.rm(self._autodir_tmpdir(t))

    def plugable_rescue(self, t):
        """Called at the end of a job when something went wrong."""
        t.sh.cd(self._autodir_tmpdir(t))
        if self.masterja.subjob_tag is None:
            vortex.toolbox.rescue(bkupdir=self._autodir_abort(t))
|
|
901
|
-
|
|
902
|
-
|
|
903
|
-
class JobAssistantMtoolPlugin(JobAssistantPlugin):
    """JobAssistant plugin for jobs driven by the MTOOL step scheduler."""

    _conflicts = ['tmpdir', 'autodir']

    _footprint = dict(
        info = 'JobAssistant MTOOL Plugin',
        attr = dict(
            kind = dict(
                values = ['mtool', ]
            ),
            step = dict(
                info="The number of the current MTOOL step.",
                type=int,
            ),
            stepid=dict(
                info="The name (id) of the current MTOOL step.",
            ),
            lastid = dict(
                info="The name (id) of the last effective MTOOL step.",
                optional=True,
            ),
            mtoolid = dict(
                info="The MTOOL's job number",
                type=int,
                optional=True,
            )
        ),
    )

    @property
    def mtool_steps(self):
        """The tuple of Task steps associated with the current MTOOL step."""
        # Each MTOOL step id maps to the Task phases it must run.
        steps_map = {'transfer': ('early-fetch', 'fetch', 'backup', 'late-backup'),
                     'fetch': ('early-fetch', ),
                     'compute': ('early-fetch', 'fetch', 'compute', 'backup'),
                     'backup': ('backup', 'late-backup'), }
        try:
            return steps_map[self.stepid]
        except KeyError:
            # Unknown step ids are reported but yield no phases at all.
            logger.error("Unknown MTOOL step: %s", self.stepid)
            return ()

    @property
    def mstep_is_first(self):
        """Is it the first MTOOL step."""
        return self.step == 1

    @property
    def mstep_is_last(self):
        """Is it the last MTOOL step (apart from the cleaning)."""
        return self.stepid == self.lastid

    def plugable_extra_session_setup(self, t, **kw):
        """Set the rundir according to MTOOL's spool directory."""
        t.rundir = t.env.MTOOL_STEP_SPOOL
        t.sh.cd(t.rundir)
        self._printfmt('+ Current rundir < {:s} >', t.rundir)
        # Load the session's data store (dumped by the previous step),
        # except for the very first step or when running as a subjob.
        if self.step > 1 and self.masterja.subjob_tag is None:
            t.datastore.pickle_load()
            self._printfmt('+ The datastore was read from disk.')
        # Check that the log directory exists
        if "MTOOL_STEP_LOGFILE" in t.env:
            logfile = t.sh.path.normpath(t.env.MTOOL_STEP_LOGFILE)
            logdir = t.sh.path.dirname(logfile)
            if not t.sh.path.isdir(logdir):
                t.sh.mkdir(logdir)
            self._printfmt('+ Current logfile < {:s} >', logfile)
        # Only allow subjobs in compute steps
        self.masterja.subjob_allowed = self.stepid == 'compute'

    def plugable_toolbox_setup(self, t, **kw):
        """Toolbox MTOOL setup."""
        if self.stepid == 'compute':
            # No network activity during the compute step + promises already made
            vortex.toolbox.active_promise = False
            vortex.toolbox.active_insitu = True
            vortex.toolbox.active_incache = True

    def plugable_complete(self, t):
        """Should be called when a job finishes successfully"""
        t.sh.cd(t.env.MTOOL_STEP_SPOOL)
        # Dump the session datastore in the rundir (for the next step)
        if self.masterja.subjob_tag is None:
            t.datastore.pickle_dump()
            self._printfmt('+ The datastore is dumped to disk')

    def plugable_rescue(self, t):
        """Called at the end of a job when something went wrong.

        It backups the session's rundir and clean promises.
        """
        t.sh.cd(t.env.MTOOL_STEP_SPOOL)
        if self.masterja.subjob_tag is None:
            vortex.toolbox.rescue(bkupdir=t.env.MTOOL_STEP_ABORT)
|
|
998
|
-
|
|
999
|
-
|
|
1000
|
-
class JobAssistantFlowSchedPlugin(JobAssistantPlugin):
    """JobAssistant plugin that reports job progress to a flow scheduler
    (ecFlow or SMS) through the action dispatcher."""

    _footprint = dict(
        info = 'JobAssistant Flow Scheduler Plugin',
        attr = dict(
            kind = dict(
                values = ['flow', ]
            ),
            backend = dict(
                values = ['ecflow', 'sms']
            ),
            jobidlabels = dict(
                info="Update the task's jobid label.",
                default=False,
                optional=True,
                type=bool,
            ),
            mtoolmeters = dict(
                info="Update the MTOOL's work meter.",
                default=False,
                optional=True,
                type=bool,
            )
        ),
    )

    def __init__(self, *kargs, **kwargs):
        super().__init__(*kargs, **kwargs)
        # Cache for the MTOOL plugin lookup: 0 means "not looked up yet"
        # (None is a valid cached result meaning "no MTOOL plugin").
        self._flow_sched_saved_mtplug = 0

    @property
    def _flow_sched_mtool_plugin(self):
        """Return the MTOOL plugin (if present), caching the lookup."""
        if self._flow_sched_saved_mtplug == 0:
            self._flow_sched_saved_mtplug = None
            for p in self.masterja.plugins:
                if p.kind == 'mtool':
                    self._flow_sched_saved_mtplug = p
        return self._flow_sched_saved_mtplug

    def _flow_sched_ids(self, t):
        """Return the jobid and RID."""
        # Simple heuristic to find a job id (PBS, then SLURM, then the PID)
        jid = t.env.PBS_JOBID or t.env.SLURM_JOB_ID or 'localpid'
        if jid == 'localpid':
            jid = t.sh.getpid()
        # Find a suitable RID: with MTOOL, all steps share the MTOOL job
        # number so the scheduler sees a single remote id.
        mtplug = self._flow_sched_mtool_plugin
        if mtplug is None:
            rid = jid
        else:
            if mtplug.mtoolid is None:
                raise RuntimeError("mtplug.mtoolid must be defined")
            rid = mtplug.mtoolid
        return jid, rid

    def plugable_actions_setup(self, t, **kw):
        """Setup the flow action dispatcher."""
        # Everything is skipped for subjobs: only the master talks to the
        # flow scheduler.
        if self.masterja.subjob_tag is None:
            ad.add(FlowSchedulerGateway(service=self.backend))

            # Configure the action
            jid, rid = self._flow_sched_ids(t)
            # NOTE(review): when no batch scheduler is detected, ``jid`` is
            # the integer PID and "{:s}".format would raise — presumably the
            # env variables are always set in practice; confirm.
            label = "{:s}".format(jid)
            confdict = kw.get('flowscheduler', dict())
            confdict.setdefault('ECF_RID', rid)
            ad.flow_conf(confdict)

            t.sh.highlight('Flow Scheduler ({:s}) Settings'.format(self.backend))
            ad.flow_info()
            self._printfmt('')
            self._printfmt('Flow scheduler client path: {:s}', ad.flow_path())

            # Initialise the flow scheduler (only once, on the first MTOOL
            # step — non-MTOOL jobs always count as "first").
            mstep_first = getattr(self.masterja, 'mstep_is_first', True)
            mtplug = self._flow_sched_mtool_plugin
            if mstep_first:
                ad.flow_init(rid)
            if mtplug is not None:
                label = "{:s} (mtoolid={!s})".format(label, mtplug.mtoolid)
                if self.mtoolmeters:
                    ad.flow_meter('work', 1 + (mtplug.step - 1) * 2)
            if self.jobidlabels:
                ad.flow_label('jobid', label)

    def plugable_complete(self, t):
        """Should be called when a job finishes successfully."""
        if self.masterja.subjob_tag is None:
            mstep_last = getattr(self.masterja, 'mstep_is_last', True)
            mtplug = self._flow_sched_mtool_plugin
            if mtplug is not None:
                if self.mtoolmeters:
                    ad.flow_meter('work', 2 + (mtplug.step - 1) * 2)
            # Only the last MTOOL step reports completion to the scheduler.
            if mstep_last:
                ad.flow_complete()

    def plugable_rescue(self, t):
        """Called at the end of a job when something went wrong."""
        if self.masterja.subjob_tag is None:
            ad.flow_abort("An exception was caught")
|
|
1100
|
-
|
|
1101
|
-
|
|
1102
|
-
class JobAssistantEpygramPlugin(JobAssistantPlugin):
    """JobAssistant plugin that prepares the environment for epygram."""

    _footprint = dict(
        info = 'JobAssistant Plugin to perform the epygram setup',
        attr = dict(
            kind = dict(
                values = ['epygram_setup', ]
            ),
        ),
    )

    def plugable_env_setup(self, t, **kw):  # @UnusedVariable
        """If epygram is importable, extend sys.path and set ECCODES_DIR."""
        # Look for an epygram package directory on sys.path
        epygram_re = re.compile(r'.*epygram$')
        matches = [bool(epygram_re.match(entry)) for entry in sys.path]
        if any(matches):
            i_epygram = matches.index(True)
            logger.info('Epygram package found in path: %s', sys.path[i_epygram])
            # Add eccodes and site subdirectories if necessary
            for subdir in ('eccodes_python', 'site'):
                candidate = t.sh.path.join(sys.path[i_epygram], subdir)
                if candidate not in sys.path:
                    logger.info('Extending python path with: %s', candidate)
                    sys.path.insert(i_epygram + 1, candidate)
            edir_path = t.sh.path.join(sys.path[i_epygram], 'eccodes_dir')
            if t.sh.path.exists(edir_path):
                logger.info('ECCODES_DIR environment variable setup to %s', edir_path)
                t.env.ECCODES_DIR = edir_path
        # In any case, run with the Agg matplotlib backend (headless safe)
        t.env.MPLBACKEND = 'Agg'
|
|
1132
|
-
|
|
1133
|
-
|
|
1134
|
-
class JobAssistantAppWideLockPlugin(JobAssistantPlugin):
    """Manage an application wide lock.

    If **acquire** is True, the lock will be acquired when the job starts (if
    the lock is already taken, it will fail). If **release** is True, the
    lock will be released at the end of the job. In any case, the lock will
    be released whenever the job crashes.

    The lock mechanism that is used is :meth:`vortex.tools.systems.OSExtended.appwide_lock`.

    Prior to being used, the **label** will be formatted by the string's format
    method using any rd|op_* variable in the submitted job. For example::

        >>> label = 'my_lock_{xpid:s}'

    This class is not usable on its own. It must be subclassed in the target
    application (in the python module that holds the job's driver):

    * **kind** must be provided with a unique authorised value;
    * **label** must be set optional and given a default value;
    * **acquire** and **release** default values may be changed depending
      on ones needs.
    """

    _abstract = True
    _footprint = dict(
        info='JobAssistant to deal with application wide locks.',
        attr = dict(
            label=dict(
                info="The name of the lock.",
            ),
            acquire=dict(
                info="Acquire the lock during the setup phase.",
                type=bool,
                optional=True,
                default=False,
            ),
            release=dict(
                info="Release the lock at the end.",
                type=bool,
                optional=True,
                default=False,
            ),
            blocking=dict(
                info="Block when acquiring the lock.",
                type=bool,
                optional=True,
                default=False,
            ),
            blocking_timeout=dict(
                info="Block at most N seconds.",
                type=int,
                optional=True,
                default=300,
            ),
        ),
    )

    def __init__(self, *kargs, **kwargs):
        super().__init__(*kargs, **kwargs)
        # Formatted lock name (set in plugable_job_final_init).
        self._appwide_lock_label = None
        # Tri-state: None = never attempted, True/False = acquisition result.
        self._appwide_lock_acquired = None

    def plugable_job_final_init(self, t, **kw):
        """Acquire the lock on job startup."""
        self._appwide_lock_label = self.label.format(** self.masterja.special_variables)
        if self.acquire:
            # With MTOOL, only the first step acquires the lock.
            if getattr(self.masterja, 'mstep_is_first', True):
                logger.info("Acquiring the '%s' application wide lock",
                            self._appwide_lock_label)
                self._appwide_lock_acquired = t.sh.appwide_lock(self._appwide_lock_label,
                                                                blocking=self.blocking,
                                                                timeout=self.blocking_timeout)
                if not self._appwide_lock_acquired:
                    logger.error("Acquiring the '%s' application wide lock failed.",
                                 self._appwide_lock_label)
                    raise RuntimeError("Unable to acquire the '{:s}' application wide lock."
                                       .format(self._appwide_lock_label))

    def _appwide_lock_release(self, t):
        """Actually release the lock."""
        if self._appwide_lock_label:
            logger.info("Releasing the '%s' application wide lock",
                        self._appwide_lock_label)
            t.sh.appwide_unlock(self._appwide_lock_label)

    def plugable_complete(self, t):
        """Should be called when a job finishes successfully."""
        if self.release:
            # With MTOOL, only the last step releases the lock.
            if getattr(self.masterja, 'mstep_is_last', True):
                self._appwide_lock_release(t)

    def plugable_rescue(self, t):
        """Should be called when a job fails."""
        # Best effort: release unless acquisition explicitly failed
        # (None, i.e. never attempted, still triggers a release attempt).
        if self._appwide_lock_acquired is not False:
            self._appwide_lock_release(t)
|
|
1230
|
-
|
|
1231
|
-
|
|
1232
|
-
class JobAssistantRdMailSetupPlugin(JobAssistantPlugin):
    """Activate/Deactivate mail actions for R&D tasks."""

    _footprint = dict(
        # Fix: the info string was a copy-paste leftover about "application
        # wide locks"; this plugin actually handles mail actions.
        info='JobAssistant to activate mail actions for R&D tasks.',
        attr = dict(
            kind=dict(
                values=['rd_mail_setup', ]
            ),
        )
    )

    def plugable_actions_setup(self, t, **kw):
        """Activate the relevant mail actions when a recipient is configured."""
        if self.masterja.conf.get('mail_to', None):
            # All *mail actions except the plain 'mail' and 'opmail' ones.
            todo = {a for a in ad.actions
                    if a.endswith('mail') and a not in ('mail', 'opmail')}
            for candidate in todo:
                for action in ad.candidates(candidate):
                    logger.info('Activating the << %s >> action.', action.kind)
                    action.on()
|
|
1253
|
-
|
|
1254
|
-
|
|
1255
|
-
class JobAssistantUenvGdataDetourPlugin(JobAssistantPlugin):
    """Setup an alternative location for GCO data (gget) referenced in Uenvs."""

    _footprint = dict(
        info='JobAssistant to deal with Uenv alternative locations .',
        attr = dict(
            kind=dict(
                values=['uenv_gdata_detour', ]
            ),
        )
    )

    def plugable_extra_session_setup(self, t, **kw):
        """Configure the uenv gdata detour from the job configuration."""
        detour = self.masterja.conf.get('uenv_gdata_detour', None)
        if not detour:
            logger.info('No relevant uenv_gdata_detour variable was found in the job conf.')
            return
        from vortex_gco.tools.uenv import config as u_config
        u_config('gdata_detour', value=detour)
        logger.info('gdata referenced in uenvs will be taken in the "@%s" uget location.',
                    detour)
|