vortex-nwp 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vortex/__init__.py +159 -0
- vortex/algo/__init__.py +13 -0
- vortex/algo/components.py +2462 -0
- vortex/algo/mpitools.py +1953 -0
- vortex/algo/mpitools_templates/__init__.py +1 -0
- vortex/algo/mpitools_templates/envelope_wrapper_default.tpl +27 -0
- vortex/algo/mpitools_templates/envelope_wrapper_mpiauto.tpl +29 -0
- vortex/algo/mpitools_templates/wrapstd_wrapper_default.tpl +18 -0
- vortex/algo/serversynctools.py +171 -0
- vortex/config.py +112 -0
- vortex/data/__init__.py +19 -0
- vortex/data/abstractstores.py +1510 -0
- vortex/data/containers.py +835 -0
- vortex/data/contents.py +622 -0
- vortex/data/executables.py +275 -0
- vortex/data/flow.py +119 -0
- vortex/data/geometries.ini +2689 -0
- vortex/data/geometries.py +799 -0
- vortex/data/handlers.py +1230 -0
- vortex/data/outflow.py +67 -0
- vortex/data/providers.py +487 -0
- vortex/data/resources.py +207 -0
- vortex/data/stores.py +1390 -0
- vortex/data/sync_templates/__init__.py +0 -0
- vortex/gloves.py +309 -0
- vortex/layout/__init__.py +20 -0
- vortex/layout/contexts.py +577 -0
- vortex/layout/dataflow.py +1220 -0
- vortex/layout/monitor.py +969 -0
- vortex/nwp/__init__.py +14 -0
- vortex/nwp/algo/__init__.py +21 -0
- vortex/nwp/algo/assim.py +537 -0
- vortex/nwp/algo/clim.py +1086 -0
- vortex/nwp/algo/coupling.py +831 -0
- vortex/nwp/algo/eda.py +840 -0
- vortex/nwp/algo/eps.py +785 -0
- vortex/nwp/algo/forecasts.py +886 -0
- vortex/nwp/algo/fpserver.py +1303 -0
- vortex/nwp/algo/ifsnaming.py +463 -0
- vortex/nwp/algo/ifsroot.py +404 -0
- vortex/nwp/algo/monitoring.py +263 -0
- vortex/nwp/algo/mpitools.py +694 -0
- vortex/nwp/algo/odbtools.py +1258 -0
- vortex/nwp/algo/oopsroot.py +916 -0
- vortex/nwp/algo/oopstests.py +220 -0
- vortex/nwp/algo/request.py +660 -0
- vortex/nwp/algo/stdpost.py +1641 -0
- vortex/nwp/data/__init__.py +30 -0
- vortex/nwp/data/assim.py +380 -0
- vortex/nwp/data/boundaries.py +314 -0
- vortex/nwp/data/climfiles.py +521 -0
- vortex/nwp/data/configfiles.py +153 -0
- vortex/nwp/data/consts.py +954 -0
- vortex/nwp/data/ctpini.py +149 -0
- vortex/nwp/data/diagnostics.py +209 -0
- vortex/nwp/data/eda.py +147 -0
- vortex/nwp/data/eps.py +432 -0
- vortex/nwp/data/executables.py +1045 -0
- vortex/nwp/data/fields.py +111 -0
- vortex/nwp/data/gridfiles.py +380 -0
- vortex/nwp/data/logs.py +584 -0
- vortex/nwp/data/modelstates.py +363 -0
- vortex/nwp/data/monitoring.py +193 -0
- vortex/nwp/data/namelists.py +696 -0
- vortex/nwp/data/obs.py +840 -0
- vortex/nwp/data/oopsexec.py +74 -0
- vortex/nwp/data/providers.py +207 -0
- vortex/nwp/data/query.py +206 -0
- vortex/nwp/data/stores.py +160 -0
- vortex/nwp/data/surfex.py +337 -0
- vortex/nwp/syntax/__init__.py +9 -0
- vortex/nwp/syntax/stdattrs.py +437 -0
- vortex/nwp/tools/__init__.py +10 -0
- vortex/nwp/tools/addons.py +40 -0
- vortex/nwp/tools/agt.py +67 -0
- vortex/nwp/tools/bdap.py +59 -0
- vortex/nwp/tools/bdcp.py +41 -0
- vortex/nwp/tools/bdm.py +24 -0
- vortex/nwp/tools/bdmp.py +54 -0
- vortex/nwp/tools/conftools.py +1661 -0
- vortex/nwp/tools/drhook.py +66 -0
- vortex/nwp/tools/grib.py +294 -0
- vortex/nwp/tools/gribdiff.py +104 -0
- vortex/nwp/tools/ifstools.py +203 -0
- vortex/nwp/tools/igastuff.py +273 -0
- vortex/nwp/tools/mars.py +68 -0
- vortex/nwp/tools/odb.py +657 -0
- vortex/nwp/tools/partitioning.py +258 -0
- vortex/nwp/tools/satrad.py +71 -0
- vortex/nwp/util/__init__.py +6 -0
- vortex/nwp/util/async.py +212 -0
- vortex/nwp/util/beacon.py +40 -0
- vortex/nwp/util/diffpygram.py +447 -0
- vortex/nwp/util/ens.py +279 -0
- vortex/nwp/util/hooks.py +139 -0
- vortex/nwp/util/taskdeco.py +85 -0
- vortex/nwp/util/usepygram.py +697 -0
- vortex/nwp/util/usetnt.py +101 -0
- vortex/proxy.py +6 -0
- vortex/sessions.py +374 -0
- vortex/syntax/__init__.py +9 -0
- vortex/syntax/stdattrs.py +867 -0
- vortex/syntax/stddeco.py +185 -0
- vortex/toolbox.py +1117 -0
- vortex/tools/__init__.py +20 -0
- vortex/tools/actions.py +523 -0
- vortex/tools/addons.py +316 -0
- vortex/tools/arm.py +96 -0
- vortex/tools/compression.py +325 -0
- vortex/tools/date.py +27 -0
- vortex/tools/ddhpack.py +10 -0
- vortex/tools/delayedactions.py +782 -0
- vortex/tools/env.py +541 -0
- vortex/tools/folder.py +834 -0
- vortex/tools/grib.py +738 -0
- vortex/tools/lfi.py +953 -0
- vortex/tools/listings.py +423 -0
- vortex/tools/names.py +637 -0
- vortex/tools/net.py +2124 -0
- vortex/tools/odb.py +10 -0
- vortex/tools/parallelism.py +368 -0
- vortex/tools/prestaging.py +210 -0
- vortex/tools/rawfiles.py +10 -0
- vortex/tools/schedulers.py +480 -0
- vortex/tools/services.py +940 -0
- vortex/tools/storage.py +996 -0
- vortex/tools/surfex.py +61 -0
- vortex/tools/systems.py +3976 -0
- vortex/tools/targets.py +440 -0
- vortex/util/__init__.py +9 -0
- vortex/util/config.py +1122 -0
- vortex/util/empty.py +24 -0
- vortex/util/helpers.py +216 -0
- vortex/util/introspection.py +69 -0
- vortex/util/iosponge.py +80 -0
- vortex/util/roles.py +49 -0
- vortex/util/storefunctions.py +129 -0
- vortex/util/structs.py +26 -0
- vortex/util/worker.py +162 -0
- vortex_nwp-2.0.0.dist-info/METADATA +67 -0
- vortex_nwp-2.0.0.dist-info/RECORD +144 -0
- vortex_nwp-2.0.0.dist-info/WHEEL +5 -0
- vortex_nwp-2.0.0.dist-info/licenses/LICENSE +517 -0
- vortex_nwp-2.0.0.dist-info/top_level.txt +1 -0
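Before the diff itself, here is a small illustrative sketch of how two of the helpers added in vortex/tools/net.py (shown below) might be used. It assumes the wheel is installed so that the module imports as vortex.tools.net; the URL, host and credentials are made up for the example and are not part of the package.

    from vortex.tools.net import uriparse, http_post_data

    # uriparse() splits a URI string into a dict with the keys listed in its
    # docstring below (scheme, netloc, port, query, username, password).
    info = uriparse("ftp://user:secret@host.example.com:2121/some/file?fmt=grib")
    # info["scheme"]   -> "ftp"
    # info["netloc"]   -> "host.example.com"
    # info["port"]     -> "2121"   (kept as a string)
    # info["username"] -> "user"
    # info["query"]    -> {"fmt": ["grib"]}

    # http_post_data() POSTs "data" and returns a 4-tuple:
    # (success flag, status code, response headers, response text).
    ok, status, headers, text = http_post_data(
        "http://example.com/api", {"key": "value"}
    )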
vortex/tools/net.py
ADDED
|
@@ -0,0 +1,2124 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Net tools.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import abc
|
|
6
|
+
import binascii
|
|
7
|
+
import collections
|
|
8
|
+
from collections import namedtuple
|
|
9
|
+
from datetime import datetime
|
|
10
|
+
import ftplib
|
|
11
|
+
import functools
|
|
12
|
+
import io
|
|
13
|
+
import itertools
|
|
14
|
+
import operator
|
|
15
|
+
import random
|
|
16
|
+
import re
|
|
17
|
+
import shlex
|
|
18
|
+
import socket
|
|
19
|
+
import stat
|
|
20
|
+
import struct
|
|
21
|
+
import time
|
|
22
|
+
from urllib import request as urlrequest
|
|
23
|
+
from urllib import parse as urlparse
|
|
24
|
+
|
|
25
|
+
from bronx.fancies import loggers
|
|
26
|
+
from bronx.net.netrc import netrc
|
|
27
|
+
from bronx.syntax.decorators import nicedeco, secure_getattr
|
|
28
|
+
|
|
29
|
+
#: No automatic export
|
|
30
|
+
__all__ = []
|
|
31
|
+
|
|
32
|
+
logger = loggers.getLogger(__name__)
|
|
33
|
+
|
|
34
|
+
DEFAULT_FTP_PORT = ftplib.FTP_PORT
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def uriparse(uristring):
|
|
38
|
+
"""Parse the specified ``uristring`` as a dictionary including keys:
|
|
39
|
+
|
|
40
|
+
* scheme
|
|
41
|
+
* netloc
|
|
42
|
+
* port
|
|
43
|
+
* query
|
|
44
|
+
* username
|
|
45
|
+
* password
|
|
46
|
+
"""
|
|
47
|
+
(realscheme, other) = uristring.split(":", 1)
|
|
48
|
+
rp = urlparse.urlparse("http:" + other)
|
|
49
|
+
uridict = rp._asdict()
|
|
50
|
+
netloc = uridict["netloc"].split("@", 1)
|
|
51
|
+
hostport = netloc.pop().split(":")
|
|
52
|
+
uridict["netloc"] = hostport.pop(0)
|
|
53
|
+
if hostport:
|
|
54
|
+
uridict["port"] = hostport.pop()
|
|
55
|
+
else:
|
|
56
|
+
uridict["port"] = None
|
|
57
|
+
if netloc:
|
|
58
|
+
userpass = netloc.pop().split(":")
|
|
59
|
+
uridict["username"] = userpass.pop(0)
|
|
60
|
+
if userpass:
|
|
61
|
+
uridict["password"] = userpass.pop()
|
|
62
|
+
else:
|
|
63
|
+
uridict["password"] = None
|
|
64
|
+
else:
|
|
65
|
+
uridict["username"] = None
|
|
66
|
+
uridict["password"] = None
|
|
67
|
+
uridict["scheme"] = realscheme
|
|
68
|
+
uridict["query"] = urlparse.parse_qs(uridict["query"])
|
|
69
|
+
return uridict
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
def uriunparse(uridesc):
|
|
73
|
+
"""Delegates to :mod:`urlparse` the job to unparse the given description (as a dictionary)."""
|
|
74
|
+
return urlparse.urlunparse(uridesc)
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
def http_post_data(
|
|
78
|
+
url, data, ok_statuses=(), proxies=None, headers=None, verify=None
|
|
79
|
+
):
|
|
80
|
+
"""Make a http/https POST request, encoding **data**."""
|
|
81
|
+
if isinstance(proxies, (list, tuple)):
|
|
82
|
+
proxies = {scheme: proxies for scheme in ("http", "https")}
|
|
83
|
+
# Try to use the requests package
|
|
84
|
+
try:
|
|
85
|
+
import requests
|
|
86
|
+
|
|
87
|
+
use_requests = True
|
|
88
|
+
except ImportError:
|
|
89
|
+
use_requests = False
|
|
90
|
+
# The modern way
|
|
91
|
+
if use_requests:
|
|
92
|
+
resp = requests.post(
|
|
93
|
+
url=url, data=data, headers=headers, proxies=proxies, verify=verify
|
|
94
|
+
)
|
|
95
|
+
if ok_statuses:
|
|
96
|
+
is_ok = resp.status_code in ok_statuses
|
|
97
|
+
else:
|
|
98
|
+
is_ok = resp.ok
|
|
99
|
+
return is_ok, resp.status_code, resp.headers, resp.text
|
|
100
|
+
else:
|
|
101
|
+
if not isinstance(data, bytes):
|
|
102
|
+
data = urlparse.urlencode(data).encode("utf-8")
|
|
103
|
+
if uriparse(url)["scheme"] == "https":
|
|
104
|
+
raise RuntimeError(
|
|
105
|
+
"HTTPS is not properly supported by urllib.request ({}).".format(
|
|
106
|
+
url
|
|
107
|
+
)
|
|
108
|
+
)
|
|
109
|
+
handlers = []
|
|
110
|
+
if isinstance(proxies, dict):
|
|
111
|
+
handlers.append(urlrequest.ProxyHandler(proxies))
|
|
112
|
+
opener = urlrequest.build_opener(*handlers)
|
|
113
|
+
req = urlrequest.Request(
|
|
114
|
+
url=url, data=data, headers={} if headers is None else headers
|
|
115
|
+
)
|
|
116
|
+
try:
|
|
117
|
+
req_f = opener.open(req)
|
|
118
|
+
except Exception as e:
|
|
119
|
+
try: # ignore UnboundLocalError if req_f has not been created yet
|
|
120
|
+
req_f.close()
|
|
121
|
+
finally:
|
|
122
|
+
raise e
|
|
123
|
+
else:
|
|
124
|
+
try:
|
|
125
|
+
req_rc = req_f.getcode()
|
|
126
|
+
req_info = req_f.info()
|
|
127
|
+
req_data = req_f.read().decode("utf-8")
|
|
128
|
+
if ok_statuses:
|
|
129
|
+
return req_rc in ok_statuses, req_rc, req_info, req_data
|
|
130
|
+
else:
|
|
131
|
+
return 200 <= req_rc < 400, req_rc, req_info, req_data
|
|
132
|
+
finally:
|
|
133
|
+
req_f.close()
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
def netrc_lookup(logname, hostname, nrcfile=None):
|
|
137
|
+
"""Looks into the .netrc file to find FTP authentication credentials.
|
|
138
|
+
|
|
139
|
+
:param str logname: The login to look for
|
|
140
|
+
:param str hostname: The hostname to look for
|
|
141
|
+
|
|
142
|
+
For backward compatibility reasons:
|
|
143
|
+
|
|
144
|
+
* If *hostname* is a FQDN, an attempt will be made using the hostname
|
|
145
|
+
alone.
|
|
146
|
+
* If credentials are not found for the *logname*/*hostname* pair, an attempt
|
|
147
|
+
is made ignoring the provided *logname*.
|
|
148
|
+
|
|
149
|
+
"""
|
|
150
|
+
actual_logname = None
|
|
151
|
+
actual_pwd = None
|
|
152
|
+
nrc = netrc(file=nrcfile)
|
|
153
|
+
if nrc:
|
|
154
|
+
auth = nrc.authenticators(hostname, login=logname)
|
|
155
|
+
if not auth:
|
|
156
|
+
# self.host may be a FQDN, try to guess only the hostname
|
|
157
|
+
auth = nrc.authenticators(hostname.split(".")[0], login=logname)
|
|
158
|
+
# for backward compatibility: This might be removed one day
|
|
159
|
+
if not auth:
|
|
160
|
+
auth = nrc.authenticators(hostname)
|
|
161
|
+
if not auth:
|
|
162
|
+
# self.host may be a FQDN, try to guess only the hostname
|
|
163
|
+
auth = nrc.authenticators(hostname.split(".")[0])
|
|
164
|
+
# End of backward compatibility section
|
|
165
|
+
if auth:
|
|
166
|
+
actual_logname = auth[0]
|
|
167
|
+
actual_pwd = auth[2]
|
|
168
|
+
else:
|
|
169
|
+
logger.warning("netrc lookup failed (%s)", str(auth))
|
|
170
|
+
else:
|
|
171
|
+
logger.warning("unable to fetch .netrc file")
|
|
172
|
+
return actual_logname, actual_pwd
|
|
173
|
+
|
|
174
|
+
|
|
175
|
+
class ExtendedFtplib:
|
|
176
|
+
"""Simple Vortex's extension to the bare ftplib object.
|
|
177
|
+
|
|
178
|
+
It wraps the standard ftplib object to add or overwrite methods.
|
|
179
|
+
"""
|
|
180
|
+
|
|
181
|
+
def __init__(self, system, ftpobj, hostname="", port=DEFAULT_FTP_PORT):
|
|
182
|
+
"""
|
|
183
|
+
:param ~vortex.tools.systems.OSExtended system: The system object to work with
|
|
184
|
+
:param ftplib.FTP ftpobj: The FTP object to work with / to extend
|
|
185
|
+
"""
|
|
186
|
+
self._system = system
|
|
187
|
+
self._ftplib = ftpobj
|
|
188
|
+
self._closed = True
|
|
189
|
+
self._logname = "not_logged_in"
|
|
190
|
+
self._created = datetime.now()
|
|
191
|
+
self._opened = None
|
|
192
|
+
self._deleted = None
|
|
193
|
+
if hostname:
|
|
194
|
+
self._ftplib.connect(hostname, port)
|
|
195
|
+
|
|
196
|
+
@property
|
|
197
|
+
def host(self):
|
|
198
|
+
"""Return the hostname."""
|
|
199
|
+
return self._ftplib.host
|
|
200
|
+
|
|
201
|
+
@property
|
|
202
|
+
def port(self):
|
|
203
|
+
"""Return the port number."""
|
|
204
|
+
return self._ftplib.port
|
|
205
|
+
|
|
206
|
+
def __str__(self):
|
|
207
|
+
"""
|
|
208
|
+
Nicely formatted print, built as the concatenation
|
|
209
|
+
of the class full name and `logname` and `length` attributes.
|
|
210
|
+
"""
|
|
211
|
+
return "{:s} | host={:s} logname={:s} since={!s}>".format(
|
|
212
|
+
repr(self).rstrip(">"),
|
|
213
|
+
self.host,
|
|
214
|
+
self.logname,
|
|
215
|
+
self.length,
|
|
216
|
+
)
|
|
217
|
+
|
|
218
|
+
@secure_getattr
|
|
219
|
+
def __getattr__(self, key):
|
|
220
|
+
"""Gateway to undefined method or attributes if present in ``_ftplib``."""
|
|
221
|
+
actualattr = getattr(self._ftplib, key)
|
|
222
|
+
if callable(actualattr):
|
|
223
|
+
|
|
224
|
+
def osproxy(*args, **kw):
|
|
225
|
+
cmd = [key]
|
|
226
|
+
cmd.extend(args)
|
|
227
|
+
cmd.extend(["{:s}={!s}".format(x, kw[x]) for x in kw.keys()])
|
|
228
|
+
self.stderr(*cmd)
|
|
229
|
+
return actualattr(*args, **kw)
|
|
230
|
+
|
|
231
|
+
osproxy.func_name = str(key)
|
|
232
|
+
osproxy.__name__ = str(key)
|
|
233
|
+
osproxy.func_doc = actualattr.__doc__
|
|
234
|
+
setattr(self, key, osproxy)
|
|
235
|
+
return osproxy
|
|
236
|
+
else:
|
|
237
|
+
return actualattr
|
|
238
|
+
|
|
239
|
+
@property
|
|
240
|
+
def system(self):
|
|
241
|
+
"""Current local system interface."""
|
|
242
|
+
return self._system
|
|
243
|
+
|
|
244
|
+
def stderr(self, cmd, *args):
|
|
245
|
+
"""Proxy to local system's standard error."""
|
|
246
|
+
self.system.stderr("ftp:" + cmd, *args)
|
|
247
|
+
|
|
248
|
+
@property
|
|
249
|
+
def closed(self):
|
|
250
|
+
"""Current status of the ftp connection."""
|
|
251
|
+
return self._closed
|
|
252
|
+
|
|
253
|
+
@property
|
|
254
|
+
def logname(self):
|
|
255
|
+
"""Current logname of the ftp connection."""
|
|
256
|
+
return self._logname
|
|
257
|
+
|
|
258
|
+
@property
|
|
259
|
+
def length(self):
|
|
260
|
+
"""Length in seconds of the current opened connection."""
|
|
261
|
+
timelength = 0
|
|
262
|
+
try:
|
|
263
|
+
topnow = datetime.now() if self._deleted is None else self._deleted
|
|
264
|
+
timelength = (topnow - self._opened).total_seconds()
|
|
265
|
+
except TypeError:
|
|
266
|
+
logger.warning(
|
|
267
|
+
"Could not evaluate connexion length %s", repr(self)
|
|
268
|
+
)
|
|
269
|
+
return timelength
|
|
270
|
+
|
|
271
|
+
def close(self):
|
|
272
|
+
"""Proxy to ftplib :meth:`ftplib.FTP.close`."""
|
|
273
|
+
self.stderr("close")
|
|
274
|
+
rc = True
|
|
275
|
+
if not self.closed:
|
|
276
|
+
rc = self._ftplib.close() or True
|
|
277
|
+
self._closed = True
|
|
278
|
+
self._deleted = datetime.now()
|
|
279
|
+
return rc
|
|
280
|
+
|
|
281
|
+
def login(self, *args):
|
|
282
|
+
"""Proxy to ftplib :meth:`ftplib.FTP.login`."""
|
|
283
|
+
self.stderr("login", args[0])
|
|
284
|
+
self._logname = args[0]
|
|
285
|
+
# kept for debugging, but this exposes the user's password!
|
|
286
|
+
# logger.debug('FTP login <args:%s>', str(args))
|
|
287
|
+
rc = self._ftplib.login(*args)
|
|
288
|
+
if rc:
|
|
289
|
+
self._closed = False
|
|
290
|
+
self._deleted = None
|
|
291
|
+
self._opened = datetime.now()
|
|
292
|
+
else:
|
|
293
|
+
logger.warning("FTP could not login <args:%s>", str(args))
|
|
294
|
+
return rc
|
|
295
|
+
|
|
296
|
+
def list(self, *args):
|
|
297
|
+
"""Returns standard directory listing from ftp protocol."""
|
|
298
|
+
self.stderr("list", *args)
|
|
299
|
+
contents = []
|
|
300
|
+
self.retrlines("LIST", callback=contents.append)
|
|
301
|
+
return contents
|
|
302
|
+
|
|
303
|
+
def dir(self, *args):
|
|
304
|
+
"""Proxy to ftplib :meth:`ftplib.FTP.dir`."""
|
|
305
|
+
self.stderr("dir", *args)
|
|
306
|
+
return self._ftplib.dir(*args)
|
|
307
|
+
|
|
308
|
+
def ls(self, *args):
|
|
309
|
+
"""Returns directory listing."""
|
|
310
|
+
self.stderr("ls", *args)
|
|
311
|
+
return self.dir(*args)
|
|
312
|
+
|
|
313
|
+
ll = ls
|
|
314
|
+
|
|
315
|
+
def get(self, source, destination):
|
|
316
|
+
"""Retrieve a remote `destination` file to a local `source` file object."""
|
|
317
|
+
self.stderr("get", source, destination)
|
|
318
|
+
if isinstance(destination, str):
|
|
319
|
+
self.system.filecocoon(destination)
|
|
320
|
+
target = open(destination, "wb")
|
|
321
|
+
xdestination = True
|
|
322
|
+
else:
|
|
323
|
+
target = destination
|
|
324
|
+
xdestination = False
|
|
325
|
+
logger.info("FTP <get:{:s}>".format(source))
|
|
326
|
+
rc = False
|
|
327
|
+
try:
|
|
328
|
+
self.retrbinary("RETR " + source, target.write)
|
|
329
|
+
if xdestination:
|
|
330
|
+
target.seek(0, io.SEEK_END)
|
|
331
|
+
if self.size(source) == target.tell():
|
|
332
|
+
rc = True
|
|
333
|
+
else:
|
|
334
|
+
logger.error("FTP incomplete get %s", repr(source))
|
|
335
|
+
else:
|
|
336
|
+
rc = True
|
|
337
|
+
finally:
|
|
338
|
+
if xdestination:
|
|
339
|
+
target.close()
|
|
340
|
+
# If the ftp GET fails, a zero size file is here: remove it
|
|
341
|
+
if not rc:
|
|
342
|
+
self.system.remove(destination)
|
|
343
|
+
return rc
|
|
344
|
+
|
|
345
|
+
def put(self, source, destination, size=None, exact=False):
|
|
346
|
+
"""Store a local `source` file object to a remote `destination`.
|
|
347
|
+
|
|
348
|
+
When `size` is known, it is sent to the ftp server with the ALLO
|
|
349
|
+
command. It is mesured in this method for real files, but should
|
|
350
|
+
be given for other (non-seekeable) sources such as pipes.
|
|
351
|
+
|
|
352
|
+
When `exact` is True, the size is checked against the size of the
|
|
353
|
+
destination, and a mismatch is considered a failure.
|
|
354
|
+
"""
|
|
355
|
+
self.stderr("put", source, destination)
|
|
356
|
+
if isinstance(source, str):
|
|
357
|
+
inputsrc = open(source, "rb")
|
|
358
|
+
xsource = True
|
|
359
|
+
else:
|
|
360
|
+
inputsrc = source
|
|
361
|
+
xsource = False
|
|
362
|
+
try:
|
|
363
|
+
inputsrc.seek(0, io.SEEK_END)
|
|
364
|
+
size = inputsrc.tell()
|
|
365
|
+
exact = True
|
|
366
|
+
inputsrc.seek(0)
|
|
367
|
+
except AttributeError:
|
|
368
|
+
logger.warning("Could not rewind <source:%s>", str(source))
|
|
369
|
+
except OSError:
|
|
370
|
+
logger.debug("Seek trouble <source:%s>", str(source))
|
|
371
|
+
|
|
372
|
+
self.rmkdir(destination)
|
|
373
|
+
try:
|
|
374
|
+
self.delete(destination)
|
|
375
|
+
logger.info("Replacing <file:%s>", str(destination))
|
|
376
|
+
except ftplib.error_perm:
|
|
377
|
+
logger.info("Creating <file:%s>", str(destination))
|
|
378
|
+
except (
|
|
379
|
+
ValueError,
|
|
380
|
+
TypeError,
|
|
381
|
+
OSError,
|
|
382
|
+
ftplib.error_proto,
|
|
383
|
+
ftplib.error_reply,
|
|
384
|
+
ftplib.error_temp,
|
|
385
|
+
) as e:
|
|
386
|
+
logger.error(
|
|
387
|
+
"Serious delete trouble <file:%s> <error:%s>",
|
|
388
|
+
str(destination),
|
|
389
|
+
str(e),
|
|
390
|
+
)
|
|
391
|
+
|
|
392
|
+
logger.info("FTP <put:%s>", str(destination))
|
|
393
|
+
rc = False
|
|
394
|
+
|
|
395
|
+
if size is not None:
|
|
396
|
+
try:
|
|
397
|
+
self.voidcmd("ALLO {:d}".format(size))
|
|
398
|
+
except ftplib.error_perm:
|
|
399
|
+
pass
|
|
400
|
+
|
|
401
|
+
try:
|
|
402
|
+
self.storbinary("STOR " + destination, inputsrc)
|
|
403
|
+
if exact:
|
|
404
|
+
if self.size(destination) == size:
|
|
405
|
+
rc = True
|
|
406
|
+
else:
|
|
407
|
+
logger.error(
|
|
408
|
+
"FTP incomplete put %s (%d / %d bytes)",
|
|
409
|
+
repr(source),
|
|
410
|
+
self.size(destination),
|
|
411
|
+
size,
|
|
412
|
+
)
|
|
413
|
+
else:
|
|
414
|
+
rc = True
|
|
415
|
+
if self.size(destination) != size:
|
|
416
|
+
logger.info(
|
|
417
|
+
"FTP put %s: estimated %s bytes, real %s bytes",
|
|
418
|
+
repr(source),
|
|
419
|
+
str(size),
|
|
420
|
+
self.size(destination),
|
|
421
|
+
)
|
|
422
|
+
finally:
|
|
423
|
+
if xsource:
|
|
424
|
+
inputsrc.close()
|
|
425
|
+
return rc
|
|
426
|
+
|
|
427
|
+
def rmkdir(self, destination):
|
|
428
|
+
"""Recursive directory creation (mimics `mkdir -p`)."""
|
|
429
|
+
self.stderr("rmkdir", destination)
|
|
430
|
+
origin = self.pwd()
|
|
431
|
+
if destination.startswith("/"):
|
|
432
|
+
path_pre = "/"
|
|
433
|
+
elif destination.startswith("~"):
|
|
434
|
+
path_pre = ""
|
|
435
|
+
else:
|
|
436
|
+
path_pre = origin + "/"
|
|
437
|
+
|
|
438
|
+
for subdir in self.system.path.dirname(destination).split("/"):
|
|
439
|
+
current = path_pre + subdir
|
|
440
|
+
try:
|
|
441
|
+
self.cwd(current)
|
|
442
|
+
path_pre = current + "/"
|
|
443
|
+
except ftplib.error_perm:
|
|
444
|
+
self.stderr("mkdir", current)
|
|
445
|
+
try:
|
|
446
|
+
self.mkd(current)
|
|
447
|
+
except ftplib.error_perm as errmkd:
|
|
448
|
+
if "File exists" not in str(errmkd):
|
|
449
|
+
raise
|
|
450
|
+
self.cwd(current)
|
|
451
|
+
path_pre = current + "/"
|
|
452
|
+
self.cwd(origin)
|
|
453
|
+
|
|
454
|
+
def cd(self, destination):
|
|
455
|
+
"""Change to a directory."""
|
|
456
|
+
return self.cwd(destination)
|
|
457
|
+
|
|
458
|
+
def rm(self, source):
|
|
459
|
+
"""Proxy to ftp delete command."""
|
|
460
|
+
return self.delete(source)
|
|
461
|
+
|
|
462
|
+
def mtime(self, filename):
|
|
463
|
+
"""Retrieve the modification time of a file."""
|
|
464
|
+
resp = self.sendcmd("MDTM " + filename)
|
|
465
|
+
if resp[:3] == "213":
|
|
466
|
+
s = resp[3:].strip().split()[-1]
|
|
467
|
+
return int(s)
|
|
468
|
+
|
|
469
|
+
def size(self, filename):
|
|
470
|
+
"""Retrieve the size of a file."""
|
|
471
|
+
# The SIZE command is defined in RFC-3659
|
|
472
|
+
resp = self.sendcmd("SIZE " + filename)
|
|
473
|
+
if resp[:3] == "213":
|
|
474
|
+
s = resp[3:].strip().split()[-1]
|
|
475
|
+
return int(s)
|
|
476
|
+
|
|
477
|
+
|
|
478
|
+
class StdFtp:
|
|
479
|
+
"""Standard wrapper for the crude FTP object (of class :class:`ExtendedFtplib`).
|
|
480
|
+
|
|
481
|
+
It relies heavily on the :class:`ExtendedFtplib` class for FTP commands but
|
|
482
|
+
adds some interesting features such as:
|
|
483
|
+
|
|
484
|
+
* a fast login using the .netrc file;
|
|
485
|
+
* the ability to delay the :class:`ftplib.FTP` object creation as much as possible;
|
|
486
|
+
* the VORTEX_FTP_PROXY environment variable is looked for (if not available
|
|
487
|
+
FTP_PROXY is also scrutated). If defined, a FTP proxy will be used.
|
|
488
|
+
|
|
489
|
+
Methods that are not explicitly defined in the present class will be looked
|
|
490
|
+
for in the associated :class:`ExtendedFtplib` object (and eventually in the
|
|
491
|
+
wrapped :class:`ftplib.FTP` object). For example, it's possible
|
|
492
|
+
to call `self.get(...)` (exactly as one would do with the native
|
|
493
|
+
:class:`ExtendedFtplib` and :class:`ftplib.FTP` class).
|
|
494
|
+
"""
|
|
495
|
+
|
|
496
|
+
_PROXY_TYPES = ("no-auth-logname-based",)
|
|
497
|
+
|
|
498
|
+
_NO_AUTOLOGIN = (
|
|
499
|
+
"set_debuglevel",
|
|
500
|
+
"connect",
|
|
501
|
+
"login",
|
|
502
|
+
"stderr",
|
|
503
|
+
)
|
|
504
|
+
|
|
505
|
+
def __init__(
|
|
506
|
+
self,
|
|
507
|
+
system,
|
|
508
|
+
hostname,
|
|
509
|
+
port=DEFAULT_FTP_PORT,
|
|
510
|
+
nrcfile=None,
|
|
511
|
+
ignoreproxy=False,
|
|
512
|
+
):
|
|
513
|
+
"""
|
|
514
|
+
:param ~vortex.tools.systems.OSExtended system: The system object to work with
|
|
515
|
+
:param str hostname: The remote host's network name
|
|
516
|
+
:param int port: The remote host's FTP port.
|
|
517
|
+
:param str nrcfile: The path to the .netrc file (if `None` the ~/.netrc default is used)
|
|
518
|
+
:param bool ignoreproxy: Forcibly ignore any proxy related environment variables
|
|
519
|
+
"""
|
|
520
|
+
logger.debug("FTP init <host:%s>", hostname)
|
|
521
|
+
self._system = system
|
|
522
|
+
if ignoreproxy:
|
|
523
|
+
self._proxy_host, self._proxy_port, self._proxy_type = (
|
|
524
|
+
None,
|
|
525
|
+
None,
|
|
526
|
+
None,
|
|
527
|
+
)
|
|
528
|
+
else:
|
|
529
|
+
self._proxy_host, self._proxy_port, self._proxy_type = (
|
|
530
|
+
self._proxy_init()
|
|
531
|
+
)
|
|
532
|
+
self._hostname = hostname
|
|
533
|
+
self._port = port
|
|
534
|
+
self._nrcfile = nrcfile
|
|
535
|
+
self._internal_ftp = None
|
|
536
|
+
self._logname = None
|
|
537
|
+
self._cached_pwd = None
|
|
538
|
+
self._barelogname = None
|
|
539
|
+
|
|
540
|
+
def _proxy_init(self):
|
|
541
|
+
"""Return the proxy type, address and port."""
|
|
542
|
+
p_netloc = (None, None)
|
|
543
|
+
p_url = self.system.env.get(
|
|
544
|
+
"VORTEX_FTP_PROXY", self.system.env.get("FTP_PROXY", None)
|
|
545
|
+
)
|
|
546
|
+
if p_url:
|
|
547
|
+
p_netloc = p_url.split(":", 1)
|
|
548
|
+
if len(p_netloc) == 1:
|
|
549
|
+
p_netloc.append(DEFAULT_FTP_PORT)
|
|
550
|
+
else:
|
|
551
|
+
p_netloc[1] = int(p_netloc[1])
|
|
552
|
+
p_type = self.system.env.get(
|
|
553
|
+
"VORTEX_FTP_PROXY_TYPE", self._PROXY_TYPES[0]
|
|
554
|
+
)
|
|
555
|
+
if p_type not in self._PROXY_TYPES:
|
|
556
|
+
raise ValueError(
|
|
557
|
+
"Incorrect value for the VORTEX_FTP_PROXY_TYPE "
|
|
558
|
+
+ "environment variable (got: {:s})".format(p_type)
|
|
559
|
+
)
|
|
560
|
+
return p_netloc[0], p_netloc[1], p_type
|
|
561
|
+
|
|
562
|
+
def _extended_ftp_host_and_port(self):
|
|
563
|
+
if self._proxy_host:
|
|
564
|
+
if self._proxy_type == self._PROXY_TYPES[0]:
|
|
565
|
+
return self._proxy_host, self._proxy_port
|
|
566
|
+
else:
|
|
567
|
+
return self._hostname, self._port
|
|
568
|
+
|
|
569
|
+
@property
|
|
570
|
+
def _extended_ftp(self):
|
|
571
|
+
"""This property provides the :class:`ExtendedFtpLib` to work with.
|
|
572
|
+
|
|
573
|
+
It is created on-demand.
|
|
574
|
+
"""
|
|
575
|
+
if self._internal_ftp is None:
|
|
576
|
+
self._internal_ftp = ExtendedFtplib(
|
|
577
|
+
self._system, ftplib.FTP(), *self._extended_ftp_host_and_port()
|
|
578
|
+
)
|
|
579
|
+
return self._internal_ftp
|
|
580
|
+
|
|
581
|
+
_loginlike_extended_ftp = _extended_ftp
|
|
582
|
+
|
|
583
|
+
@property
|
|
584
|
+
def system(self):
|
|
585
|
+
"""The current local system interface."""
|
|
586
|
+
return self._system
|
|
587
|
+
|
|
588
|
+
@property
|
|
589
|
+
def host(self):
|
|
590
|
+
"""The FTP server hostname."""
|
|
591
|
+
if self._internal_ftp is None or self._proxy_host:
|
|
592
|
+
return self._hostname
|
|
593
|
+
else:
|
|
594
|
+
return self._extended_ftp.host
|
|
595
|
+
|
|
596
|
+
@property
|
|
597
|
+
def port(self):
|
|
598
|
+
"""The FTP server port number."""
|
|
599
|
+
if self._internal_ftp is None or self._proxy_host:
|
|
600
|
+
return self._port
|
|
601
|
+
else:
|
|
602
|
+
return self._extended_ftp.port
|
|
603
|
+
|
|
604
|
+
@property
|
|
605
|
+
def logname(self):
|
|
606
|
+
"""The current logname."""
|
|
607
|
+
return self._barelogname
|
|
608
|
+
|
|
609
|
+
@property
|
|
610
|
+
def proxy(self):
|
|
611
|
+
if self._proxy_host:
|
|
612
|
+
return "{0._proxy_host}:{0._proxy_port}".format(self)
|
|
613
|
+
else:
|
|
614
|
+
return None
|
|
615
|
+
|
|
616
|
+
@property
|
|
617
|
+
def cached_pwd(self):
|
|
618
|
+
"""The current cached password."""
|
|
619
|
+
return self._cached_pwd
|
|
620
|
+
|
|
621
|
+
def netpath(self, remote):
|
|
622
|
+
"""The complete qualified net path of the remote resource."""
|
|
623
|
+
return "{:s}@{:s}:{:s}".format(
|
|
624
|
+
self.logname if self.logname is not None else "unknown",
|
|
625
|
+
self.host,
|
|
626
|
+
remote,
|
|
627
|
+
)
|
|
628
|
+
|
|
629
|
+
def delayedlogin(self):
|
|
630
|
+
"""Login to the FTP server (if it was not already done)."""
|
|
631
|
+
if self._loginlike_extended_ftp.closed:
|
|
632
|
+
if self._logname is None or self.cached_pwd is None:
|
|
633
|
+
logger.warning(
|
|
634
|
+
"FTP logname/password must be set first. Use the fastlogin method."
|
|
635
|
+
)
|
|
636
|
+
raise RuntimeError("logname/password were not provided")
|
|
637
|
+
return self.login(self._logname, self.cached_pwd)
|
|
638
|
+
else:
|
|
639
|
+
return True
|
|
640
|
+
|
|
641
|
+
def _process_logname_password(self, logname, password=None):
|
|
642
|
+
"""Find the actual *logname* and *password*."""
|
|
643
|
+
if logname and password:
|
|
644
|
+
bare_logname = logname
|
|
645
|
+
else:
|
|
646
|
+
bare_logname, password = netrc_lookup(
|
|
647
|
+
logname, self.host, nrcfile=self._nrcfile
|
|
648
|
+
)
|
|
649
|
+
logname = bare_logname
|
|
650
|
+
if logname and self._proxy_host:
|
|
651
|
+
if self._proxy_type == self._PROXY_TYPES[0]:
|
|
652
|
+
logname = "{0:s}@{1.host:s}:{1.port:d}".format(
|
|
653
|
+
bare_logname, self
|
|
654
|
+
)
|
|
655
|
+
if logname:
|
|
656
|
+
return logname, password, bare_logname
|
|
657
|
+
else:
|
|
658
|
+
return None, None, None
|
|
659
|
+
|
|
660
|
+
def close(self):
|
|
661
|
+
"""Terminates the FTP session."""
|
|
662
|
+
rc = True
|
|
663
|
+
if self._internal_ftp is not None:
|
|
664
|
+
rc = self._internal_ftp.close()
|
|
665
|
+
return rc
|
|
666
|
+
|
|
667
|
+
def fastlogin(self, logname, password=None, delayed=True):
|
|
668
|
+
"""
|
|
669
|
+
Simple heuristic using actual attributes and/or netrc information to find
|
|
670
|
+
login informations.
|
|
671
|
+
|
|
672
|
+
If *delayed=True*, the actual login will be performed later (whenever
|
|
673
|
+
necessary).
|
|
674
|
+
"""
|
|
675
|
+
rc = False
|
|
676
|
+
p_logname, p_password, p_barelogname = self._process_logname_password(
|
|
677
|
+
logname, password
|
|
678
|
+
)
|
|
679
|
+
if p_logname and p_password:
|
|
680
|
+
self._logname = p_logname
|
|
681
|
+
self._cached_pwd = p_password
|
|
682
|
+
self._barelogname = p_barelogname
|
|
683
|
+
rc = True
|
|
684
|
+
if not delayed and rc:
|
|
685
|
+
# If one really wants to login...
|
|
686
|
+
rc = self.login(self._logname, self._cached_pwd)
|
|
687
|
+
return bool(rc)
|
|
688
|
+
|
|
689
|
+
def _extended_ftp_lookup_check(self, key):
|
|
690
|
+
"""Are we allowed to look for *key* in the `self._extended_ftp` object ?"""
|
|
691
|
+
return not key.startswith("_")
|
|
692
|
+
|
|
693
|
+
def _extended_ftp_lookup(self, key):
|
|
694
|
+
"""Look if the `self._extended_ftp` object can provide a given method.
|
|
695
|
+
|
|
696
|
+
If so, a possibly wrapped method is returned (in order to perform the
|
|
697
|
+
delayed login).
|
|
698
|
+
"""
|
|
699
|
+
actualattr = getattr(self._extended_ftp, key)
|
|
700
|
+
if callable(actualattr):
|
|
701
|
+
|
|
702
|
+
def osproxy(*args, **kw):
|
|
703
|
+
# For most of the native commands, we want autologin to be performed
|
|
704
|
+
if key not in self._NO_AUTOLOGIN:
|
|
705
|
+
self.delayedlogin()
|
|
706
|
+
# This is important because wrapper functions are cached (see __getattr__)
|
|
707
|
+
actualattr = getattr(self._extended_ftp, key)
|
|
708
|
+
return actualattr(*args, **kw)
|
|
709
|
+
|
|
710
|
+
osproxy.func_name = str(key)
|
|
711
|
+
osproxy.__name__ = str(key)
|
|
712
|
+
osproxy.__doc__ = actualattr.__doc__
|
|
713
|
+
return osproxy
|
|
714
|
+
else:
|
|
715
|
+
return actualattr
|
|
716
|
+
|
|
717
|
+
@secure_getattr
|
|
718
|
+
def __getattr__(self, key):
|
|
719
|
+
"""Gateway to undefined method or attributes if present in ``_extended_ftp``."""
|
|
720
|
+
if self._extended_ftp_lookup_check(key):
|
|
721
|
+
attr = self._extended_ftp_lookup(key)
|
|
722
|
+
if callable(attr):
|
|
723
|
+
setattr(self, key, attr)
|
|
724
|
+
return attr
|
|
725
|
+
raise AttributeError(key)
|
|
726
|
+
|
|
727
|
+
def __enter__(self):
|
|
728
|
+
return self
|
|
729
|
+
|
|
730
|
+
def __exit__(self, exc_type, exc_value, traceback): # @UnusedVariable
|
|
731
|
+
self.close()
|
|
732
|
+
|
|
733
|
+
|
|
734
|
+
class AutoRetriesFtp(StdFtp):
|
|
735
|
+
"""An advanced FTP client with retry-on-failure capabilities.
|
|
736
|
+
|
|
737
|
+
It inherits from :class:`StdFtp` class thus providing the same interface (no
|
|
738
|
+
new public methods are added).
|
|
739
|
+
|
|
740
|
+
However, most of the :class:`StdFtp` methods are wrapped in order to implement
|
|
741
|
+
the retry-on-failure capability.
|
|
742
|
+
"""
|
|
743
|
+
|
|
744
|
+
def __init__(
|
|
745
|
+
self,
|
|
746
|
+
system,
|
|
747
|
+
hostname,
|
|
748
|
+
port=DEFAULT_FTP_PORT,
|
|
749
|
+
nrcfile=None,
|
|
750
|
+
ignoreproxy=False,
|
|
751
|
+
retrycount_default=6,
|
|
752
|
+
retrycount_connect=8,
|
|
753
|
+
retrycount_login=3,
|
|
754
|
+
retrydelay_default=15,
|
|
755
|
+
retrydelay_connect=15,
|
|
756
|
+
retrydelay_login=10,
|
|
757
|
+
):
|
|
758
|
+
"""
|
|
759
|
+
:param ~vortex.tools.systems.OSExtended system: The system object to work with.
|
|
760
|
+
:param str hostname: The remote host's network name.
|
|
761
|
+
:param int port: The remote host's FTP port.
|
|
762
|
+
:param str nrcfile: The path to the .netrc file (if `None` the ~/.netrc default is used)
|
|
763
|
+
:param bool ignoreproxy: Forcibly ignore any proxy related environment variables
|
|
764
|
+
:param int retrycount_default: The maximum number of retries for most of the FTP functions.
|
|
765
|
+
:param int retrydelay_default: The delay (in seconds) between two retries for most of the FTP functions.
|
|
766
|
+
:param int retrycount_connect: The maximum number of retries when connecting to the FTP server.
|
|
767
|
+
:param int retrydelay_connect: The delay (in seconds) between two retries when connecting to the FTP server.
|
|
768
|
+
:param int retrycount_login: The maximum number of retries when login in to the FTP server.
|
|
769
|
+
:param int retrydelay_login: The delay (in seconds) between two retries when login in to the FTP server.
|
|
770
|
+
"""
|
|
771
|
+
logger.debug("AutoRetries FTP init <host:%s>", hostname)
|
|
772
|
+
# Retry stuff
|
|
773
|
+
self.retrycount_default = retrycount_default
|
|
774
|
+
self.retrycount_connect = retrycount_connect
|
|
775
|
+
self.retrycount_login = retrycount_login
|
|
776
|
+
self.retrydelay_default = retrydelay_default
|
|
777
|
+
self.retrydelay_connect = retrydelay_connect
|
|
778
|
+
self.retrydelay_login = retrydelay_login
|
|
779
|
+
# Reset everything
|
|
780
|
+
self._initialise()
|
|
781
|
+
# Finalise
|
|
782
|
+
super().__init__(
|
|
783
|
+
system,
|
|
784
|
+
hostname,
|
|
785
|
+
port=port,
|
|
786
|
+
nrcfile=nrcfile,
|
|
787
|
+
ignoreproxy=ignoreproxy,
|
|
788
|
+
)
|
|
789
|
+
|
|
790
|
+
def _initialise(self):
|
|
791
|
+
self._internal_retries_max = None
|
|
792
|
+
self._cwd = ""
|
|
793
|
+
self._autodestroy()
|
|
794
|
+
|
|
795
|
+
def _autodestroy(self):
|
|
796
|
+
"""Reset the proxied :class:`ExtendedFtpLib` object."""
|
|
797
|
+
self._internal_ftp = None
|
|
798
|
+
|
|
799
|
+
def _get_extended_ftp(self, retrycount, retrydelay, exceptions_extras):
|
|
800
|
+
"""Delay the call to 'connect' as much as possible."""
|
|
801
|
+
if self._internal_ftp is None:
|
|
802
|
+
eftplib = self._retry_wrapped_callable(
|
|
803
|
+
ExtendedFtplib,
|
|
804
|
+
retrycount=retrycount,
|
|
805
|
+
retrydelay=retrydelay,
|
|
806
|
+
exceptions_extras=exceptions_extras,
|
|
807
|
+
)
|
|
808
|
+
self._internal_ftp = eftplib(
|
|
809
|
+
self._system, ftplib.FTP(), *self._extended_ftp_host_and_port()
|
|
810
|
+
)
|
|
811
|
+
return self._internal_ftp
|
|
812
|
+
|
|
813
|
+
@property
|
|
814
|
+
def _extended_ftp(self):
|
|
815
|
+
"""Delay the call to 'connect' as much as possible."""
|
|
816
|
+
return self._get_extended_ftp(
|
|
817
|
+
self.retrycount_connect,
|
|
818
|
+
self.retrydelay_connect,
|
|
819
|
+
[
|
|
820
|
+
socket.timeout,
|
|
821
|
+
],
|
|
822
|
+
)
|
|
823
|
+
|
|
824
|
+
@property
|
|
825
|
+
def _loginlike_extended_ftp(self):
|
|
826
|
+
"""Delay the call to 'connect' as much as possible."""
|
|
827
|
+
return self._get_extended_ftp(
|
|
828
|
+
self.retrycount_login,
|
|
829
|
+
self.retrydelay_login,
|
|
830
|
+
[
|
|
831
|
+
ftplib.error_perm,
|
|
832
|
+
socket.error,
|
|
833
|
+
],
|
|
834
|
+
)
|
|
835
|
+
|
|
836
|
+
def _actual_login(self, *args):
|
|
837
|
+
"""Actually log in + save logname/password + correct the cwd if needed."""
|
|
838
|
+
rc = self._extended_ftp.login(*args)
|
|
839
|
+
if rc:
|
|
840
|
+
if self._logname is None or self._logname != args[0]:
|
|
841
|
+
self._logname = args[0]
|
|
842
|
+
self._barelogname = args[0]
|
|
843
|
+
self._cached_pwd = args[1]
|
|
844
|
+
if rc and self._cwd:
|
|
845
|
+
cocoondir = self._cwd
|
|
846
|
+
self._cwd = ""
|
|
847
|
+
rc = rc and self.cwd(cocoondir)
|
|
848
|
+
return rc
|
|
849
|
+
|
|
850
|
+
def login(self, *args):
|
|
851
|
+
"""Proxy to ftplib :meth:`ftplib.FTP.login`."""
|
|
852
|
+
wftplogin = self._retry_wrapped_callable(
|
|
853
|
+
self._actual_login,
|
|
854
|
+
retrycount=self.retrycount_login,
|
|
855
|
+
retrydelay=self.retrydelay_login,
|
|
856
|
+
exceptions_extras=[ftplib.error_perm, socket.error, EOFError],
|
|
857
|
+
)
|
|
858
|
+
return wftplogin(*args)
|
|
859
|
+
|
|
860
|
+
def _retry_wrapped_callable(
|
|
861
|
+
self, func, retrycount=None, retrydelay=None, exceptions_extras=None
|
|
862
|
+
):
|
|
863
|
+
"""
|
|
864
|
+
Wraps the *func* function in order to implement a retry on failure
|
|
865
|
+
mechanism.
|
|
866
|
+
|
|
867
|
+
:param callable func: Any callable that should be wrapped (usually a function)
|
|
868
|
+
:param int retrycount: The wanted retry count (`self.retrycount_default` if omitted)
|
|
869
|
+
:param int retrydelay: The delay between retries (`self.retrydelay_default` if omitted)
|
|
870
|
+
:param list exceptions_extras: Extra exceptions to be catch during the retry
|
|
871
|
+
phase (in addtion of `ftplib.error_temp`, `ftplib.error_proto`,
|
|
872
|
+
`ftplib.error_reply`).
|
|
873
|
+
|
|
874
|
+
Upon failure, :meth:`_autodestroy` is called in order to reset this object
|
|
875
|
+
and start with a clean slate.
|
|
876
|
+
"""
|
|
877
|
+
actual_rcount = retrycount or self.retrycount_default
|
|
878
|
+
actual_rdelay = retrydelay or self.retrydelay_default
|
|
879
|
+
actual_exc = [
|
|
880
|
+
ftplib.error_temp,
|
|
881
|
+
ftplib.error_proto,
|
|
882
|
+
ftplib.error_reply,
|
|
883
|
+
]
|
|
884
|
+
if exceptions_extras:
|
|
885
|
+
actual_exc.extend(exceptions_extras)
|
|
886
|
+
actual_exc = tuple(actual_exc)
|
|
887
|
+
|
|
888
|
+
def retries_wrapper(*args, **kw):
|
|
889
|
+
globalcounter_driver = self._internal_retries_max is None
|
|
890
|
+
if globalcounter_driver:
|
|
891
|
+
self._internal_retries_max = actual_rcount
|
|
892
|
+
retriesleft = max(
|
|
893
|
+
min(self._internal_retries_max, actual_rcount), 1
|
|
894
|
+
)
|
|
895
|
+
try:
|
|
896
|
+
while retriesleft:
|
|
897
|
+
try:
|
|
898
|
+
return func(*args, **kw)
|
|
899
|
+
except actual_exc as e:
|
|
900
|
+
logger.warning(
|
|
901
|
+
'An error occurred (in "%s"): %s', func.__name__, e
|
|
902
|
+
)
|
|
903
|
+
retriesleft -= 1
|
|
904
|
+
self._internal_retries_max -= 1
|
|
905
|
+
if not retriesleft:
|
|
906
|
+
logger.warning(
|
|
907
|
+
"The maximum number of retries (%d) was reached.",
|
|
908
|
+
actual_rcount,
|
|
909
|
+
)
|
|
910
|
+
raise
|
|
911
|
+
logger.warning(
|
|
912
|
+
"Sleeping %d sec. before the next attempt.",
|
|
913
|
+
actual_rdelay,
|
|
914
|
+
)
|
|
915
|
+
self._autodestroy()
|
|
916
|
+
self.system.sleep(actual_rdelay)
|
|
917
|
+
finally:
|
|
918
|
+
if globalcounter_driver:
|
|
919
|
+
self._internal_retries_max = None
|
|
920
|
+
|
|
921
|
+
retries_wrapper.func_name = func.__name__
|
|
922
|
+
retries_wrapper.__name__ = func.__name__
|
|
923
|
+
retries_wrapper.__doc__ = func.__doc__
|
|
924
|
+
return retries_wrapper
|
|
925
|
+
|
|
926
|
+
@secure_getattr
|
|
927
|
+
def __getattr__(self, key):
|
|
928
|
+
"""Gateway to undefined method or attributes if present in ``_extended_ftp``."""
|
|
929
|
+
if self._extended_ftp_lookup_check(key):
|
|
930
|
+
attr = self._extended_ftp_lookup(key)
|
|
931
|
+
if callable(attr):
|
|
932
|
+
if key not in self._NO_AUTOLOGIN:
|
|
933
|
+
attr = self._retry_wrapped_callable(
|
|
934
|
+
attr,
|
|
935
|
+
exceptions_extras=[
|
|
936
|
+
socket.error,
|
|
937
|
+
],
|
|
938
|
+
)
|
|
939
|
+
setattr(self, key, attr)
|
|
940
|
+
return attr
|
|
941
|
+
raise AttributeError(key)
|
|
942
|
+
|
|
943
|
+
def cwd(self, pathname):
|
|
944
|
+
"""Change the current directory to the *pathname* directory."""
|
|
945
|
+
todo = self._retry_wrapped_callable(self._extended_ftp_lookup("cwd"))
|
|
946
|
+
rc = todo(pathname)
|
|
947
|
+
if rc:
|
|
948
|
+
if self.system.path.isabs(pathname):
|
|
949
|
+
self._cwd = pathname
|
|
950
|
+
else:
|
|
951
|
+
self._cwd = self.system.path.join(self._cwd, pathname)
|
|
952
|
+
self._cwd = self.system.path.normpath(self._cwd)
|
|
953
|
+
return rc
|
|
954
|
+
|
|
955
|
+
def cd(self, destination):
|
|
956
|
+
"""Change the current directory to the *pathname* directory."""
|
|
957
|
+
return self.cwd(destination)
|
|
958
|
+
|
|
959
|
+
def quit(self):
|
|
960
|
+
"""Quit the current ftp session politely."""
|
|
961
|
+
try:
|
|
962
|
+
rc = self._retry_wrapped_callable(
|
|
963
|
+
self._extended_ftp_lookup("quit")
|
|
964
|
+
)()
|
|
965
|
+
finally:
|
|
966
|
+
self._initialise()
|
|
967
|
+
return rc
|
|
968
|
+
|
|
969
|
+
def close(self):
|
|
970
|
+
"""Quit the current ftp session abruptly."""
|
|
971
|
+
rc = super().close()
|
|
972
|
+
self._initialise()
|
|
973
|
+
return rc
|
|
974
|
+
|
|
975
|
+
|
|
976
|
+
class ResetableAutoRetriesFtp(AutoRetriesFtp):
|
|
977
|
+
"""
|
|
978
|
+
An advanced FTP client with retry-on-failure capabilities and an additional
|
|
979
|
+
method :meth:`reset` to reset the current working directory to its initial
|
|
980
|
+
value (i.e. The working directory just after login).
|
|
981
|
+
"""
|
|
982
|
+
|
|
983
|
+
def _initialise(self):
|
|
984
|
+
super()._initialise()
|
|
985
|
+
self._initialpath = None
|
|
986
|
+
|
|
987
|
+
def _actual_login(self, *args):
|
|
988
|
+
if self._initialpath is not None and self._cwd:
|
|
989
|
+
rc = super()._actual_login(*args)
|
|
990
|
+
else:
|
|
991
|
+
rc = super()._actual_login(*args)
|
|
992
|
+
if rc:
|
|
993
|
+
self._initialpath = self.pwd()
|
|
994
|
+
return rc
|
|
995
|
+
|
|
996
|
+
def reset(self):
|
|
997
|
+
"""Reset the current working directory to its initial value."""
|
|
998
|
+
if self._initialpath is not None and self._cwd:
|
|
999
|
+
self._cwd = ""
|
|
1000
|
+
return self.cwd(self._initialpath)
|
|
1001
|
+
|
|
1002
|
+
|
|
1003
|
+
class PooledResetableAutoRetriesFtp(ResetableAutoRetriesFtp):
|
|
1004
|
+
"""
|
|
1005
|
+
An advanced FTP client derived from :class:`ResetableAutoRetriesFtp` that can
|
|
1006
|
+
be used in conjunction with an :class:`FtpConnectionPool` object.
|
|
1007
|
+
"""
|
|
1008
|
+
|
|
1009
|
+
def __init__(self, pool, *kargs, **kwargs):
|
|
1010
|
+
"""
|
|
1011
|
+
:param FtpConnectionPool pool: The FTP connection pool to work with.
|
|
1012
|
+
|
|
1013
|
+
*kargs* and *kwargs* are passed directly to the :class:`ResetableAutoRetriesFtp`
|
|
1014
|
+
class constructor (refers to its documentation).
|
|
1015
|
+
"""
|
|
1016
|
+
self._pool = pool
|
|
1017
|
+
super().__init__(*kargs, **kwargs)
|
|
1018
|
+
logger.debug(
|
|
1019
|
+
"Pooled FTP init <host:%s> <pool:%s>", self.host, repr(pool)
|
|
1020
|
+
)
|
|
1021
|
+
|
|
1022
|
+
def forceclose(self):
|
|
1023
|
+
"""Really quit the ftp session."""
|
|
1024
|
+
if self._internal_ftp is not None:
|
|
1025
|
+
return super().close()
|
|
1026
|
+
else:
|
|
1027
|
+
return True
|
|
1028
|
+
|
|
1029
|
+
def close(self):
|
|
1030
|
+
"""
|
|
1031
|
+
The ftp session is not really closed... instead, the current object is
|
|
1032
|
+
given back to the FTP connection pool that will be able to reuse it.
|
|
1033
|
+
"""
|
|
1034
|
+
# If no underlying library is available, do not bother...
|
|
1035
|
+
if self._internal_ftp is not None:
|
|
1036
|
+
self._pool.relinquishing(self)
|
|
1037
|
+
return True
|
|
1038
|
+
|
|
1039
|
+
|
|
1040
|
+
class FtpConnectionPool:
|
|
1041
|
+
"""A class that dispense FTP client objects for a given *hostname*/*logname* pair.
|
|
1042
|
+
|
|
1043
|
+
Dispensed objects can either be new object or re-used pre-existing ones: this
|
|
1044
|
+
makes no differences for the caller since re-used object are properly "reseted"
|
|
1045
|
+
before being dispensed.
|
|
1046
|
+
|
|
1047
|
+
The great advantage of this class is to keep FTP connections open for a given
|
|
1048
|
+
number of clients which avoids multiple connect/login sequences (that are
|
|
1049
|
+
time consuming). On the other hand, the user must be cautious when using this
|
|
1050
|
+
class since having numerous long standing opened connections can harm the
|
|
1051
|
+
remote FTP hosts.
|
|
1052
|
+
"""
|
|
1053
|
+
|
|
1054
|
+
#: The FTP client class that will be used
|
|
1055
|
+
_FTPCLIENT_CLASS = PooledResetableAutoRetriesFtp
|
|
1056
|
+
#: The maximum number of spare FTP client (when this threshold is hit,
|
|
1057
|
+
#: warning are issued)
|
|
1058
|
+
_REUSABLE_THRESHOLD = 10
|
|
1059
|
+
|
|
1060
|
+
def __init__(self, system, nrcfile=None, ignoreproxy=False):
|
|
1061
|
+
"""
|
|
1062
|
+
:param ~vortex.tools.systems.OSExtended system: The system object to work with.
|
|
1063
|
+
:param str nrcfile: The path to the .netrc file (if `None` the ~/.netrc default is used)
|
|
1064
|
+
:param bool ignoreproxy: Forcibly ignore any proxy related environment variables
|
|
1065
|
+
"""
|
|
1066
|
+
self._system = system
|
|
1067
|
+
self._nrcfile = nrcfile
|
|
1068
|
+
self._ignoreproxy = ignoreproxy
|
|
1069
|
+
self._reusable = collections.defaultdict(collections.deque)
|
|
1070
|
+
self._created = 0
|
|
1071
|
+
self._reused = 0
|
|
1072
|
+
self._givenback = 0
|
|
1073
|
+
|
|
1074
|
+
@property
|
|
1075
|
+
def poolsize(self):
|
|
1076
|
+
"""The number of spare FTP clients."""
|
|
1077
|
+
return sum([len(hpool) for hpool in self._reusable.values()])
|
|
1078
|
+
|
|
1079
|
+
def __str__(self):
|
|
1080
|
+
"""Print a summary of the connection pool activity."""
|
|
1081
|
+
out = "Current connection pool size: {:d}\n".format(self.poolsize)
|
|
1082
|
+
out += " # of created objects: {:d}\n".format(self._created)
|
|
1083
|
+
out += " # of re-used objects: {:d}\n".format(self._reused)
|
|
1084
|
+
out += " # of given back objects: {:d}\n".format(self._givenback)
|
|
1085
|
+
if self.poolsize:
|
|
1086
|
+
out += "\nDetailed list of current spare clients:\n"
|
|
1087
|
+
for ident, hpool in self._reusable.items():
|
|
1088
|
+
for client in hpool:
|
|
1089
|
+
out += " - {id[1]:s}@{id[0]:s}: {cl!r}\n".format(
|
|
1090
|
+
id=ident, cl=client
|
|
1091
|
+
)
|
|
1092
|
+
return out
|
|
1093
|
+
|
|
1094
|
+
def deal(
|
|
1095
|
+
self,
|
|
1096
|
+
hostname,
|
|
1097
|
+
logname,
|
|
1098
|
+
port=DEFAULT_FTP_PORT,
|
|
1099
|
+
delayed=True,
|
|
1100
|
+
ignoreproxy=False,
|
|
1101
|
+
):
|
|
1102
|
+
"""Retrieve an FTP client for the *hostname*/*logname* pair."""
|
|
1103
|
+
p_logname, _ = netrc_lookup(logname, hostname, nrcfile=self._nrcfile)
|
|
1104
|
+
if self._reusable[(hostname, port, p_logname)]:
|
|
1105
|
+
ftpc = self._reusable[(hostname, port, p_logname)].pop()
|
|
1106
|
+
ftpc.reset()
|
|
1107
|
+
logger.debug("Re-using a client: %s", repr(ftpc))
|
|
1108
|
+
if not delayed:
|
|
1109
|
+
# If requested, ensure that we are logged in
|
|
1110
|
+
ftpc.delayedlogin()
|
|
1111
|
+
self._reused += 1
|
|
1112
|
+
return ftpc
|
|
1113
|
+
else:
|
|
1114
|
+
ftpc = self._FTPCLIENT_CLASS(
|
|
1115
|
+
self,
|
|
1116
|
+
self._system,
|
|
1117
|
+
hostname,
|
|
1118
|
+
port=port,
|
|
1119
|
+
nrcfile=self._nrcfile,
|
|
1120
|
+
ignoreproxy=self._ignoreproxy,
|
|
1121
|
+
)
|
|
1122
|
+
rc = ftpc.fastlogin(p_logname, delayed=delayed)
|
|
1123
|
+
if rc:
|
|
1124
|
+
logger.debug("Creating a new client: %s", repr(ftpc))
|
|
1125
|
+
self._created += 1
|
|
1126
|
+
return ftpc
|
|
1127
|
+
else:
|
|
1128
|
+
logger.warning(
|
|
1129
|
+
"Could not login on %s:%d as %s [%s]",
|
|
1130
|
+
hostname,
|
|
1131
|
+
port,
|
|
1132
|
+
p_logname,
|
|
1133
|
+
str(rc),
|
|
1134
|
+
)
|
|
1135
|
+
return None
|
|
1136
|
+
|
|
1137
|
+
def relinquishing(self, client):
|
|
1138
|
+
"""
|
|
1139
|
+
When the user is done with a reusable *client*, this method should be
|
|
1140
|
+
called in order for the FTP connection pool to reuse it.
|
|
1141
|
+
|
|
1142
|
+
It is usually dealt with properly by the FTP client object itself when
|
|
1143
|
+
its `close` method is called.
|
|
1144
|
+
"""
|
|
1145
|
+
assert isinstance(client, self._FTPCLIENT_CLASS)
|
|
1146
|
+
self._reusable[(client.host, client.port, client.logname)].append(
|
|
1147
|
+
client
|
|
1148
|
+
)
|
|
1149
|
+
self._givenback += 1
|
|
1150
|
+
logger.debug(
|
|
1151
|
+
"Spare client for %s@%s:%d has been stored (poolsize=%d).",
|
|
1152
|
+
client.logname,
|
|
1153
|
+
client.host,
|
|
1154
|
+
client.port,
|
|
1155
|
+
self.poolsize,
|
|
1156
|
+
)
|
|
1157
|
+
if self.poolsize >= self._REUSABLE_THRESHOLD:
|
|
1158
|
+
logger.warning(
|
|
1159
|
+
"The FTP pool is too big ! (%d >= %d). Here are the details:\n%s",
|
|
1160
|
+
self.poolsize,
|
|
1161
|
+
self._REUSABLE_THRESHOLD,
|
|
1162
|
+
str(self),
|
|
1163
|
+
)
|
|
1164
|
+
|
|
1165
|
+
def clear(self):
|
|
1166
|
+
"""Destroy all the spare FTP clients."""
|
|
1167
|
+
for hpool in self._reusable.values():
|
|
1168
|
+
for client in hpool:
|
|
1169
|
+
logger.debug(
|
|
1170
|
+
"Destroying client for %s@%s", client.logname, client.host
|
|
1171
|
+
)
|
|
1172
|
+
client.forceclose()
|
|
1173
|
+
hpool.clear()
|
|
1174
|
+
|
|
1175
|
+
|
|
1176
|
+
class Ssh:
|
|
1177
|
+
"""Remote command execution via ssh.
|
|
1178
|
+
|
|
1179
|
+
Also handles remote copy via scp or ssh, which is intimately linked
|
|
1180
|
+
"""
|
|
1181
|
+
|
|
1182
|
+
def __init__(self, sh, hostname, logname=None, sshopts=None, scpopts=None):
|
|
1183
|
+
"""
|
|
1184
|
+
:param System sh: The :class:`System` object that is to be used.
|
|
1185
|
+
:param str hostname: The target hostname(s).
|
|
1186
|
+
:param logname: The logname for the Ssh commands.
|
|
1187
|
+
:param str sshopts: Extra SSH options (in addition to the configuration file ones).
|
|
1188
|
+
:param str scpopts: Extra SCP options (in addition to the configuration file ones).
|
|
1189
|
+
"""
|
|
1190
|
+
self._sh = sh
|
|
1191
|
+
|
|
1192
|
+
self._logname = logname
|
|
1193
|
+
self._remote = hostname
|
|
1194
|
+
|
|
1195
|
+
target = sh.default_target
|
|
1196
|
+
self._sshcmd = target.get(key="services:sshcmd", default="ssh")
|
|
1197
|
+
self._scpcmd = target.get(key="services:scpcmd", default="scp")
|
|
1198
|
+
self._sshopts = (
|
|
1199
|
+
target.get(key="services:sshopts", default="-x").split()
|
|
1200
|
+
+ (sshopts or "").split()
|
|
1201
|
+
)
|
|
1202
|
+
self._scpopts = (
|
|
1203
|
+
target.get(key="services:scpopts", default="-Bp").split()
|
|
1204
|
+
+ (scpopts or "").split()
|
|
1205
|
+
)
|
|
1206
|
+
|
|
1207
|
+
@property
|
|
1208
|
+
def sh(self):
|
|
1209
|
+
return self._sh
|
|
1210
|
+
|
|
1211
|
+
@property
|
|
1212
|
+
def remote(self):
|
|
1213
|
+
return (
|
|
1214
|
+
"" if self._logname is None else self._logname + "@"
|
|
1215
|
+
) + self._remote
|
|
1216
|
+
|
|
1217
|
+
def check_ok(self):
|
|
1218
|
+
"""Is the connexion ok ?"""
|
|
1219
|
+
return self.execute("true") is not False
|
|
1220
|
+
|
|
1221
|
+
def execute(self, remote_command, sshopts=""):
|
|
1222
|
+
"""Execute the command remotely.
|
|
1223
|
+
|
|
1224
|
+
Return the output of the command (list of lines), or False on error.
|
|
1225
|
+
|
|
1226
|
+
Only the output sent to the log (when silent=False) shows the difference
|
|
1227
|
+
between:
|
|
1228
|
+
|
|
1229
|
+
- a bad connection (e.g. wrong user)
|
|
1230
|
+
- a remote command retcode != 0 (e.g. cmd='/bin/false')
|
|
1231
|
+
|
|
1232
|
+
"""
|
|
1233
|
+
myremote = self.remote
|
|
1234
|
+
if myremote is None:
|
|
1235
|
+
return False
|
|
1236
|
+
cmd = (
|
|
1237
|
+
[
|
|
1238
|
+
self._sshcmd,
|
|
1239
|
+
]
|
|
1240
|
+
+ self._sshopts
|
|
1241
|
+
+ sshopts.split()
|
|
1242
|
+
+ [
|
|
1243
|
+
myremote,
|
|
1244
|
+
]
|
|
1245
|
+
+ [
|
|
1246
|
+
remote_command,
|
|
1247
|
+
]
|
|
1248
|
+
)
|
|
1249
|
+
return self.sh.spawn(cmd, output=True, fatal=False)
|
|
1250
|
+
|
|
1251
|
+
def background_execute(
|
|
1252
|
+
self, remote_command, sshopts="", stdout=None, stderr=None
|
|
1253
|
+
):
|
|
1254
|
+
"""Execute the command remotely and return the object representing the ssh process.
|
|
1255
|
+
|
|
1256
|
+
Return a Popen object representing the ssh process. The user is reponsible
|
|
1257
|
+
for calling pclose on this object and check the return code.
|
|
1258
|
+
"""
|
|
1259
|
+
myremote = self.remote
|
|
1260
|
+
if myremote is None:
|
|
1261
|
+
return False
|
|
1262
|
+
cmd = (
|
|
1263
|
+
[
|
|
1264
|
+
self._sshcmd,
|
|
1265
|
+
]
|
|
1266
|
+
+ self._sshopts
|
|
1267
|
+
+ sshopts.split()
|
|
1268
|
+
+ [
|
|
1269
|
+
myremote,
|
|
1270
|
+
]
|
|
1271
|
+
+ [
|
|
1272
|
+
remote_command,
|
|
1273
|
+
]
|
|
1274
|
+
)
|
|
1275
|
+
return self.sh.popen(cmd, stdout=stdout, stderr=stderr)
|
|
1276
|
+
|
|
1277
|
+
def cocoon(self, destination):
|
|
1278
|
+
"""Create the remote directory to contain ``destination``.
|
|
1279
|
+
|
|
1280
|
+
Return ``False`` on failure.
|
|
1281
|
+
"""
|
|
1282
|
+
remote_dir = self.sh.path.dirname(destination)
|
|
1283
|
+
if remote_dir == "":
|
|
1284
|
+
return True
|
|
1285
|
+
logger.debug('Cocooning remote directory "%s"', remote_dir)
|
|
1286
|
+
cmd = 'mkdir -p "{}"'.format(remote_dir)
|
|
1287
|
+
rc = self.execute(cmd)
|
|
1288
|
+
if not rc:
|
|
1289
|
+
logger.error(
|
|
1290
|
+
"Cannot cocoon on %s (user: %s) for %s",
|
|
1291
|
+
str(self._remote),
|
|
1292
|
+
str(self._logname),
|
|
1293
|
+
destination,
|
|
1294
|
+
)
|
|
1295
|
+
return rc
|
|
1296
|
+
|
|
1297
|
+
def remove(self, target):
|
|
1298
|
+
"""Remove the remote target, if present. Return False on failure.
|
|
1299
|
+
|
|
1300
|
+
Does not fail when the target is missing, but does when it exists
|
|
1301
|
+
and cannot be removed, which would make a final move also fail.
|
|
1302
|
+
"""
|
|
1303
|
+
logger.debug('Removing remote target "%s"', target)
|
|
1304
|
+
cmd = 'rm -fr "{}"'.format(target)
|
|
1305
|
+
rc = self.execute(cmd)
|
|
1306
|
+
if not rc:
|
|
1307
|
+
logger.error(
|
|
1308
|
+
'Cannot remove from %s (user: %s) item "%s"',
|
|
1309
|
+
str(self._remote),
|
|
1310
|
+
str(self._logname),
|
|
1311
|
+
target,
|
|
1312
|
+
)
|
|
1313
|
+
return rc
|
|
1314
|
+
|
|
1315
|
+
def _scp_putget_commons(self, source, destination):
|
|
1316
|
+
"""Common checks on source and destination."""
|
|
1317
|
+
if not isinstance(source, str):
|
|
1318
|
+
msg = "Source is not a plain file path: {!r}".format(source)
|
|
1319
|
+
raise TypeError(msg)
|
|
1320
|
+
if not isinstance(destination, str):
|
|
1321
|
+
msg = "Destination is not a plain file path: {!r}".format(
|
|
1322
|
+
destination
|
|
1323
|
+
)
|
|
1324
|
+
raise TypeError(msg)
|
|
1325
|
+
|
|
1326
|
+
# avoid special cases
|
|
1327
|
+
if destination == "" or destination == ".":
|
|
1328
|
+
destination = "./"
|
|
1329
|
+
else:
|
|
1330
|
+
if destination.endswith(".."):
|
|
1331
|
+
destination += "/"
|
|
1332
|
+
if "../" in destination:
|
|
1333
|
+
raise ValueError(
|
|
1334
|
+
'"../" is not allowed in the destination path'
|
|
1335
|
+
)
|
|
1336
|
+
if destination.endswith("/"):
|
|
1337
|
+
destination = self.sh.path.join(
|
|
1338
|
+
destination, self.sh.path.basename(source)
|
|
1339
|
+
)
|
|
1340
|
+
|
|
1341
|
+
return source, destination
|
|
1342
|
+
|
|
+    def scpput(self, source, destination, scpopts=""):
+        r"""Send ``source`` to ``destination``.
+
+        - ``source`` is a single file or a directory, not a pattern (no '\*.grib').
+        - ``destination`` is the remote name, unless it ends with '/', in
+          which case it is the containing directory, and the remote name is
+          the basename of ``source`` (like a real cp or scp):
+
+          - ``scp a/b.gif c/d.gif --> c/d.gif``
+          - ``scp a/b.gif c/d/ --> c/d/b.gif``
+
+        Return True for ok, False on error.
+        """
+        source, destination = self._scp_putget_commons(source, destination)
+
+        if not self.sh.path.exists(source):
+            logger.error("No such file or directory: %s", source)
+            return False
+
+        source = self.sh.path.realpath(source)
+
+        myremote = self.remote
+        if myremote is None:
+            return False
+
+        if not self.cocoon(destination):
+            return False
+
+        if not self.remove(destination):
+            return False
+
+        if self.sh.path.isdir(source):
+            scpopts += " -r"
+
+        if not self.remove(destination + ".tmp"):
+            return False
+
+        # transfer to a temporary place.
+        # when ``destination`` contains spaces, 1 round of quoting
+        # is necessary, to avoid an 'scp: ambiguous target' error.
+        cmd = (
+            [
+                self._scpcmd,
+            ]
+            + self._scpopts
+            + scpopts.split()
+            + [source, myremote + ":" + shlex.quote(destination + ".tmp")]
+        )
+        rc = self.sh.spawn(cmd, output=False, fatal=False)
+        if rc:
+            # success, rename the tmp
+            rc = self.execute('mv "{0}.tmp" "{0}"'.format(destination))
+        return rc
+
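Editor's sketch (not part of the diff) of the put/get pair above; `sh`, the host and the paths are assumptions, and `Ssh` is the class these methods belong to:

    ssh = Ssh(sh, "transfer-node", logname="someuser")
    if ssh.scpput("local/data.grib", "incoming/"):
        # destination ends with "/", so the remote name is incoming/data.grib
        print("upload ok")
    ok = ssh.scpget("incoming/data.grib", "back/")  # fetched as back/data.grib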
+    def scpget(self, source, destination, scpopts="", isadir=False):
+        r"""Retrieve the remote ``source`` into the local ``destination``.
+
+        - ``source`` is the remote name, not a pattern (no '\*.grib').
+        - ``destination`` is a single file or a directory, unless it ends with
+          '/', in which case it is the containing directory, and the local name
+          is the basename of ``source`` (like a real cp or scp):
+
+          - ``scp a/b.gif c/d.gif --> c/d.gif``
+          - ``scp a/b.gif c/d/ --> c/d/b.gif``
+
+        Return True for ok, False on error.
+        """
+        source, destination = self._scp_putget_commons(source, destination)
+
+        myremote = self.remote
+        if myremote is None:
+            return False
+
+        if not self.sh.filecocoon(destination):
+            return False
+
+        if isadir:
+            if not self.sh.remove(destination):
+                return False
+            scpopts += " -r"
+
+        # transfer to a temporary place.
+        # when ``source`` contains spaces, 1 round of quoting
+        # is necessary, to avoid an 'scp: ambiguous target' error.
+        cmd = (
+            [
+                self._scpcmd,
+            ]
+            + self._scpopts
+            + scpopts.split()
+            + [myremote + ":" + shlex.quote(source), destination + ".tmp"]
+        )
+        rc = self.sh.spawn(cmd, output=False, fatal=False)
+        if rc:
+            # success, rename the tmp
+            rc = self.sh.move(destination + ".tmp", destination)
+        return rc
+
+    def get_permissions(self, source):
+        """
+        Convenience method to retrieve the permissions of a file/dir (in a form
+        suitable for chmod).
+        """
+        mode = self.sh.stat(source).st_mode
+        return stat.S_IMODE(mode)
+
+    def scpput_stream(self, stream, destination, permissions=None, sshopts=""):
+        """Send the ``stream`` to the ``destination``.
+
+        - ``stream`` is a ``file`` (typically returned by open(),
+          or the piped output of a spawned process).
+        - ``destination`` is the remote file name.
+
+        Return True for ok, False on error.
+        """
+        if not isinstance(stream, io.IOBase):
+            msg = "stream is a {}, should be a <type 'file'>".format(
+                type(stream)
+            )
+            raise TypeError(msg)
+
+        if not isinstance(destination, str):
+            msg = "Destination is not a plain file path: {!r}".format(
+                destination
+            )
+            raise TypeError(msg)
+
+        myremote = self.remote
+        if myremote is None:
+            return False
+
+        if not self.cocoon(destination):
+            return False
+
+        # transfer to a tmp, rename and set permissions in one go
+        remote_cmd = "cat > {0}.tmp && mv {0}.tmp {0}".format(
+            shlex.quote(destination)
+        )
+        if permissions:
+            remote_cmd += " && chmod -v {:o} {}".format(
+                permissions, shlex.quote(destination)
+            )
+
+        cmd = (
+            [
+                self._sshcmd,
+            ]
+            + self._sshopts
+            + sshopts.split()
+            + [myremote, remote_cmd]
+        )
+        return self.sh.spawn(cmd, stdin=stream, output=False, fatal=False)
+
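Editor's sketch (not part of the diff) showing how get_permissions() and scpput_stream() are naturally combined to keep the remote copy's mode in line with the local file; the `sh` object, host and paths are assumptions:

    ssh = Ssh(sh, "archive-node")
    with open("run/listing.txt", "rb") as fh:
        ok = ssh.scpput_stream(
            fh,
            "listings/listing.txt",
            permissions=ssh.get_permissions("run/listing.txt"),
        )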
+    def scpget_stream(self, source, stream, sshopts=""):
+        """Dump the remote ``source`` into the local ``stream``.
+
+        - ``source`` is the remote file name.
+        - ``stream`` is a ``file`` (typically returned by open(),
+          or the piped output of a spawned process).
+
+        Return True for ok, False on error.
+        """
+        if not isinstance(stream, io.IOBase):
+            msg = "stream is a {}, should be a <type 'file'>".format(
+                type(stream)
+            )
+            raise TypeError(msg)
+
+        if not isinstance(source, str):
+            msg = "Source is not a plain file path: {!r}".format(source)
+            raise TypeError(msg)
+
+        myremote = self.remote
+        if myremote is None:
+            return False
+
+        # simply cat the remote file into the local stream
+        remote_cmd = "cat {}".format(shlex.quote(source))
+        cmd = (
+            [
+                self._sshcmd,
+            ]
+            + self._sshopts
+            + sshopts.split()
+            + [myremote, remote_cmd]
+        )
+        return self.sh.spawn(cmd, output=stream, fatal=False)
+
+    def tunnel(
+        self,
+        finaldestination,
+        finalport=0,
+        entranceport=None,
+        maxwait=3.0,
+        checkdelay=0.25,
+    ):
+        """Create an SSH tunnel and check that it actually starts.
+
+        :param str finaldestination: The destination hostname (i.e. the machine
+                                     at the far end of the tunnel). If the
+                                     "socks" special value is provided, the SSH
+                                     tunnel will behave as a SOCKS4/SOCKS5 proxy.
+        :param int finalport: The destination port.
+        :param int entranceport: The port number of the tunnel entrance (if None,
+                                 which is the default, it is automatically
+                                 assigned).
+        :param float maxwait: The maximum time to wait for the entrance port to
+                              be opened by the SSH client (if the entrance port
+                              is not ready by that time, the SSH command is
+                              considered to have failed).
+        :param float checkdelay: The delay between two checks of the entrance port.
+        :return: False if the tunnel command failed, otherwise an object that
+                 contains all kinds of details on the SSH tunnel.
+        :rtype: ActiveSshTunnel
+        """
+
+        myremote = self.remote
+        if myremote is None:
+            return False
+
+        if entranceport is None:
+            entranceport = self.sh.available_localport()
+        else:
+            if self.sh.check_localport(entranceport):
+                logger.error(
+                    "The SSH tunnel creation failed "
+                    + "(entrance: %d, dest: %s:%d, via %s).",
+                    entranceport,
+                    finaldestination,
+                    finalport,
+                    myremote,
+                )
+                logger.error("The entrance port is already in use.")
+                return False
+        if finaldestination == "socks":
+            p = self.sh.popen(
+                [
+                    self._sshcmd,
+                ]
+                + self._sshopts
+                + ["-N", "-D", "{:d}".format(entranceport), myremote],
+                stdin=False,
+                output=False,
+            )
+        else:
+            if finalport <= 0:
+                raise ValueError(
+                    "Erroneous finalport value: {!s}".format(finalport)
+                )
+            p = self.sh.popen(
+                [
+                    self._sshcmd,
+                ]
+                + self._sshopts
+                + [
+                    "-N",
+                    "-L",
+                    "{:d}:{:s}:{:d}".format(
+                        entranceport, finaldestination, finalport
+                    ),
+                    myremote,
+                ],
+                stdin=False,
+                output=False,
+            )
+        tunnel = ActiveSshTunnel(
+            self.sh, p, entranceport, finaldestination, finalport
+        )
+        elapsed = 0.0
+        while (
+            not self.sh.check_localport(entranceport)
+        ) and elapsed < maxwait:
+            self.sh.sleep(checkdelay)
+            elapsed += checkdelay
+        if not self.sh.check_localport(entranceport):
+            logger.error(
+                "The SSH tunnel creation failed "
+                + "(entrance: %d, dest: %s:%d, via %s).",
+                entranceport,
+                finaldestination,
+                finalport,
+                myremote,
+            )
+            tunnel.close()
+            tunnel = False
+        logger.info(
+            "SSH tunnel opened, enjoy the ride ! "
+            + "(entrance: %d, dest: %s:%d, via %s).",
+            entranceport,
+            finaldestination,
+            finalport,
+            myremote,
+        )
+        return tunnel
+
+
+class ActiveSshTunnel:
+    """Hold an opened SSH tunnel."""
+
+    def __init__(
+        self, sh, activeprocess, entranceport, finaldestination, finalport
+    ):
+        """
+        :param sh: The :class:`System` object that is to be used.
+        :param Popen activeprocess: The active tunnel process.
+        :param int entranceport: Tunnel's entrance port.
+        :param str finaldestination: Tunnel's final destination.
+        :param int finalport: Tunnel's destination port.
+
+        Objects of this class can be used as context managers (the tunnel will
+        be closed when the context is exited).
+        """
+        self._sh = sh
+        self.activeprocess = activeprocess
+        self.entranceport = entranceport
+        self.finaldestination = finaldestination
+        self.finalport = finalport
+
+    def __del__(self):
+        self.close()
+
+    def close(self):
+        """Close the tunnel (i.e. kill the SSH process)."""
+        if self.opened:
+            self.activeprocess.terminate()
+            t0 = time.time()
+            while self.opened and time.time() - t0 < 5:
+                self._sh.sleep(0.1)
+            logger.debug(
+                "Tunnel termination took: %f seconds", time.time() - t0
+            )
+            if self.opened:
+                logger.debug("Tunnel termination failed: issuing SIGKILL")
+                self.activeprocess.kill()
+            logger.info(
+                "SSH tunnel closed (entrance: %d, dest: %s:%d).",
+                self.entranceport,
+                self.finaldestination,
+                self.finalport,
+            )
+
+    @property
+    def opened(self):
+        """Is the tunnel still open?"""
+        return self.activeprocess.poll() is None
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):  # @UnusedVariable
+        self.close()
+
+
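Editor's sketch (not part of the diff) of how tunnel() and the ActiveSshTunnel context manager defined above fit together; the `sh` object, host and port values are assumptions:

    ssh = Ssh(sh, "gateway-node")
    active = ssh.tunnel("database-host", 5432, maxwait=5.0)
    if active:
        with active:
            # talk to 127.0.0.1:active.entranceport while the tunnel is up
            print("tunnel entrance port:", active.entranceport)
        # leaving the "with" block closes the tunnel (kills the ssh process)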
+@nicedeco
+def _check_fatal(func):
+    """Decorator: raise an exception whenever fatal=True and the return code is not True.
+
+    This decorator is very specialised and should be used solely with the AssistedSsh
+    class since it relies on several attributes (_fatal, _maxtries).
+    """
+
+    def wrapped(*args, **kwargs):
+        self = args[0]
+        if self._fatal_in_progress:
+            return func(self, *args[1:], **kwargs)
+        else:
+            # This trick ensures that only one fatal check is attempted
+            self._fatal_in_progress = True
+            try:
+                rc = func(self, *args[1:], **kwargs)
+                if not rc:
+                    logger.error(
+                        "The maximum number of retries (%s) was reached...",
+                        self._maxtries,
+                    )
+                    if self._fatal:
+                        raise RuntimeError(
+                            "Could not execute the SSH command."
+                        )
+            finally:
+                self._fatal_in_progress = False
+            return rc
+
+    return wrapped
+
+
|
+
@nicedeco
|
|
1728
|
+
def _tryagain(func):
|
|
1729
|
+
"""decorator: whenever the return code != True, several attempts are made according to self._maxtries.
|
|
1730
|
+
|
|
1731
|
+
This decorator is very specialised and should be used solely with the AssistedSsh
|
|
1732
|
+
class since it relies on several attributes (_retry_in_progress, _retries, _maxtries).
|
|
1733
|
+
"""
|
|
1734
|
+
|
|
1735
|
+
def wrapped(*args, **kwargs):
|
|
1736
|
+
self = args[0]
|
|
1737
|
+
if self._retry_in_progress:
|
|
1738
|
+
return func(self, *args[1:], **kwargs)
|
|
1739
|
+
else:
|
|
1740
|
+
# This trick ensures that only one retry loop is attempted
|
|
1741
|
+
self._retry_in_progress = True
|
|
1742
|
+
trycount = 1
|
|
1743
|
+
try:
|
|
1744
|
+
rc = func(self, *args[1:], **kwargs)
|
|
1745
|
+
while not rc and trycount < self._maxtries:
|
|
1746
|
+
trycount += 1
|
|
1747
|
+
logger.info(
|
|
1748
|
+
"Trying again (retries=%d/%d)...",
|
|
1749
|
+
trycount,
|
|
1750
|
+
self._maxtries,
|
|
1751
|
+
)
|
|
1752
|
+
self.sh.sleep(self._triesdelay)
|
|
1753
|
+
rc = func(self, *args[1:], **kwargs)
|
|
1754
|
+
finally:
|
|
1755
|
+
self._retries = trycount
|
|
1756
|
+
self._retry_in_progress = False
|
|
1757
|
+
return rc
|
|
1758
|
+
|
|
1759
|
+
return wrapped
|
|
1760
|
+
|
|
1761
|
+
|
|
1762
|
+
class _AssistedSshMeta(type):
|
|
1763
|
+
"""Specialized metaclass for AssitedSsh."""
|
|
1764
|
+
|
|
1765
|
+
def __new__(cls, n, b, d):
|
|
1766
|
+
"""Adds _tryagain and _check_fatal decorators on a list of inherited methods.
|
|
1767
|
+
|
|
1768
|
+
This is controled by two class variables:
|
|
1769
|
+
|
|
1770
|
+
- _auto_retries: list of inherited methods that should be decorated
|
|
1771
|
+
with _tryagin
|
|
1772
|
+
- _auto_checkfatal: list of inherited methods that should be
|
|
1773
|
+
decorated with _check_fatal
|
|
1774
|
+
|
|
1775
|
+
Note: it only acts on inherited methods. For overridden methods,
|
|
1776
|
+
decorators have to be added manually.
|
|
1777
|
+
"""
|
|
1778
|
+
bare_methods = list(d.keys())
|
|
1779
|
+
# Add the tryagain decorator...
|
|
1780
|
+
for tagain in [x for x in d["_auto_retries"] if x not in bare_methods]:
|
|
1781
|
+
inherited = [base for base in b if hasattr(base, tagain)]
|
|
1782
|
+
d[tagain] = _tryagain(getattr(inherited[0], tagain))
|
|
1783
|
+
# Add the check_fatal decorator...
|
|
1784
|
+
for cfatal in [
|
|
1785
|
+
x for x in d["_auto_checkfatal"] if x not in bare_methods
|
|
1786
|
+
]:
|
|
1787
|
+
inherited = [base for base in b if hasattr(base, cfatal)]
|
|
1788
|
+
d[cfatal] = _check_fatal(
|
|
1789
|
+
d.get(cfatal, getattr(inherited[0], cfatal))
|
|
1790
|
+
)
|
|
1791
|
+
return super().__new__(cls, n, b, d)
|
|
1792
|
+
|
|
1793
|
+
|
|
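The metaclass above only wraps methods that a subclass inherits; anything redefined in the class body must be decorated by hand. A hypothetical sketch (not part of the package; at run time the wrappers also rely on the retry/fatal attributes that AssistedSsh.__init__ sets up):

    class MyNodeSsh(Ssh, metaclass=_AssistedSshMeta):
        # Inherited methods listed here are wrapped automatically...
        _auto_checkfatal = ["execute", "cocoon"]
        _auto_retries = ["execute", "cocoon"]

        # ...while an overridden method must be decorated by hand.
        @_check_fatal
        @_tryagain
        def remove(self, target):
            return super().remove(target)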
+class AssistedSsh(Ssh, metaclass=_AssistedSshMeta):
+    """Remote command execution via ssh.
+
+    Also handles remote copy via scp or ssh, which is intimately linked.
+    Compared to the :class:`Ssh` class it adds:
+
+    - retry capabilities
+    - support for multiple hostnames (a hostname is picked from the hostnames
+      list and tested; if the test succeeds it is chosen, otherwise the next
+      hostname is tested, and so on).
+    - virtual nodes support (i.e. the real hostnames associated with a virtual
+      node name are read from the configuration file).
+
+    Examples (`sh` being an :class:`~vortex.tools.systems.OSExtended` object):
+
+    - Basic use::
+
+        >>> ssh1 = AssistedSsh(sh, 'localhost')
+        >>> print(ssh1, ssh1.remote)
+        <vortex.tools.net.AssistedSsh object at 0x7fac3bb19810> localhost
+        >>> ssh1.execute("echo -n 'My name is: '; hostname")
+        ['My name is: belenoslogin3']
+
+    - Using virtual node names (let's consider here that "network" nodes are
+      defined in the current target-?.ini configuration file)::
+
+        >>> ssh2 = AssistedSsh(sh, 'network', virtualnode=True)
+        >>> print(ssh2, ssh2.targets)  # The list of possible network nodes
+        ['belenoslogin0', 'belenoslogin1', 'belenoslogin2', 'belenoslogin3', ]
+        >>> print(ssh2, ssh2.remote)  # Pick one randomly
+        'belenoslogin2'
+
+    - The multiple retries concept::
+
+        >>> ssh3 = AssistedSsh(sh, 'network', virtualnode=True, maxtries=3)
+        >>> print(ssh3, ssh3.remote)  # Pick one randomly
+        'belenoslogin0'
+        >>> ssh3.execute("false")
+        # [2018/02/19-11:29:00][vortex.tools.systems][spawn:0878][WARNING]:
+          Bad return code [1] for ['ssh', '-x', 'belenoslogin0', 'false']
+        # [2018/02/19-11:29:00][vortex.tools.systems][spawn:0885][WARNING]: Carry on because fatal is off
+        # [2018/02/19-11:29:00][vortex.tools.net][wrapped:1296][INFO]: Trying again (retries=2/3)...
+        # [2018/02/19-11:29:01][vortex.tools.systems][spawn:0878][WARNING]:
+          Bad return code [1] for ['ssh', '-x', 'belenoslogin0', 'false']
+        # [2018/02/19-11:29:01][vortex.tools.systems][spawn:0885][WARNING]: Carry on because fatal is off
+        # [2018/02/19-11:29:01][vortex.tools.net][wrapped:1296][INFO]: Trying again (retries=3/3)...
+        # [2018/02/19-11:29:02][vortex.tools.systems][spawn:0878][WARNING]:
+          Bad return code [1] for ['ssh', '-x', 'belenoslogin0', 'false']
+        # [2018/02/19-11:29:02][vortex.tools.systems][spawn:0885][WARNING]: Carry on because fatal is off
+        # [2018/02/19-11:29:02][vortex.tools.net][wrapped:1268][ERROR]: The maximum number of retries (3) was reached...
+        False
+
+    - Raise an exception on failure::
+
+        >>> ssh4 = AssistedSsh(sh, 'network', virtualnode=True, fatal=True)
+        >>> ssh4.execute("false")
+        # [2018/02/19-11:29:00][vortex.tools.systems][spawn:0878][WARNING]:
+          Bad return code [1] for ['ssh', '-x', 'belenoslogin0', 'false']
+        # [2018/02/19-11:29:00][vortex.tools.systems][spawn:0885][WARNING]: Carry on because fatal is off
+        # [2018/02/19-11:29:02][vortex.tools.net][wrapped:1268][ERROR]: The maximum number of retries (1) was reached...
+        RuntimeError: Could not execute the SSH command.
+
+    """
+
+    _auto_checkfatal = [
+        "check_ok",
+        "execute",
+        "cocoon",
+        "remove",
+        "scpput",
+        "scpget",
+        "scpput_stream",
+        "scpget_stream",
+        "tunnel",
+    ]
+    # No retries on scpput_stream since it's not guaranteed that the stream is seekable.
+    _auto_retries = [
+        "check_ok",
+        "execute",
+        "cocoon",
+        "remove",
+        "scpput",
+        "scpget",
+        "tunnel",
+    ]
+
+    def __init__(
+        self,
+        sh,
+        hostname,
+        logname=None,
+        sshopts=None,
+        scpopts=None,
+        maxtries=1,
+        triesdelay=1,
+        virtualnode=False,
+        permut=True,
+        fatal=False,
+        mandatory_hostcheck=True,
+    ):
+        """
+        :param System sh: The :class:`System` object that is to be used.
+        :param hostname: The target hostname(s).
+        :type hostname: str or list
+        :param logname: The logname for the Ssh commands.
+        :param str sshopts: Extra SSH options (in addition to the configuration file ones).
+        :param str scpopts: Extra SCP options (in addition to the configuration file ones).
+        :param int maxtries: The maximum number of retries.
+        :param int triesdelay: The delay in seconds between retries.
+        :param bool virtualnode: If True, the *hostname* is considered to be a
+                                 virtual node name. It is therefore looked up in
+                                 the configuration file.
+        :param bool permut: If True, the hostnames list is shuffled prior to
+                            being used.
+        :param bool fatal: If True, a RuntimeError exception is raised whenever
+                           something fails.
+        :param mandatory_hostcheck: If True and several host names are provided,
+                                    the hostname is always checked prior to being
+                                    used for the real Ssh command. When a single
+                                    host name is provided, such a check is never
+                                    performed.
+        """
+        super().__init__(sh, hostname, logname, sshopts, scpopts)
+        self._triesdelay = triesdelay
+        self._virtualnode = virtualnode
+        self._permut = permut
+        self._fatal = fatal
+        self._mandatory_hostcheck = mandatory_hostcheck
+        if self._virtualnode and isinstance(self._remote, (list, tuple)):
+            raise ValueError(
+                "When virtual nodes are used, the hostname must be a string"
+            )
+
+        self._retry_in_progress = False
+        self._fatal_in_progress = False
+        self._retries = 0
+        self._targets = self._setup_targets()
+        self._targets_iter = itertools.cycle(self._targets)
+        if not self._mandatory_hostcheck and len(self._targets) > 1:
+            # Try at least one time with each of the possible targets
+            self._maxtries = maxtries + len(self._targets) - 1
+        else:
+            self._maxtries = maxtries
+        self._chosen_target = None
+
+    def _setup_targets(self):
+        """Build the actual hostnames list."""
+        if self._virtualnode:
+            targets = self.sh.default_target.specialproxies[self._remote]
+        else:
+            if isinstance(self._remote, (list, tuple)):
+                targets = self._remote
+            else:
+                targets = [self._remote]
+        if self._logname is not None:
+            targets = [self._logname + "@" + x for x in targets]
+        if self._permut:
+            random.shuffle(targets)
+        return targets
+
+    @property
+    def targets(self):
+        """The actual hostnames list."""
+        return self._targets
+
+    @property
+    def retries(self):
+        """The number of tries made for the last Ssh command."""
+        return self._retries
+
+    @property
+    @_check_fatal
+    @_tryagain
+    def remote(self):
+        """Hostname to use for this kind of remote execution."""
+        if len(self.targets) == 1:
+            # This is simple enough, do not bother testing...
+            self._chosen_target = self.targets[0]
+        # Ok, let's take self._mandatory_hostcheck into account
+        if self._mandatory_hostcheck:
+            if self._chosen_target is None:
+                for guess in self.targets:
+                    cmd = (
+                        [
+                            self._sshcmd,
+                        ]
+                        + self._sshopts
+                        + [
+                            guess,
+                            "true",
+                        ]
+                    )
+                    try:
+                        self.sh.spawn(cmd, output=False, silent=True)
+                    except Exception:
+                        pass
+                    else:
+                        self._chosen_target = guess
+                        break
+            return self._chosen_target
+        else:
+            return next(self._targets_iter)
+
+
+_ConnectionStatusAttrs = (
+    "Family",
+    "LocalAddr",
+    "LocalPort",
+    "DestAddr",
+    "DestPort",
+    "Status",
+)
+TcpConnectionStatus = namedtuple("TcpConnectionStatus", _ConnectionStatusAttrs)
+UdpConnectionStatus = namedtuple("UdpConnectionStatus", _ConnectionStatusAttrs)
+
+
+class AbstractNetstats(metaclass=abc.ABCMeta):
+    """AbstractNetstats classes provide all kinds of information on network connections."""
+
+    @property
+    @abc.abstractmethod
+    def unprivileged_ports(self):
+        """The list of unprivileged ports that may be opened by any user."""
+        pass
+
+    @abc.abstractmethod
+    def tcp_netstats(self):
+        """Information on active TCP connections.
+
+        Returns a list of :class:`TcpConnectionStatus` objects.
+        """
+        pass
+
+    @abc.abstractmethod
+    def udp_netstats(self):
+        """Information on active UDP connections.
+
+        Returns a list of :class:`UdpConnectionStatus` objects.
+        """
+        pass
+
+    def available_localport(self):
+        """Return the number of an unused unprivileged port."""
+        netstats = self.tcp_netstats() + self.udp_netstats()
+        busyports = {x.LocalPort for x in netstats}
+        busy = True
+        while busy:
+            guess_port = random.choice(self.unprivileged_ports)
+            busy = guess_port in busyports
+        return guess_port
+
+    def check_localport(self, port):
+        """Check if ``port`` is currently in use."""
+        netstats = self.tcp_netstats() + self.udp_netstats()
+        busyports = {x.LocalPort for x in netstats}
+        return port in busyports
+
+
+class LinuxNetstats(AbstractNetstats):
+    """A Netstats implementation for Linux (based on the /proc/net data)."""
+
+    _LINUX_LPORT = "/proc/sys/net/ipv4/ip_local_port_range"
+    _LINUX_PORTS_V4 = {"tcp": "/proc/net/tcp", "udp": "/proc/net/udp"}
+    _LINUX_PORTS_V6 = {"tcp": "/proc/net/tcp6", "udp": "/proc/net/udp6"}
+    _LINUX_AF_INET4 = socket.AF_INET
+    _LINUX_AF_INET6 = socket.AF_INET6
+
+    def __init__(self):
+        self.__unprivileged_ports = None
+
+    @property
+    def unprivileged_ports(self):
+        if self.__unprivileged_ports is None:
+            with open(self._LINUX_LPORT) as tmprange:
+                tmpports = [int(x) for x in tmprange.readline().split()]
+            unports = set(range(5001, 65536))
+            self.__unprivileged_ports = sorted(
+                unports - set(range(tmpports[0], tmpports[1] + 1))
+            )
+        return self.__unprivileged_ports
+
+    @classmethod
+    def _ip_from_hex(cls, hexip, family=_LINUX_AF_INET4):
+        if family == cls._LINUX_AF_INET4:
+            packed = struct.pack(b"<I", int(hexip, 16))
+        elif family == cls._LINUX_AF_INET6:
+            packed = struct.unpack(b">IIII", binascii.a2b_hex(hexip))
+            packed = struct.pack(b"@IIII", *packed)
+        else:
+            raise ValueError("Unknown address family.")
+        return socket.inet_ntop(family, packed)
+
+    def _generic_netstats(self, proto, rclass):
+        tmpports = dict()
+        with open(self._LINUX_PORTS_V4[proto]) as netstats:
+            netstats.readline()  # Skip the header line
+            tmpports[self._LINUX_AF_INET4] = [
+                re.split(r":\b|\s+", x.strip())[1:6]
+                for x in netstats.readlines()
+            ]
+        try:
+            with open(self._LINUX_PORTS_V6[proto]) as netstats:
+                netstats.readline()  # Skip the header line
+                tmpports[self._LINUX_AF_INET6] = [
+                    re.split(r":\b|\s+", x.strip())[1:6]
+                    for x in netstats.readlines()
+                ]
+        except OSError:
+            # Apparently, no IPv6 support on this machine
+            tmpports[self._LINUX_AF_INET6] = []
+        tmpports = [
+            [
+                rclass(
+                    family,
+                    self._ip_from_hex(l[0], family),
+                    int(l[1], 16),
+                    self._ip_from_hex(l[2], family),
+                    int(l[3], 16),
+                    int(l[4], 16),
+                )
+                for l in tmpports[family]
+            ]
+            for family in (self._LINUX_AF_INET4, self._LINUX_AF_INET6)
+        ]
+        return functools.reduce(operator.add, tmpports)
+
+    def tcp_netstats(self):
+        return self._generic_netstats("tcp", TcpConnectionStatus)
+
+    def udp_netstats(self):
+        return self._generic_netstats("udp", UdpConnectionStatus)
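Editor's sketch (not part of the diff) of the netstats helpers that back available_localport() and check_localport(), which Ssh.tunnel() relies on; the printed values are machine dependent:

    ns = LinuxNetstats()
    free_port = ns.available_localport()             # an unused, unprivileged port
    print(free_port, ns.check_localport(free_port))  # e.g. "7042 False" on a typical box
    print(len(ns.tcp_netstats()), "active TCP entries")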