vortex-nwp 2.0.0b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vortex/__init__.py +135 -0
- vortex/algo/__init__.py +12 -0
- vortex/algo/components.py +2136 -0
- vortex/algo/mpitools.py +1648 -0
- vortex/algo/mpitools_templates/envelope_wrapper_default.tpl +27 -0
- vortex/algo/mpitools_templates/envelope_wrapper_mpiauto.tpl +29 -0
- vortex/algo/mpitools_templates/wrapstd_wrapper_default.tpl +18 -0
- vortex/algo/serversynctools.py +170 -0
- vortex/config.py +115 -0
- vortex/data/__init__.py +13 -0
- vortex/data/abstractstores.py +1572 -0
- vortex/data/containers.py +780 -0
- vortex/data/contents.py +596 -0
- vortex/data/executables.py +284 -0
- vortex/data/flow.py +113 -0
- vortex/data/geometries.ini +2689 -0
- vortex/data/geometries.py +703 -0
- vortex/data/handlers.py +1021 -0
- vortex/data/outflow.py +67 -0
- vortex/data/providers.py +465 -0
- vortex/data/resources.py +201 -0
- vortex/data/stores.py +1271 -0
- vortex/gloves.py +282 -0
- vortex/layout/__init__.py +27 -0
- vortex/layout/appconf.py +109 -0
- vortex/layout/contexts.py +511 -0
- vortex/layout/dataflow.py +1069 -0
- vortex/layout/jobs.py +1276 -0
- vortex/layout/monitor.py +833 -0
- vortex/layout/nodes.py +1424 -0
- vortex/layout/subjobs.py +464 -0
- vortex/nwp/__init__.py +11 -0
- vortex/nwp/algo/__init__.py +12 -0
- vortex/nwp/algo/assim.py +483 -0
- vortex/nwp/algo/clim.py +920 -0
- vortex/nwp/algo/coupling.py +609 -0
- vortex/nwp/algo/eda.py +632 -0
- vortex/nwp/algo/eps.py +613 -0
- vortex/nwp/algo/forecasts.py +745 -0
- vortex/nwp/algo/fpserver.py +927 -0
- vortex/nwp/algo/ifsnaming.py +403 -0
- vortex/nwp/algo/ifsroot.py +311 -0
- vortex/nwp/algo/monitoring.py +202 -0
- vortex/nwp/algo/mpitools.py +554 -0
- vortex/nwp/algo/odbtools.py +974 -0
- vortex/nwp/algo/oopsroot.py +735 -0
- vortex/nwp/algo/oopstests.py +186 -0
- vortex/nwp/algo/request.py +579 -0
- vortex/nwp/algo/stdpost.py +1285 -0
- vortex/nwp/data/__init__.py +12 -0
- vortex/nwp/data/assim.py +392 -0
- vortex/nwp/data/boundaries.py +261 -0
- vortex/nwp/data/climfiles.py +539 -0
- vortex/nwp/data/configfiles.py +149 -0
- vortex/nwp/data/consts.py +929 -0
- vortex/nwp/data/ctpini.py +133 -0
- vortex/nwp/data/diagnostics.py +181 -0
- vortex/nwp/data/eda.py +148 -0
- vortex/nwp/data/eps.py +383 -0
- vortex/nwp/data/executables.py +1039 -0
- vortex/nwp/data/fields.py +96 -0
- vortex/nwp/data/gridfiles.py +308 -0
- vortex/nwp/data/logs.py +551 -0
- vortex/nwp/data/modelstates.py +334 -0
- vortex/nwp/data/monitoring.py +220 -0
- vortex/nwp/data/namelists.py +644 -0
- vortex/nwp/data/obs.py +748 -0
- vortex/nwp/data/oopsexec.py +72 -0
- vortex/nwp/data/providers.py +182 -0
- vortex/nwp/data/query.py +217 -0
- vortex/nwp/data/stores.py +147 -0
- vortex/nwp/data/surfex.py +338 -0
- vortex/nwp/syntax/__init__.py +9 -0
- vortex/nwp/syntax/stdattrs.py +375 -0
- vortex/nwp/tools/__init__.py +10 -0
- vortex/nwp/tools/addons.py +35 -0
- vortex/nwp/tools/agt.py +55 -0
- vortex/nwp/tools/bdap.py +48 -0
- vortex/nwp/tools/bdcp.py +38 -0
- vortex/nwp/tools/bdm.py +21 -0
- vortex/nwp/tools/bdmp.py +49 -0
- vortex/nwp/tools/conftools.py +1311 -0
- vortex/nwp/tools/drhook.py +62 -0
- vortex/nwp/tools/grib.py +268 -0
- vortex/nwp/tools/gribdiff.py +99 -0
- vortex/nwp/tools/ifstools.py +163 -0
- vortex/nwp/tools/igastuff.py +249 -0
- vortex/nwp/tools/mars.py +56 -0
- vortex/nwp/tools/odb.py +548 -0
- vortex/nwp/tools/partitioning.py +234 -0
- vortex/nwp/tools/satrad.py +56 -0
- vortex/nwp/util/__init__.py +6 -0
- vortex/nwp/util/async.py +184 -0
- vortex/nwp/util/beacon.py +40 -0
- vortex/nwp/util/diffpygram.py +359 -0
- vortex/nwp/util/ens.py +198 -0
- vortex/nwp/util/hooks.py +128 -0
- vortex/nwp/util/taskdeco.py +81 -0
- vortex/nwp/util/usepygram.py +591 -0
- vortex/nwp/util/usetnt.py +87 -0
- vortex/proxy.py +6 -0
- vortex/sessions.py +341 -0
- vortex/syntax/__init__.py +9 -0
- vortex/syntax/stdattrs.py +628 -0
- vortex/syntax/stddeco.py +176 -0
- vortex/toolbox.py +982 -0
- vortex/tools/__init__.py +11 -0
- vortex/tools/actions.py +457 -0
- vortex/tools/addons.py +297 -0
- vortex/tools/arm.py +76 -0
- vortex/tools/compression.py +322 -0
- vortex/tools/date.py +20 -0
- vortex/tools/ddhpack.py +10 -0
- vortex/tools/delayedactions.py +672 -0
- vortex/tools/env.py +513 -0
- vortex/tools/folder.py +663 -0
- vortex/tools/grib.py +559 -0
- vortex/tools/lfi.py +746 -0
- vortex/tools/listings.py +354 -0
- vortex/tools/names.py +575 -0
- vortex/tools/net.py +1790 -0
- vortex/tools/odb.py +10 -0
- vortex/tools/parallelism.py +336 -0
- vortex/tools/prestaging.py +186 -0
- vortex/tools/rawfiles.py +10 -0
- vortex/tools/schedulers.py +413 -0
- vortex/tools/services.py +871 -0
- vortex/tools/storage.py +1061 -0
- vortex/tools/surfex.py +61 -0
- vortex/tools/systems.py +3396 -0
- vortex/tools/targets.py +384 -0
- vortex/util/__init__.py +9 -0
- vortex/util/config.py +1071 -0
- vortex/util/empty.py +24 -0
- vortex/util/helpers.py +184 -0
- vortex/util/introspection.py +63 -0
- vortex/util/iosponge.py +76 -0
- vortex/util/roles.py +51 -0
- vortex/util/storefunctions.py +103 -0
- vortex/util/structs.py +26 -0
- vortex/util/worker.py +150 -0
- vortex_nwp-2.0.0b1.dist-info/LICENSE +517 -0
- vortex_nwp-2.0.0b1.dist-info/METADATA +50 -0
- vortex_nwp-2.0.0b1.dist-info/RECORD +146 -0
- vortex_nwp-2.0.0b1.dist-info/WHEEL +5 -0
- vortex_nwp-2.0.0b1.dist-info/top_level.txt +1 -0
vortex/tools/systems.py
ADDED
|
@@ -0,0 +1,3396 @@
|
|
|
1
|
+
"""
|
|
2
|
+
This package handles system interfaces objects that are in charge of
|
|
3
|
+
system interaction. Systems objects use the :mod:`footprints` mechanism.
|
|
4
|
+
|
|
5
|
+
The current active System object should be retrieved using the session's Ticket
|
|
6
|
+
(*i.e.* System classes should not be instantiated directly) ::
|
|
7
|
+
|
|
8
|
+
t = vortex.ticket()
|
|
9
|
+
sh = t.sh
|
|
10
|
+
|
|
11
|
+
The System retrieved by this property will always be an instance of subclasses of
|
|
12
|
+
:class:`OSExtended`. Consequently, you can safely assume that all attributes,
|
|
13
|
+
properties and methods available in :class:`OSExtended` and :class:`System` are
|
|
14
|
+
available to you.
|
|
15
|
+
|
|
16
|
+
When working with System objects, preferentially use high-level methods such as
|
|
17
|
+
:meth:`~OSExtended.cp`, :meth:`~OSExtended.mv`, :meth:`~OSExtended.rm`,
|
|
18
|
+
:meth:`~OSExtended.smartftput`, :meth:`~OSExtended.smartftget`, ...
|
|
19
|
+
|
|
20
|
+
"""
|
|
21
|
+
|
|
22
|
+
import contextlib
|
|
23
|
+
import errno
|
|
24
|
+
import filecmp
|
|
25
|
+
import ftplib
|
|
26
|
+
import functools
|
|
27
|
+
import glob
|
|
28
|
+
import hashlib
|
|
29
|
+
import importlib
|
|
30
|
+
import io
|
|
31
|
+
import json
|
|
32
|
+
import locale
|
|
33
|
+
import multiprocessing
|
|
34
|
+
import os
|
|
35
|
+
import pickle
|
|
36
|
+
import platform
|
|
37
|
+
import pwd as passwd
|
|
38
|
+
import random
|
|
39
|
+
import re
|
|
40
|
+
import resource
|
|
41
|
+
import shutil
|
|
42
|
+
import signal
|
|
43
|
+
import socket
|
|
44
|
+
import stat
|
|
45
|
+
import subprocess
|
|
46
|
+
import sys
|
|
47
|
+
import tarfile
|
|
48
|
+
import tempfile
|
|
49
|
+
import threading
|
|
50
|
+
import time
|
|
51
|
+
import uuid
|
|
52
|
+
from collections import namedtuple
|
|
53
|
+
|
|
54
|
+
import footprints
|
|
55
|
+
from bronx.fancies import loggers
|
|
56
|
+
from bronx.stdtypes import date
|
|
57
|
+
from bronx.stdtypes.history import History
|
|
58
|
+
from bronx.syntax.decorators import nicedeco_plusdoc, secure_getattr
|
|
59
|
+
from bronx.syntax.externalcode import ExternalCodeImportChecker
|
|
60
|
+
from bronx.system.cpus import LinuxCpusInfo
|
|
61
|
+
from bronx.system.interrupt import SignalInterruptError, SignalInterruptHandler
|
|
62
|
+
from bronx.system.memory import LinuxMemInfo
|
|
63
|
+
from bronx.system.numa import LibNumaNodesInfo
|
|
64
|
+
from vortex.gloves import Glove
|
|
65
|
+
from vortex.syntax.stdattrs import DelayedInit
|
|
66
|
+
from vortex.tools.compression import CompressionPipeline
|
|
67
|
+
from vortex.tools.env import Environment
|
|
68
|
+
from vortex.tools.net import AssistedSsh, AutoRetriesFtp, DEFAULT_FTP_PORT
|
|
69
|
+
from vortex.tools.net import FtpConnectionPool, LinuxNetstats, StdFtp
|
|
70
|
+
|
|
71
|
+
#: No automatic export
__all__ = []

logger = loggers.getLogger(__name__)

#: Pre-compiled regex to check a none str value
isnonedef = re.compile(r'\s*none\s*$', re.IGNORECASE)

#: Pre-compiled regex to check a boolean true str value
istruedef = re.compile(r'\s*(on|true|ok)\s*$', re.IGNORECASE)

#: Pre-compiled regex to check a boolean false str value
isfalsedef = re.compile(r'\s*(off|false|ko)\s*$', re.IGNORECASE)

#: Global lock to protect temporary locale changes
LOCALE_LOCK = threading.Lock()

# Documentation snippet appended (via nicedeco_plusdoc) to the docstring of
# every method decorated with :func:`fmtshcmd`.
# NOTE: fixed a typo in the original text ("**ftm**" -> "**fmt**").
_fmtshcmd_docbonus = """

This method is decorated by :func:`fmtshcmd`, consequently it accepts
an additional **fmt** attribute that might alter this method behaviour
(*i.e.* if a ``thefmt_{name:s}`` method exists (where ``thefmt`` is the
value of the **fmt** attribute), it will be executed instead of the
present one).
"""

# Constant items

#: Definition of a named tuple ftpflavour
FtpFlavourTuple = namedtuple('FtpFlavourTuple', ['STD', 'RETRIES', 'CONNECTION_POOLS'])

#: Predefined FTP_FLAVOUR values STD, RETRIES and CONNECTION_POOLS.
FTP_FLAVOUR = FtpFlavourTuple(STD=0, RETRIES=1, CONNECTION_POOLS=2)
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
@nicedeco_plusdoc(_fmtshcmd_docbonus)
def fmtshcmd(func):
    """This decorator gives a try to the equivalent formatted command.

    Example: let ``decomethod`` be a method decorated with the present
    decorator. When a user calls ``decomethod(..., fmt='toto')``, a method
    named ``toto_decomethod`` is looked up: if it exists it is called,
    otherwise the original method is used.
    """

    def formatted_method(self, *args, **kw):
        fmt = kw.pop('fmt', None)
        # Addons are not System objects: in that case the lookup is done on
        # the shell they are attached to.
        host = self if isinstance(self, System) else self.sh
        candidate = getattr(host, str(fmt).lower() + '_' + func.__name__, func)
        if getattr(candidate, 'func_extern', False):
            # Proxies built by System.__getattr__ must not receive *self*
            return candidate(*args, **kw)
        return candidate(self, *args, **kw)

    return formatted_method
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
def _kw2spawn(func):
|
|
129
|
+
"""This decorator justs update the docstring of a class...
|
|
130
|
+
|
|
131
|
+
It will state that all **kw** arguments will be passed directly to the
|
|
132
|
+
```spawn`` method.
|
|
133
|
+
|
|
134
|
+
(Because laziness is good and cut&paste is bad)
|
|
135
|
+
"""
|
|
136
|
+
func.__doc__ += """
|
|
137
|
+
|
|
138
|
+
At some point, all of the **kw** arguments will be passed directly to the
|
|
139
|
+
:meth:`spawn` method. Please see refer to the :meth:`spawn` method
|
|
140
|
+
documentation for more details.
|
|
141
|
+
"""
|
|
142
|
+
return func
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
class ExecutionError(RuntimeError):
    """Pass-through exception raised on internal :meth:`OSExtended.spawn` errors."""
|
|
148
|
+
|
|
149
|
+
|
|
150
|
+
class CopyTreeError(OSError):
    """Raised when the recursive copy of a directory fails."""
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
class CdContext:
    """Context manager for temporarily changing the working directory.

    The initial directory is restored on exit, even when an exception is
    raised inside the block. It mirrors the :meth:`~OSExtended.cd` call
    syntax and is usually reached through an :class:`OSExtended` object::

        with sh.cdcontext(newpath, create=True):
            # work in newpath
        # back to the original path
    """

    def __init__(self, sh, newpath, create=False, clean_onexit=False):
        self.sh = sh
        self.create = create
        self.clean_onexit = clean_onexit
        self.newpath = self.sh.path.expanduser(newpath)

    def _is_noop(self):
        # '' and '.' both mean "stay where we are": nothing to do
        return self.newpath in ('', '.')

    def __enter__(self):
        if not self._is_noop():
            self.oldpath = self.sh.getcwd()
            self.sh.cd(self.newpath, create=self.create)

    def __exit__(self, etype, value, traceback):  # @UnusedVariable
        if not self._is_noop():
            self.sh.cd(self.oldpath)
            if self.clean_onexit:
                self.sh.rm(self.newpath)
|
|
183
|
+
|
|
184
|
+
|
|
185
|
+
@contextlib.contextmanager
def NullContext():
    """A do-nothing context manager (it yields ``None`` and that's all)."""
    yield None
|
|
189
|
+
|
|
190
|
+
|
|
191
|
+
@contextlib.contextmanager
def LocaleContext(category, localename=None, uselock=False):
    """Context used to locally change the Locale.

    Used like the :func:`~locale.setlocale` function::

        with LocaleContext(locale.LC_TIME, 'fr_FR.UTF-8'):
            strtime = date.now().strftime('%X')

    The ``locale`` is changed at the process level; to avoid conflicting
    changes in a multithread context, use *with care* the additional
    ``uselock`` argument.
    """
    guard = LOCALE_LOCK if uselock else NullContext()
    with guard:
        # Query form of setlocale: returns the current setting
        saved = locale.setlocale(category)
        try:
            yield locale.setlocale(category, localename)
        finally:
            locale.setlocale(category, saved)
|
|
210
|
+
|
|
211
|
+
|
|
212
|
+
@functools.total_ordering
class PythonSimplifiedVersion:
    """Simplified representation of a Python version number.

    Instances compare naturally (equality and ordering), which allows one to
    determine whether a given version is more recent than another. The
    comparison operand may also be a plain ``'X.Y.Z'`` string.

    It can be used in a footprint specification.
    """

    _VERSION_RE = re.compile(r'(\d+)\.(\d+)\.(\d+)')

    def __init__(self, versionstr):
        found = self._VERSION_RE.match(versionstr)
        if found is None:
            raise ValueError('Malformed version string: {}'.format(versionstr))
        self._version = tuple(int(digits) for digits in found.groups())

    @property
    def version(self):
        # The (major, minor, micro) tuple of ints
        return self._version

    def __hash__(self):
        return hash(self._version)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            # Try to coerce the operand; un-parsable values are just "not equal"
            try:
                other = self.__class__(other)
            except (ValueError, TypeError):
                return False
        return self.version == other.version

    def __gt__(self, other):
        if not isinstance(other, self.__class__):
            other = self.__class__(other)
        return self.version > other.version

    def __str__(self):
        return '.'.join(str(digit) for digit in self._version)

    def __repr__(self):
        return '<{} | {!s}>'.format(object.__repr__(self).lstrip('<').rstrip('>'), self)

    def export_dict(self):
        """Value used in pure dict/json exports (the dotted string form)."""
        return str(self)
|
|
261
|
+
|
|
262
|
+
|
|
263
|
+
class System(footprints.FootprintBase):
    """Abstract root class for any :class:`System` subclasses.

    It contains basic generic methods and redefinition of some of the usual
    Python's system methods.
    """

    # Footprint class setup: abstract (never instantiated directly), implicit
    # resolution allowed, registered in the 'system' footprints collector.
    _abstract = True
    _explicit = False
    _collector = ('system',)

    # NOTE: the defaults below are evaluated once, at class-definition time,
    # on the machine that imports this module.
    _footprint = dict(
        info = 'Default system interface',
        attr = dict(
            hostname = dict(
                info = "The computer's network name",
                optional = True,
                default = platform.node(),
                alias = ('nodename',)
            ),
            sysname = dict(
                info = "The underlying system/OS name (e.g. Linux, Darwin, ...)",
                optional = True,
                default = platform.system(),
            ),
            arch = dict(
                info = "The underlying machine type (e.g. i386, x86_64, ...)",
                optional = True,
                default = platform.machine(),
                alias = ('machine',)
            ),
            release = dict(
                info = "The underlying system's release, (e.g. 2.2.0, NT, ...)",
                optional = True,
                default = platform.release()
            ),
            version = dict(
                info = "The underlying system's release version",
                optional = True,
                default = platform.version()
            ),
            python = dict(
                info = "The Python's version (e.g 2.7.5)",
                type = PythonSimplifiedVersion,
                optional = True,
                default = platform.python_version(),
            ),
            glove = dict(
                info = "The session's Glove object",
                optional = True,
                type = Glove,
            )
        )
    )
|
|
317
|
+
|
|
318
|
+
    def __init__(self, *args, **kw):
        """
        In addition to footprint's attributes, the following attributes may be added:

        * **prompt** - as a starting comment line in :meth:`title` like methods.
        * **trace** - if *True* or *"log"* mimic ``set -x`` behaviour (default: *False*).
          With trace="log", the information is sent through the logger.
        * **timer** - time all the calls to external commands (default: *False*).
        * **output** - as a default value for any external spawning command (default: *True*).

        The following attributes are also picked from ``kw`` (by default the
        usual Python's modules are used):

        * **os** - as an alternative to :mod:`os`.
        * **rlimit** - as an alternative to :mod:`resource`.
        * **sh** or **shutil** - as an alternative to :mod:`shutil`.

        **The proxy concept:**

        The :class:`System` class acts as a proxy for the :mod:`os`, :mod:`resource`
        and :mod:`shutil` modules. *i.e.* if a method or attribute
        is not defined in the :class:`System` class, the :mod:`os`, :mod:`resource`
        and :mod:`shutil` modules are looked-up (in turn): if one of them has
        the desired attribute/method, it is returned.

        Example: let ``sh`` be an object of class :class:`System`, calling
        ``sh.path.exists`` is equivalent to calling ``os.path.exists`` since
        ``path`` is not redefined in the :class:`System` class.

        In vortex, it is mandatory to use the :class:`System` class (and not the
        official Python modules) even for attributes/methods that are not
        redefined. This is not pointless since, in the future, we may decide to
        redefine a given attribute/method either globally or for a specific
        architecture.

        **Addons:**

        Using the :meth:`extend` method, a :class:`System` object can be extended
        by any object. This mechanism is used by classes deriving from
        :class:`vortex.tools.addons.Addon`.

        Example: let ``sh`` be an object of class :class:`System` and ``MyAddon``
        a subclass of :class:`~vortex.tools.addons.Addon` (of kind 'myaddon') that
        defines the ``greatstuff`` attribute; creating an object of class
        ``MyAddon`` using ``footprints.proxy.addon(kind='myaddon', shell=sh)``
        will extend the ``sh`` with the ``greatstuff`` attribute (*e.g.* any
        user will be able to call ``sh.greatstuff``).

        """
        logger.debug('Abstract System init %s', self.__class__)
        # Assign through __dict__ to bypass the footprint attribute handling
        # and the __getattr__ proxy machinery.
        self.__dict__['_os'] = kw.pop('os', os)
        self.__dict__['_rl'] = kw.pop('rlimit', resource)
        # NB: both 'shutil' and 'sh' are popped from kw ('shutil' wins)
        self.__dict__['_sh'] = kw.pop('shutil', kw.pop('sh', shutil))
        # Lookup order for proxied attributes: os, then shutil, then resource
        self.__dict__['_search'] = [self.__dict__['_os'], self.__dict__['_sh'], self.__dict__['_rl']]
        # Records which search entry served each proxied attribute
        self.__dict__['_xtrack'] = dict()
        self.__dict__['_history'] = History(tag='shell')
        # Return code of the last external command (see the rclast property)
        self.__dict__['_rclast'] = 0
        self.__dict__['prompt'] = str(kw.pop('prompt', ''))
        for flag in ('trace', 'timer'):
            self.__dict__[flag] = kw.pop(flag, False)
        for flag in ('output',):
            self.__dict__[flag] = kw.pop(flag, True)
        super().__init__(*args, **kw)
|
|
381
|
+
|
|
382
|
+
@property
|
|
383
|
+
def realkind(self):
|
|
384
|
+
"""The object/class realkind."""
|
|
385
|
+
return 'system'
|
|
386
|
+
|
|
387
|
+
@property
|
|
388
|
+
def history(self):
|
|
389
|
+
"""The :class:`History` object associated with all :class:`System` objects."""
|
|
390
|
+
return self._history
|
|
391
|
+
|
|
392
|
+
@property
|
|
393
|
+
def rclast(self):
|
|
394
|
+
"""The last return-code (for external commands)."""
|
|
395
|
+
return self._rclast
|
|
396
|
+
|
|
397
|
+
@property
|
|
398
|
+
def search(self):
|
|
399
|
+
"""A list of Python's modules that are looked up when an attribute is not found.
|
|
400
|
+
|
|
401
|
+
At startup, mod:`os`, :mod:`resource` and :mod:`shutil` are looked up but
|
|
402
|
+
additional Addon classes may be added to this list (see the :meth:`extend`
|
|
403
|
+
method).
|
|
404
|
+
"""
|
|
405
|
+
return self._search
|
|
406
|
+
|
|
407
|
+
@property
|
|
408
|
+
def default_syslog(self):
|
|
409
|
+
"""Address to use in logging.handler.SysLogHandler()."""
|
|
410
|
+
return '/dev/log'
|
|
411
|
+
|
|
412
|
+
def extend(self, obj=None):
|
|
413
|
+
"""Extend the current external attribute resolution to **obj** (module or object)."""
|
|
414
|
+
if obj is not None:
|
|
415
|
+
if hasattr(obj, 'kind'):
|
|
416
|
+
for k, v in self._xtrack.items():
|
|
417
|
+
if hasattr(v, 'kind'):
|
|
418
|
+
if hasattr(self, k):
|
|
419
|
+
delattr(self, k)
|
|
420
|
+
for addon in self.search:
|
|
421
|
+
if hasattr(addon, 'kind') and addon.kind == obj.kind:
|
|
422
|
+
self.search.remove(addon)
|
|
423
|
+
self.search.append(obj)
|
|
424
|
+
return len(self.search)
|
|
425
|
+
|
|
426
|
+
def loaded_addons(self):
|
|
427
|
+
"""
|
|
428
|
+
Kind of all the loaded :class:`~vortex.tools.addons.Addon objects
|
|
429
|
+
(*i.e.* :class:`~vortex.tools.addons.Addon objects previously
|
|
430
|
+
loaded with the :meth:`extend` method).
|
|
431
|
+
"""
|
|
432
|
+
return [addon.kind for addon in self.search if hasattr(addon, 'kind')]
|
|
433
|
+
|
|
434
|
+
def external(self, key):
|
|
435
|
+
"""Return effective module object reference if any, or *None*."""
|
|
436
|
+
try:
|
|
437
|
+
getattr(self, key)
|
|
438
|
+
except AttributeError:
|
|
439
|
+
pass
|
|
440
|
+
return self._xtrack.get(key, None)
|
|
441
|
+
|
|
442
|
+
    @secure_getattr
    def __getattr__(self, key):
        """Gateway to undefined methods or attributes.

        This is the place where the ``self.search`` list is looked for...

        The first ``self.search`` entry providing **key** wins (a warning is
        emitted when several entries could serve it). A callable result is
        wrapped in a proxy that traces each call through :meth:`stderr`; the
        proxy is then cached on the instance so that subsequent accesses
        bypass ``__getattr__`` entirely.
        """
        actualattr = None
        if key.startswith('_'):
            # Do not attempt to look for hidden attributes
            raise AttributeError('Method or attribute ' + key + ' not found')
        for shxobj in self.search:
            if hasattr(shxobj, key):
                if isinstance(shxobj, footprints.FootprintBase) and shxobj.footprint_has_attribute(key):
                    # Ignore footprint attributes
                    continue
                if actualattr is None:
                    # First match wins; remember where it was found
                    actualattr = getattr(shxobj, key)
                    self._xtrack[key] = shxobj
                else:
                    # Do not warn for a restricted list of keys
                    if key not in ('stat',):
                        logger.warning('System: duplicate entry while looking for key="%s". ' +
                                       'First result in %s but also available in %s.',
                                       key, self._xtrack[key], shxobj)
        if actualattr is None:
            raise AttributeError('Method or attribute ' + key + ' not found')
        if callable(actualattr):
            # Wrap the callable so that every call is traced/recorded in the
            # shell history before being forwarded to the real implementation.
            def osproxy(*args, **kw):
                cmd = [key]
                cmd.extend(args)
                cmd.extend(['{:s}={:s}'.format(x, str(kw[x])) for x in kw.keys()])
                self.stderr(*cmd)
                return actualattr(*args, **kw)

            osproxy.func_name = str(key)
            osproxy.__name__ = str(key)
            osproxy.func_doc = actualattr.__doc__
            # Flag checked by fmtshcmd: external proxies must not receive *self*
            osproxy.func_extern = True
            # Cache the proxy on the instance (short-circuits future lookups)
            setattr(self, key, osproxy)
            return osproxy
        else:
            return actualattr
|
|
484
|
+
|
|
485
|
+
def stderr(self, *args):
|
|
486
|
+
"""Write a formatted message to standard error (if ``self.trace == True``)."""
|
|
487
|
+
count, justnow, = self.history.append(*args)
|
|
488
|
+
if self.trace:
|
|
489
|
+
if self.trace == 'log':
|
|
490
|
+
logger.info('[sh:#%d] %s', count, ' '.join([str(x) for x in args]))
|
|
491
|
+
else:
|
|
492
|
+
sys.stderr.write(
|
|
493
|
+
"* [{:s}][{:d}] {:s}\n".format(
|
|
494
|
+
justnow.strftime('%Y/%m/%d-%H:%M:%S'), count,
|
|
495
|
+
' '.join([str(x) for x in args])
|
|
496
|
+
)
|
|
497
|
+
)
|
|
498
|
+
|
|
499
|
+
def flush_stdall(self):
|
|
500
|
+
"""Flush stdout and stderr."""
|
|
501
|
+
sys.stdout.flush()
|
|
502
|
+
sys.stderr.flush()
|
|
503
|
+
|
|
504
|
+
@contextlib.contextmanager
|
|
505
|
+
def mute_stderr(self):
|
|
506
|
+
oldtrace = self.trace
|
|
507
|
+
self.trace = False
|
|
508
|
+
try:
|
|
509
|
+
yield
|
|
510
|
+
finally:
|
|
511
|
+
self.trace = oldtrace
|
|
512
|
+
|
|
513
|
+
def echo(self, *args):
|
|
514
|
+
"""Joined **args** are echoed."""
|
|
515
|
+
print('>>>', ' '.join([str(arg) for arg in args]))
|
|
516
|
+
|
|
517
|
+
def title(self, textlist, tchar='=', autolen=96):
|
|
518
|
+
"""Formated title output.
|
|
519
|
+
|
|
520
|
+
:param list|str textlist: A list of strings that contains the title's text
|
|
521
|
+
:param str tchar: The character used to frame the title text
|
|
522
|
+
:param int autolen: The title width
|
|
523
|
+
"""
|
|
524
|
+
if isinstance(textlist, str):
|
|
525
|
+
textlist = (textlist,)
|
|
526
|
+
if autolen:
|
|
527
|
+
nbc = autolen
|
|
528
|
+
else:
|
|
529
|
+
nbc = max([len(text) for text in textlist])
|
|
530
|
+
print()
|
|
531
|
+
print(tchar * (nbc + 4))
|
|
532
|
+
for text in textlist:
|
|
533
|
+
print('{0:s} {1:^{size}s} {0:s}'.format(tchar, text.upper(), size=nbc))
|
|
534
|
+
print(tchar * (nbc + 4))
|
|
535
|
+
print()
|
|
536
|
+
self.flush_stdall()
|
|
537
|
+
|
|
538
|
+
def subtitle(self, text='', tchar='-', autolen=96):
|
|
539
|
+
"""Formatted subtitle output.
|
|
540
|
+
|
|
541
|
+
:param str text: The subtitle's text
|
|
542
|
+
:param str tchar: The character used to frame the title text
|
|
543
|
+
:param int autolen: The title width
|
|
544
|
+
"""
|
|
545
|
+
if autolen:
|
|
546
|
+
nbc = autolen
|
|
547
|
+
else:
|
|
548
|
+
nbc = len(text)
|
|
549
|
+
print()
|
|
550
|
+
print(tchar * (nbc + 4))
|
|
551
|
+
if text:
|
|
552
|
+
print('# {0:{size}s} #'.format(text, size=nbc))
|
|
553
|
+
print(tchar * (nbc + 4))
|
|
554
|
+
self.flush_stdall()
|
|
555
|
+
|
|
556
|
+
def header(self, text='', tchar='-', autolen=False, xline=True, prompt=None):
|
|
557
|
+
"""Formatted header output.
|
|
558
|
+
|
|
559
|
+
:param str text: The subtitle's text
|
|
560
|
+
:param str tchar: The character used to frame the title text
|
|
561
|
+
:param bool autolen: If True the header width will match the text width (10. otherwise)
|
|
562
|
+
:param bool xline: Adds a line of **tchar** after the header text
|
|
563
|
+
:param str prompt: A customised prompt (otherwise ``self.prompt`` is used)
|
|
564
|
+
"""
|
|
565
|
+
if autolen:
|
|
566
|
+
nbc = len(prompt + text) + 1
|
|
567
|
+
else:
|
|
568
|
+
nbc = 100
|
|
569
|
+
print()
|
|
570
|
+
print(tchar * nbc)
|
|
571
|
+
if text:
|
|
572
|
+
if not prompt:
|
|
573
|
+
prompt = self.prompt
|
|
574
|
+
if prompt:
|
|
575
|
+
prompt = str(prompt) + ' '
|
|
576
|
+
else:
|
|
577
|
+
prompt = ''
|
|
578
|
+
print(prompt + str(text))
|
|
579
|
+
if xline:
|
|
580
|
+
print(tchar * nbc)
|
|
581
|
+
self.flush_stdall()
|
|
582
|
+
|
|
583
|
+
def highlight(self, text='', hchar='----', bchar='#', bline=False, bline0=True):
|
|
584
|
+
"""Highlight some text.
|
|
585
|
+
|
|
586
|
+
:param str text: The text to be highlighted
|
|
587
|
+
:param str hchar: The characters used to frame the text
|
|
588
|
+
:param str bchar: The characters used at the beging
|
|
589
|
+
:param bool bline: Adds a blank line after
|
|
590
|
+
:param bool bline0: Adds a blank line before
|
|
591
|
+
"""
|
|
592
|
+
if bline0:
|
|
593
|
+
print()
|
|
594
|
+
print('{0:s} {1:s} {2:s} {1:s} {3:s}'
|
|
595
|
+
.format(bchar.rstrip(), hchar, text, bchar.lstrip()))
|
|
596
|
+
if bline:
|
|
597
|
+
print()
|
|
598
|
+
self.flush_stdall()
|
|
599
|
+
|
|
600
|
+
@property
|
|
601
|
+
def executable(self):
|
|
602
|
+
"""Return the actual ``sys.executable``."""
|
|
603
|
+
self.stderr('executable')
|
|
604
|
+
return sys.executable
|
|
605
|
+
|
|
606
|
+
def pythonpath(self, output=None):
|
|
607
|
+
"""Return or print actual ``sys.path``."""
|
|
608
|
+
if output is None:
|
|
609
|
+
output = self.output
|
|
610
|
+
self.stderr('pythonpath')
|
|
611
|
+
if output:
|
|
612
|
+
return sys.path[:]
|
|
613
|
+
else:
|
|
614
|
+
self.subtitle('Python PATH')
|
|
615
|
+
for pypath in sys.path:
|
|
616
|
+
print(pypath)
|
|
617
|
+
return True
|
|
618
|
+
|
|
619
|
+
@property
|
|
620
|
+
def env(self):
|
|
621
|
+
"""Returns the current active environment."""
|
|
622
|
+
return Environment.current()
|
|
623
|
+
|
|
624
|
+
def guess_job_identifier(self):
|
|
625
|
+
"""Try to determine an identification string for the current script."""
|
|
626
|
+
# PBS scheduler SLURM scheduler Good-old PID
|
|
627
|
+
env = self.env
|
|
628
|
+
label = env.PBS_JOBID or env.SLURM_JOB_ID or 'localpid'
|
|
629
|
+
if label == 'localpid':
|
|
630
|
+
label = str(self.getpid())
|
|
631
|
+
return label
|
|
632
|
+
|
|
633
|
+
def vortex_modules(self, only='.'):
|
|
634
|
+
"""Return a filtered list of modules in the vortex package.
|
|
635
|
+
|
|
636
|
+
:param str only: The regex used to filter the modules list.
|
|
637
|
+
"""
|
|
638
|
+
if self.glove is not None:
|
|
639
|
+
g = self.glove
|
|
640
|
+
mfiles = [
|
|
641
|
+
re.sub(r'^' + mroot + r'/', '', x)
|
|
642
|
+
for mroot in (g.siteroot + '/src', g.siteroot + '/site')
|
|
643
|
+
for x in self.ffind(mroot)
|
|
644
|
+
if self.path.isfile(self.path.join(self.path.dirname(x), '__init__.py'))
|
|
645
|
+
]
|
|
646
|
+
return [
|
|
647
|
+
re.sub(r'(?:/__init__)?\.py$', '', x).replace('/', '.')
|
|
648
|
+
for x in mfiles
|
|
649
|
+
if (not x.startswith('.') and
|
|
650
|
+
re.search(only, x, re.IGNORECASE) and
|
|
651
|
+
x.endswith('.py'))
|
|
652
|
+
]
|
|
653
|
+
else:
|
|
654
|
+
raise RuntimeError("A glove must be defined")
|
|
655
|
+
|
|
656
|
+
def vortex_loaded_modules(self, only='.', output=None):
    """Check loaded modules, producing either a dump or a list of tuples ``(modulename, loaded)``.

    :param str only: The regex used to filter the modules list.
    :param output: if *True*, return the checklist; otherwise print it and
        return *True*. Defaults to ``self.output``.
    """
    if output is None:
        output = self.output
    checklist = [(modname, modname in sys.modules)
                 for modname in self.vortex_modules(only)]
    if output:
        return checklist
    for modname, loaded in checklist:
        print(str(loaded).ljust(8), modname)
    print('--')
    return True
|
|
673
|
+
|
|
674
|
+
def systems_reload(self):
    """Load extra systems modules not yet loaded."""
    freshly_loaded = []
    not_yet_loaded = (m for m in self.vortex_modules(only='systems')
                      if m not in sys.modules)
    for modname in not_yet_loaded:
        try:
            self.import_module(modname)
        except ValueError as err:
            logger.critical('systems_reload: cannot import module %s (%s)', modname, str(err))
        else:
            freshly_loaded.append(modname)
    return freshly_loaded
|
|
685
|
+
|
|
686
|
+
# Redefinition of methods of the resource package...
|
|
687
|
+
|
|
688
|
+
def numrlimit(self, r_id):
    """
    Convert actual resource id (**r_id**) in some acceptable *int* for the
    :mod:`resource` module.

    :param r_id: either an *int* (returned unchanged) or a string such as
        ``'nofile'`` / ``'RLIMIT_NOFILE'`` (case-insensitive).
    :raises ValueError: when the string does not name a known RLIMIT_* constant.
    """
    # isinstance (rather than "type(...) is int") also accepts int
    # subclasses such as enum.IntEnum members.
    if not isinstance(r_id, int):
        r_id = r_id.upper()
        if not r_id.startswith('RLIMIT_'):
            r_id = 'RLIMIT_' + r_id
        r_id = getattr(self._rl, r_id, None)
        if r_id is None:
            raise ValueError('Invalid resource specified')
    return r_id
|
|
701
|
+
|
|
702
|
+
def setrlimit(self, r_id, r_limits):
    """Proxy to :mod:`resource` function of the same name.

    A scalar **r_limits** is duplicated into a ``(soft, hard)`` pair.
    """
    self.stderr('setrlimit', r_id, r_limits)
    try:
        limits_pair = tuple(r_limits)
    except TypeError:
        # A single scalar was given: use it for both soft and hard limits.
        limits_pair = (r_limits, r_limits)
    return self._rl.setrlimit(self.numrlimit(r_id), limits_pair)
|
|
710
|
+
|
|
711
|
+
def getrlimit(self, r_id):
    """Proxy to :mod:`resource` function of the same name."""
    self.stderr('getrlimit', r_id)
    resolved_id = self.numrlimit(r_id)
    return self._rl.getrlimit(resolved_id)
|
|
715
|
+
|
|
716
|
+
def getrusage(self, pid=None):
    """Proxy to :mod:`resource` function of the same name, with the current process as default."""
    pid = self._rl.RUSAGE_SELF if pid is None else pid
    self.stderr('getrusage', pid)
    return self._rl.getrusage(pid)
|
|
722
|
+
|
|
723
|
+
def import_module(self, modname):
    """Import **modname** through :mod:`importlib` and return it (looked up in ``sys.modules``)."""
    importlib.import_module(modname)
    module = sys.modules.get(modname)
    return module
|
|
727
|
+
|
|
728
|
+
def import_function(self, funcname):
    """Import the function named **funcname** qualified by a proper module name package.

    :param str funcname: a dotted path such as ``package.module.function``.
    :return: the function object, or *None* when the module has no such
        attribute or when **funcname** contains no dot at all.
    """
    thisfunc = None
    if '.' in funcname:
        modname, shortname = funcname.rsplit('.', 1)
        thismod = self.import_module(modname)
        if thismod:
            thisfunc = getattr(thismod, shortname, None)
    else:
        # Lazy %-style arguments (consistent with the other logger calls):
        # the message is only formatted when actually emitted.
        logger.error('Bad function path name <%s>', funcname)
    return thisfunc
|
|
738
|
+
|
|
739
|
+
|
|
740
|
+
class OSExtended(System):
    """Abstract extended base system.

    It contains many useful Vortex's additions to the usual Python's shell.
    """

    # Footprint-related class attributes: this class stays abstract and is
    # never instantiated directly through the footprints mechanism.
    _abstract = True
    _footprint = dict(
        info = 'Abstract extended base system'
    )
|
|
750
|
+
|
|
751
|
+
def __init__(self, *args, **kw):
    """
    Before going through parent initialisation (see :class:`System`),
    pickle these attributes:

    * **rmtreemin** - as the minimal depth needed for a :meth:`rmsafe`.
    * **cmpaftercp** - as a boolean for activating full comparison after plain cp (default: *True*).
    * **ftraw** - allows ``smartft*`` methods to use the raw FTP commands
      (e.g. ftget, ftput) instead of the internal Vortex's FTP client
      (default: *False*).
    * **ftputcmd** - The name of the raw FTP command for the "put" action
      (default: ftput).
    * **ftgetcmd** - The name of the raw FTP command for the "get" action
      (default: ftget).
    * **ftpflavour** - The default Vortex's FTP client behaviour
      (default: `FTP_FLAVOUR.CONNECTION_POOLS`). See the :meth:`ftp` method
      for more details.
    """
    logger.debug('Abstract System init %s', self.__class__)
    # Our own options are popped from **kw** so that only the leftover
    # keywords reach the superclass' constructor below.
    self._rmtreemin = kw.pop('rmtreemin', 3)
    self._cmpaftercp = kw.pop('cmpaftercp', True)
    # Switches for rawft* methods (None means "use the target's default")
    self._ftraw = kw.pop('ftraw', None)
    self.ftputcmd = kw.pop('ftputcmd', None)
    self.ftgetcmd = kw.pop('ftgetcmd', None)
    # FTP stuff again
    self.ftpflavour = kw.pop('ftpflavour', FTP_FLAVOUR.CONNECTION_POOLS)
    self._current_ftppool = None
    # Some internal variables used by particular methods
    self._ftspool_cache = None
    self._frozen_target = None
    # Hardlinks behaviour...
    self.allow_cross_users_links = True
    # Go for the superclass' constructor
    super().__init__(*args, **kw)
    # Initialise possibly missing objects. NOTE(review): the direct
    # __dict__ assignment presumably bypasses a custom __setattr__ defined
    # by the parent class — confirm before refactoring.
    self.__dict__['_cpusinfo'] = None
    self.__dict__['_numainfo'] = None
    self.__dict__['_memoryinfo'] = None
    self.__dict__['_netstatsinfo'] = None

    # Initialise the signal handler object
    self._signal_intercept_init()
|
|
794
|
+
|
|
795
|
+
@property
def ftraw(self):
    """Use the system's FTP service (e.g. ftserv)."""
    # An explicit user choice wins; otherwise defer to the target's default.
    if self._ftraw is not None:
        return self._ftraw
    return self.default_target.ftraw_default
|
|
802
|
+
|
|
803
|
+
@ftraw.setter
def ftraw(self, value):
    """Use the system's FTP service (e.g. ftserv)."""
    # Normalise any truthy/falsy value into a plain boolean.
    self._ftraw = bool(value)
|
|
807
|
+
|
|
808
|
+
@ftraw.deleter
def ftraw(self):
    """Use the system's FTP service (e.g. ftserv)."""
    # Forget any explicit choice: fall back to the target's default.
    self._ftraw = None
|
|
812
|
+
|
|
813
|
+
def target(self, **kw):
    """
    Provide a default :class:`~vortex.tools.targets.Target` according
    to System's own attributes.

    * Extra or alternative attributes may still be provided using **kw**.
    * The object returned by this method will be used in subsequent calls
      to :attr:`default_target` (this is the concept of frozen target).
    """
    # kw entries override the system-derived defaults
    desc = {'hostname': self.hostname, 'sysname': self.sysname, **kw}
    self._frozen_target = footprints.proxy.targets.default(**desc)
    return self._frozen_target
|
|
829
|
+
|
|
830
|
+
@property
def default_target(self):
    """Return the latest frozen target."""
    # Postpone the actual target's creation until it is first used.
    frozen = DelayedInit(self._frozen_target, self.target)
    return frozen
|
|
834
|
+
|
|
835
|
+
def fmtspecific_mtd(self, method, fmt):
    """Check if a format specific implementation is available for a given format."""
    # Format-specific methods follow the "<fmt>_<method>" naming scheme.
    return hasattr(self, fmt + '_' + method)
|
|
838
|
+
|
|
839
|
+
def popen(self, args, stdin=None, stdout=None, stderr=None, shell=False,
          output=False, bufsize=0):  # @UnusedVariable
    """Return an open pipe for the **args** command.

    :param str|list args: The command (+ its command-line arguments) to be
        executed. When **shell** is *False* it should be a list; when
        **shell** is *True*, a string.
    :param stdin: *None* (inherit the standard input), *True* (create a pipe
        usable through :meth:`~subprocess.Popen.communicate`) or a File-like
        object used as the input stream.
    :param stdout: *None* (inherit), *True* (create a pipe) or a File-like
        object that receives the standard output.
    :param stderr: *None* (inherit), *True* (create a pipe) or a File-like
        object that receives the standard error.
    :param bool shell: If *True*, run the command through the system shell
        (usually considered a security hazard: see the :mod:`subprocess`
        documentation for more details).
    :param bool output: unused (kept for backward compatibility).
    :param int bufsize: The default buffer size for new pipes (``0`` means
        unbuffered).
    :return subprocess.Popen: The object handling the spawned process.
    """
    self.stderr(*args)
    # Map the "True" convention onto subprocess' PIPE constant
    stdin = subprocess.PIPE if stdin is True else stdin
    stdout = subprocess.PIPE if stdout is True else stdout
    stderr = subprocess.PIPE if stderr is True else stderr
    return subprocess.Popen(args, bufsize=bufsize, stdin=stdin,
                            stdout=stdout, stderr=stderr, shell=shell)
|
|
886
|
+
|
|
887
|
+
def pclose(self, p, ok=None):
    """Do its best to nicely shutdown the process started by **p**.

    :param subprocess.Popen p: The process to be shutdown
    :param list[int] ok: The shell return codes considered as successful
        (if *None*, only 0 is considered successful)
    :return bool: Returns *True* if the process return code is within the
        **ok** list.
    """
    # Closing stdin (if any) lets the child see EOF and exit cleanly.
    if p.stdin is not None:
        p.stdin.close()
    p.wait()
    if p.stdout is not None:
        p.stdout.close()
    if p.stderr is not None:
        p.stderr.close()

    try:
        p.terminate()
    except OSError as e:
        if e.errno == 3:  # errno 3 == ESRCH (no such process)
            # Fixed log message typos + lazy logger arguments
            logger.debug('Process %s already terminated.', p)
        else:
            raise

    self._rclast = p.returncode
    if ok is None:
        ok = [0]
    return p.returncode in ok
|
|
919
|
+
|
|
920
|
+
def spawn(self, args, ok=None, shell=False, stdin=None, output=None,
          outmode='a+b', outsplit=True, silent=False, fatal=True,
          taskset=None, taskset_id=0, taskset_bsize=1):
    """Subprocess call of **args**.

    :param str|list[str] args: The command (+ its command-line arguments) to be
        executed. When **shell** is *False* it should be a list. When **shell**
        is *True*, it should be a string.
    :param list[int] ok: The shell return codes considered as successful
        (if *None*, only 0 is considered successful)
    :param bool shell: If *True*, the specified command will be executed
        through the system shell. (It is usually considered a security hazard:
        see the :mod:`subprocess` documentation for more details).
    :param stdin: Specify the input stream characteristics:

        * If *None*, the standard input stream will be opened.
        * If *True*, no standard input will be sent.
        * If a File-like object is provided, it will be used as an input stream.

    :param output: Specify the standard and error stream characteristics:

        * If *None*, ``self.output`` (that defaults to *True*) will be used.
        * If *True*, *stderr* and *stdout* will be captured and *stdout*
          will be returned by the method if the execution goes well
          according to the **ok** list (see the **outsplit** argument).
        * If *False*, the standard output and error streams will be used.
        * If a File-like object is provided, outputs will be written there.
        * If a string is provided, it's considered to be a filename. The
          file will be opened (see the **outmode** argument) and used to
          redirect *stdout* and *stderr*.

    :param str outmode: The open mode of the output file
        (meaningful only when **output** is a filename).
    :param bool outsplit: If *True*, the captured standard output will be split
        on line-breaks and a list of lines will be returned (with all the
        line-breaks stripped out). Otherwise, the raw standard output text
        is returned (meaningful only when **output** is *True*).
    :param bool silent: If *True*, in case a bad return-code is encountered
        (according to the **ok** list), the standard error stream is not
        printed out.
    :param bool fatal: If *True*, exceptions will be raised in case of failure
        (more precisely, if a bad return-code is detected (according to the
        **ok** list), an :class:`ExecutionError` is raised). Otherwise, the
        method just returns *False*.
    :param str taskset: If *None*, the process will not be bound to a specific
        CPU core (this is usually what we want). Otherwise, **taskset** can be
        a string describing the wanted *topology* and the *method* used to
        bind the process (the string should look like ``topology_method``).
        (see the :meth:`cpus_affinity_get` method and the
        :mod:`bronx.system.cpus` module for more details).
    :param int taskset_id: The task id for this process
    :param int taskset_bsize: The number of CPUs that will be used (usually 1,
        but possibly more when using threaded programs).
    :note: When a signal is caught by the Python script, the TERM signal is
        sent to the spawned process and then the signal Exception is re-raised
        (the **fatal** argument has no effect on that).
    :note: If **output** is *True*, the result is a Unicode string decoded
        assuming the **locale.getpreferredencoding(False)** encoding.
    """
    rc = False
    if ok is None:
        ok = [0]
    if output is None:
        output = self.output
    if stdin is True:
        stdin = subprocess.PIPE
    # The child gets a (possibly amended) copy of the current environment
    localenv = self._os.environ.copy()
    if taskset is not None:
        # "topology_method" string -> extra arguments for cpus_affinity_get
        taskset_def = taskset.split('_')
        taskset, taskset_cmd, taskset_env = self.cpus_affinity_get(taskset_id,
                                                                   taskset_bsize,
                                                                   *taskset_def)
        if taskset:
            localenv.update(taskset_env)
        else:
            logger.warning("CPU binding is not available on this platform")
    if isinstance(args, str):
        # Shell-style command line: prefixes are prepended as plain text
        if taskset:
            args = taskset_cmd + ' ' + args
        if self.timer:
            args = 'time ' + args
        self.stderr(args)
    else:
        # argv-style command: prefixes are prepended as extra list items
        if taskset:
            args[:0] = taskset_cmd
        if self.timer:
            args[:0] = ['time']
        self.stderr(*args)
    if isinstance(output, bool):
        if output:
            cmdout, cmderr = subprocess.PIPE, subprocess.PIPE
        else:
            cmdout, cmderr = None, None
    else:
        if isinstance(output, str):
            # A filename was given: from now on *output* is a file object
            output = open(output, outmode)
        cmdout, cmderr = output, output
    p = None
    try:
        p = subprocess.Popen(args, stdin=stdin, stdout=cmdout, stderr=cmderr,
                             shell=shell, env=localenv)
        p_out, p_err = p.communicate()
    except ValueError as e:
        logger.critical(
            'Weird arguments to Popen ({!s}, stdout={!s}, stderr={!s}, shell={!s})'.format(
                args, cmdout, cmderr, shell
            )
        )
        logger.critical('Caught exception: %s', e)
        if fatal:
            raise
        else:
            logger.warning('Carry on because fatal is off')
    except OSError:
        logger.critical('Could not call %s', str(args))
        if fatal:
            raise
        else:
            logger.warning('Carry on because fatal is off')
    except Exception as perr:
        logger.critical('System returns %s', str(perr))
        if fatal:
            raise RuntimeError('System {!s} spawned {!s} got [{!s}]: {!s}'
                               .format(self, args, p.returncode, perr))
        else:
            logger.warning('Carry on because fatal is off')
    except (SignalInterruptError, KeyboardInterrupt) as perr:
        logger.critical('The python process was killed: %s. Trying to terminate the subprocess.',
                        str(perr))
        if p:
            if shell:
                # Kill the process group: apparently it's the only way when shell=T
                self.killpg(self.getpgid(p.pid), signal.SIGTERM)
            else:
                p.terminate()
            p.wait()
        raise  # Fatal has no effect on that !
    else:
        # No exception was raised: inspect the return code
        plocale = locale.getlocale()[1] or 'ascii'
        if p.returncode in ok:
            if isinstance(output, bool) and output:
                rc = p_out.decode(plocale, 'replace')
                if outsplit:
                    rc = rc.rstrip('\n').split('\n')
                p.stdout.close()
            else:
                rc = not bool(p.returncode)
        else:
            if not silent:
                logger.warning('Bad return code [%d] for %s', p.returncode, str(args))
                if isinstance(output, bool) and output:
                    sys.stderr.write(p_err.decode(plocale, 'replace'))
            if fatal:
                raise ExecutionError()
            else:
                logger.warning('Carry on because fatal is off')
    finally:
        self._rclast = p.returncode if p else 1
        if isinstance(output, bool) and p:
            if output:
                if p.stdout:
                    p.stdout.close()
                if p.stderr:
                    p.stderr.close()
        elif isinstance(output, str):
            # NOTE(review): when a filename was given, *output* was rebound
            # to a file object above, so this branch looks unreachable and
            # the opened file is only flushed (never closed) below — confirm.
            output.close()
        elif isinstance(output, io.IOBase):
            output.flush()
        del p

    return rc
|
|
1092
|
+
|
|
1093
|
+
def getlogname(self):
    """Be sure to get the actual login name."""
    # Resolve through the password database rather than trusting $LOGNAME.
    pw_entry = passwd.getpwuid(self._os.getuid())
    return pw_entry[0]
|
|
1096
|
+
|
|
1097
|
+
def getfqdn(self, name=None):
    """
    Return a fully qualified domain name for **name**. Default is to
    check for the current *hostname*.
    """
    target_name = self.default_target.inetname if name is None else name
    return socket.getfqdn(target_name)
|
|
1105
|
+
|
|
1106
|
+
def pwd(self, output=None):
    """Current working directory.

    :param output: if *True*, return the path; otherwise print it and
        return *True*. Defaults to ``self.output``.
    :return: the path, *True*, or *None* when the current directory cannot
        be determined (e.g. it was removed underneath us).
    """
    if output is None:
        output = self.output
    self.stderr('pwd')
    try:
        realpwd = self._os.getcwd()
    except OSError as e:
        # Fixed log message: the Python-2 name was "getcwdu", the code
        # actually calls getcwd().
        logger.error('getcwd failed: %s.', str(e))
        return None
    if output:
        return realpwd
    else:
        print(realpwd)
        return True
|
|
1121
|
+
|
|
1122
|
+
def cd(self, pathtogo, create=False):
    """Change the current working directory to **pathtogo**.

    :param bool create: create the target directory first when needed.
    """
    destination = self.path.expanduser(pathtogo)
    self.stderr('cd', destination, create)
    if create:
        self.mkdir(destination)
    self._os.chdir(destination)
    return True
|
|
1130
|
+
|
|
1131
|
+
def cdcontext(self, path, create=False, clean_onexit=False):
    """
    Returns a new :class:`CdContext` context manager initialised with the
    **path**, **create** and **clean_onexit** arguments.
    """
    ctx = CdContext(self, path, create, clean_onexit)
    return ctx
|
|
1137
|
+
|
|
1138
|
+
@contextlib.contextmanager
def temporary_dir_context(self, suffix=None, prefix=None, dir=None):
    """Create a temporary directory and remove it when exiting the context.

    :param suffix: The temporary directory name will end with that suffix
    :param prefix: The temporary directory name will start with that prefix
    :param dir: The temporary directory will be created in that directory
    """
    self.stderr('temporary_dir_context starts', suffix)
    self.stderr('tempfile.TemporaryDirectory', suffix, prefix, dir)
    with tempfile.TemporaryDirectory(suffix=suffix, prefix=prefix, dir=dir) as tmp_dir:
        yield tmp_dir
    # Reached on a clean exit only (tracing after the cleanup happened)
    self.stderr('tempfile.TemporaryDirectory cleanup', tmp_dir)
|
|
1151
|
+
|
|
1152
|
+
@contextlib.contextmanager
def temporary_dir_cdcontext(self, suffix=None, prefix=None, dir=None):
    """Change to a temporary directory, and remove it when exiting the context.

    For a description of the context's arguments, see :func:`temporary_dir_context`.
    """
    with self.temporary_dir_context(suffix=suffix, prefix=prefix, dir=dir) as tmp_dir, \
            self.cdcontext(tmp_dir, create=False, clean_onexit=False):
        yield tmp_dir
|
|
1161
|
+
|
|
1162
|
+
def ffind(self, *args):
    """Recursive file find. Arguments are starting paths."""
    if args:
        roots = [self.path.expanduser(x) for x in args]
    else:
        roots = ['*']
    self.stderr('ffind', *roots)
    collected = []
    for candidate in self.glob(*roots):
        if self.path.isfile(candidate):
            collected.append(candidate)
        else:
            # A directory: recurse through the whole tree below it
            for root, _, filenames in self._os.walk(candidate):
                collected.extend(self.path.join(root, f) for f in filenames)
    return sorted(collected)
|
|
1177
|
+
|
|
1178
|
+
def xperm(self, filename, force=False):
    """Return whether a **filename** exists and is executable or not.

    If **force** is set to *True*, the file's permission will be modified
    so that the file becomes executable.
    """
    if not self._os.path.exists(filename):
        return False
    # NOTE: only the others-execute bit is tested (st_mode & 0o001).
    is_x = bool(self._os.stat(filename).st_mode & 1)
    if force and not is_x:
        new_mode = (self._os.stat(filename).st_mode |
                    stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
        self.chmod(filename, new_mode)
        is_x = True
    return is_x
|
|
1193
|
+
|
|
1194
|
+
def rperm(self, filename, force=False):
    """Return whether a **filename** exists and is readable by all or not.

    If **force** is set to *True*, the file's permission will be modified
    so that the file becomes readable for all.
    """
    if not self._os.path.exists(filename):
        return False
    mode = self._os.stat(filename).st_mode
    read_bits = (stat.S_IRUSR, stat.S_IRGRP, stat.S_IROTH)
    is_r = all(mode & b for b in read_bits)
    if force and not is_r:
        self.chmod(filename, mode | stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
        is_r = True
    return is_r
|
|
1209
|
+
|
|
1210
|
+
def wperm(self, filename, force=False):
    """Return whether a **filename** exists and is writable by owner or not.

    If **force** is set to *True*, the file's permission will be modified
    so that the file becomes writable.
    """
    if not self._os.path.exists(filename):
        return False
    cur_mode = self._os.stat(filename).st_mode
    is_w = bool(cur_mode & stat.S_IWUSR)
    if force and not is_w:
        self.chmod(filename, cur_mode | stat.S_IWUSR)
        is_w = True
    return is_w
|
|
1225
|
+
|
|
1226
|
+
def wpermtree(self, objpath, force=False):
    """Return whether every inode of the **objpath** hierarchy is owner-writeable.

    If **force** is set to *True*, the permissions of every item in the
    hierarchy are fixed along the way.
    """
    rc = self.wperm(objpath, force)
    for dirpath, dirnames, filenames in self.walk(objpath):
        for entry in filenames + dirnames:
            # wperm is always called (even when rc is already False) so
            # that "force" fixes the whole tree.
            rc = self.wperm(self.path.join(dirpath, entry), force) and rc
    return rc
|
|
1237
|
+
|
|
1238
|
+
def readonly(self, inodename):
    """Set permissions of the ``inodename`` object to read-only."""
    inodename = self.path.expanduser(inodename)
    self.stderr('readonly', inodename)
    if not self._os.path.exists(inodename):
        return None
    if self._os.path.isdir(inodename):
        # Directories keep their x bits so they remain traversable.
        return self.chmod(inodename, 0o555)
    st = self.stat(inodename).st_mode
    write_bits = stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH
    if st & write_bits:
        return self.chmod(inodename, st & ~write_bits)
    # Already read-only: nothing to do
    return True
|
|
1253
|
+
|
|
1254
|
+
def readonlytree(self, objpath):
    """Recursively set permissions of the **objpath** object to read-only."""
    rc = self.readonly(objpath)
    for dirpath, dirnames, filenames in self.walk(objpath):
        for entry in filenames + dirnames:
            # readonly is always called so the whole tree gets processed.
            rc = self.readonly(self.path.join(dirpath, entry)) and rc
    return rc
|
|
1261
|
+
|
|
1262
|
+
def usr_file(self, filename):
    """Return whether or not **filename** belongs to the current user."""
    owner_uid = self._os.stat(filename).st_uid
    return owner_uid == self._os.getuid()
|
|
1265
|
+
|
|
1266
|
+
def touch(self, filename):
    """Clone of the eponymous unix command.

    Update the mtime of **filename** when it already exists (it may as
    well be a directory), create an empty file otherwise.

    :return bool: *True* on success, *False* when the timestamp update failed.
    """
    filename = self.path.expanduser(filename)
    self.stderr('touch', filename)
    rc = True
    if self.path.exists(filename):
        # Note: "filename" might as well be a directory...
        try:
            os.utime(filename, None)
        except Exception:
            # Deliberately best-effort: report the failure instead of raising
            rc = False
    else:
        # Context manager guarantees the new empty file gets closed
        with open(filename, 'a'):
            pass
    return rc
|
|
1281
|
+
|
|
1282
|
+
@fmtshcmd
def remove(self, objpath):
    """Unlink the specified object (file, directory or directory tree).

    :param str objpath: Path to the object to unlink
    :return bool: *True* when the object no longer exists afterwards.
    """
    objpath = self.path.expanduser(objpath)
    if not self._os.path.exists(objpath):
        self.stderr('clear', objpath)
    else:
        self.stderr('remove', objpath)
        if self._os.path.isdir(objpath):
            self.rmtree(objpath)
        else:
            self.unlink(objpath)
    return not self._os.path.exists(objpath)
|
|
1298
|
+
|
|
1299
|
+
@fmtshcmd
def rm(self, objpath):
    """Shortcut to :meth:`remove` method.

    :param str objpath: Path to the object to unlink
    """
    # Plain delegation to remove()
    return self.remove(objpath)
|
|
1306
|
+
|
|
1307
|
+
def ps(self, opts=None, search=None, pscmd=None):
    """
    Performs a standard process inquiry through :class:`subprocess.Popen`
    and filters the output if a **search** expression is provided (regular
    expressions are used).

    :param list opts: extra command-line options appended after ``self._psopts``.
    :param str search: optional regex used to keep matching lines only.
    :param list pscmd: the base command (defaults to ``['ps']``).
    :return list[str]: the (stripped) matching output lines.
    """
    if not pscmd:
        pscmd = ['ps']
    if opts is None:
        opts = []
    pscmd.extend(self._psopts)
    pscmd.extend(opts)
    self.stderr(*pscmd)
    # Bugfix: communicate() returns *bytes* on Python 3 — splitting them
    # with a str separator would raise a TypeError. Decode first.
    raw = subprocess.Popen(pscmd, stdout=subprocess.PIPE).communicate()[0]
    psall = raw.decode(errors='replace').split('\n')
    if search:
        psall = filter(lambda x: re.search(search, x), psall)
    return [x.strip() for x in psall]
|
|
1324
|
+
|
|
1325
|
+
def sleep(self, nbsecs):
    """Clone of the unix eponymous command.

    :param nbsecs: number of seconds to pause (float values are accepted).
    """
    self.stderr('sleep', nbsecs)
    time.sleep(nbsecs)
|
|
1329
|
+
|
|
1330
|
+
def setulimit(self, r_id):
    """Raise the soft limit of resource **r_id** as high as permitted.

    The soft limit is raised up to the current hard limit (which equals
    ``RLIM_INFINITY`` whenever "unlimited" is actually allowed).
    """
    self.stderr('setulimit', r_id)
    _, hard = self.getrlimit(r_id)
    if hard != self._rl.RLIM_INFINITY:
        logger.info('Unable to raise the %s soft limit to "unlimited", ' +
                    'using the hard limit instead (%s).', str(r_id), str(hard))
    return self.setrlimit(r_id, (hard, hard))
|
|
1338
|
+
|
|
1339
|
+
def ulimit(self):
    """Dump the user limits currently defined."""
    rlimit_names = (r for r in dir(self._rl) if r.startswith('RLIMIT_'))
    for rname in rlimit_names:
        rvalue = self._rl.getrlimit(getattr(self._rl, rname))
        print(' ', rname.ljust(16), ':', rvalue)
|
|
1343
|
+
|
|
1344
|
+
@property
def cpus_info(self):
    """Return an object of a subclass of :class:`bronx.system.cpus.CpusInfo`.

    Such objects are designed to get informations on the platform's CPUs.

    :note: *None* might be returned on some platforms (if cpufinfo is not
        implemented)
    """
    # Populated (or left to None) during the object's initialisation.
    return self._cpusinfo
|
|
1354
|
+
|
|
1355
|
+
def cpus_ids_per_blocks(self, blocksize=1, topology='raw', hexmask=False):  # @UnusedVariable
    """Get the list of CPUs IDs ordered for subsequent binding.

    :param int blocksize: The number of thread consumed by one task
    :param str topology: The task distribution scheme
    :param bool hexmask: Return a list of CPU masks in hexadecimal
    """
    # Base implementation: no binding support (platform-specific
    # subclasses are expected to override this).
    return []
|
|
1363
|
+
|
|
1364
|
+
def cpus_ids_dispenser(self, topology='raw'):
    """Get a dispenser of CPUs IDs nicely ordered for subsequent binding.

    :param str topology: The task distribution scheme
    """
    # Base implementation: no binding support (platform-specific
    # subclasses are expected to override this).
    return None
|
|
1370
|
+
|
|
1371
|
+
def cpus_affinity_get(self, taskid, blocksize=1, method='default', topology='raw'):  # @UnusedVariable
    """Get the necessary command/environment to set the CPUs affinity.

    :param int taskid: the task number
    :param int blocksize: the number of thread consumed by one task
    :param str method: The binding method
    :param str topology: The task distribution scheme
    :return: A 3-elements tuple. (bool: BindingPossible,
        list: Starting command prefix, dict: Environment update)
    """
    # Base implementation: binding is not possible on this platform.
    return (False, list(), dict())
|
|
1382
|
+
|
|
1383
|
+
@property
def numa_info(self):
    """An object describing the platform's NUMA layout.

    The returned object is an instance of a subclass of
    :class:`bronx.system.numa.NumaNodesInfo`.

    :note: On platforms where numainfo is not implemented, *None* is returned.
    """
    return self._numainfo
@property
def memory_info(self):
    """An object describing the platform's RAM.

    The returned object is an instance of a subclass of
    :class:`bronx.system.memory.MemInfo`.

    :note: On platforms where meminfo is not implemented, *None* is returned.
    """
    return self._memoryinfo
@property
def netstatsinfo(self):
    """An object describing the platform's network connections (TCP and UDP).

    The returned object is an instance of a subclass of
    :class:`vortex.tools.net.AbstractNetstats`.

    :note: On platforms where netstat is not implemented, *None* is returned.
    """
    return self._netstatsinfo
def available_localport(self):
    """Return an available port number for a new TCP or UDP connection.

    :raises NotImplementedError: on platforms where netstat is not implemented.
    """
    stats = self.netstatsinfo
    if stats is None:
        raise NotImplementedError('This function is not implemented on this system.')
    return stats.available_localport()
def check_localport(self, port):
    """Check whether the given **port** number is currently being used.

    :raises NotImplementedError: on platforms where netstat is not implemented.
    """
    stats = self.netstatsinfo
    if stats is None:
        raise NotImplementedError('This function is not implemented on this system.')
    return stats.check_localport(port)
def clear(self):
    """Clear the terminal screen (clone of the Unix eponymous command)."""
    self._os.system('clear')
@property
def cls(self):
    """Clear the screen when accessed (property shortcut to :meth:`clear`)."""
    self.clear()
    return None
def rawopts(self, cmdline=None, defaults=None,
            isnone=isnonedef, istrue=istruedef, isfalse=isfalsedef):
    """Parse a simple options command line that looks like `` key=value``.

    :param str cmdline: The command line to be processed (if *None*, ``sys.argv``
        is used to get the script's command line)
    :param dict defaults: defaults values for any missing option.
    :param re.Pattern isnone: Regex that detects ``None`` values.
    :param re.Pattern istrue: Regex that detects ``True`` values.
    :param re.Pattern isfalse: Regex that detects ``False`` values.
    :return dict: a dictionary that contains the parsed options (or their defaults)
    """
    opts = dict()
    if defaults:
        try:
            opts.update(defaults)
        except (ValueError, TypeError):
            logger.warning('Could not update options default: %s', defaults)

    if cmdline is None:
        cmdline = sys.argv[1:]
    # Each argument must be of the form "key=value" (values containing '='
    # are not supported by this simple split — TODO confirm with callers).
    opts.update(dict([x.split('=') for x in cmdline]))
    # Convert recognised string values to booleans or None.  The three checks
    # are intentionally independent (not elif): should a value match several
    # regexes, the last matching conversion wins.
    for k, v in opts.items():
        if v not in (None, True, False):
            if istrue.match(v):
                opts[k] = True
            if isfalse.match(v):
                opts[k] = False
            if isnone.match(v):
                opts[k] = None
    return opts
def is_iofile(self, iocandidate):
    """Check if actual **iocandidate** is a valid filename or io stream.

    :param iocandidate: either a path (checked for existence) or an
        io stream object.
    :return bool: *True* when **iocandidate** is an existing file path or
        any :class:`io.IOBase` stream.
    """
    # NOTE: io.BytesIO and io.StringIO are both subclasses of io.IOBase,
    # so a single isinstance check covers every stream flavour (the former
    # explicit checks were redundant).
    return iocandidate is not None and (
        (isinstance(iocandidate, str) and self.path.exists(iocandidate)) or
        isinstance(iocandidate, io.IOBase)
    )
@contextlib.contextmanager
def ftppool(self, nrcfile=None):
    """Create a context manager that initialises the FTP connection pool.

    Within this context manager, if `self.ftpflavour==FTP_FLAVOUR.CONNECTION_POOLS`,
    the :meth:`ftp` method will use the FTP connection pool initialised by this
    context manager (see the :class:`~vortex.tools.net.FtpConnectionPool` class)
    in order to dispense FTP clients.

    When the context manager is exited, the FTP connection pool is destroyed
    (and all the spare FTP clients are closed).

    :param nrcfile: forwarded to the :class:`FtpConnectionPool` constructor
        (presumably a netrc-like credentials file — TODO confirm).
    """
    # Only the outermost ftppool() context owns the pool: nested calls
    # re-use the existing pool and must not tear it down on exit.
    pool_control = self._current_ftppool is None
    if pool_control:
        self._current_ftppool = FtpConnectionPool(self, nrcfile=nrcfile)
    try:
        yield self._current_ftppool
    finally:
        if pool_control:
            # We created the pool: close all pooled clients and reset.
            self._current_ftppool.clear()
            self._current_ftppool = None
def fix_fthostname(self, hostname, fatal=True):
    """Return **hostname**, falling back on the glove's default when it is *None*.

    :param str hostname: the candidate hostname (may be *None*)
    :param bool fatal: raise when no hostname can be determined
    :raises ValueError: when no hostname is available and **fatal** is *True*
    """
    if hostname is not None:
        return hostname
    hostname = self.glove.default_fthost
    if not hostname and fatal:
        raise ValueError('An *hostname* must be provided one way or another')
    return hostname
def fix_ftuser(self, hostname, logname, fatal=True, defaults_to_user=True):
    """Return **logname**, asking the glove for a default when it is *None*.

    :param str hostname: the host the logname applies to
    :param str logname: the candidate logname (may be *None*)
    :param bool fatal: raise when neither a logname nor a glove is available
    :param bool defaults_to_user: forwarded to the glove's lookup
    :raises ValueError: when **logname** is *None*, no glove is set up and
        **fatal** is *True*
    """
    if logname is not None:
        return logname
    if self.glove is None:
        if fatal:
            raise ValueError("Either a *logname* or a glove must be set-up")
        return logname
    return self.glove.getftuser(hostname, defaults_to_user=defaults_to_user)
def ftp(self, hostname, logname=None, delayed=False, port=DEFAULT_FTP_PORT):
    """Return an FTP client object.

    :param str hostname: the remote host's name for FTP.
    :param str logname: the logname on the remote host.
    :param bool delayed: delay the actual connection attempt as much as possible.
    :param int port: the port number on the remote host.
    :return: a logged-in client, or *None* when the login failed.

    The returned object is an instance of :class:`~vortex.tools.net.StdFtp`
    or of one of its subclasses. Consequently, see the :class:`~vortex.tools.net.StdFtp`
    class documentation to get more information on the client's capabilities.

    The type and behaviour of the returned object depends on the `self.ftpflavour`
    setting. Possible values are:

    * `FTP_FLAVOUR.STD`: a :class:`~vortex.tools.net.StdFtp` object is returned.
    * `FTP_FLAVOUR.RETRIES`: a :class:`~vortex.tools.net.AutoRetriesFtp` object
      is returned (consequently multiple retries will be made if something
      goes wrong with any FTP command).
    * `FTP_FLAVOUR.CONNECTION_POOLS`: a :class:`~vortex.tools.net.AutoRetriesFtp`
      or a :class:`~vortex.tools.net.PooledResetableAutoRetriesFtp` object
      is returned. If the :meth:`ftp` method is called from within a context
      manager created by :meth:`ftppool`, a
      :class:`~vortex.tools.net.FtpConnectionPool` object is used in order
      to create and re-use FTP connections; otherwise a "usual"
      :class:`~vortex.tools.net.AutoRetriesFtp` is returned.
    """
    logname = self.fix_ftuser(hostname, logname)
    if port is None:
        port = DEFAULT_FTP_PORT
    if self.ftpflavour == FTP_FLAVOUR.CONNECTION_POOLS and self._current_ftppool is not None:
        # A pool is active (see ftppool()): let it dispense/re-use a client.
        return self._current_ftppool.deal(hostname, logname, port=port, delayed=delayed)
    else:
        # No pool: build a one-shot client, with retries unless flavour is STD.
        ftpclass = AutoRetriesFtp if self.ftpflavour != FTP_FLAVOUR.STD else StdFtp
        ftpbox = ftpclass(self, hostname, port=port)
        rc = ftpbox.fastlogin(logname, delayed=delayed)
        if rc:
            return ftpbox
        else:
            logger.warning('Could not login on %s as %s [%s]', hostname, logname, str(rc))
            return None
@contextlib.contextmanager
def ftpcontext(self, hostname, logname=None, delayed=False, port=DEFAULT_FTP_PORT):
    """Yield an FTP client object and close it when the context exits.

    For a description of the context's arguments, see :func:`ftp`.
    The yielded value may be *None* when the login failed; in that case
    there is nothing to close.
    """
    client = self.ftp(hostname, logname=logname, delayed=delayed, port=port)
    try:
        yield client
    finally:
        if client:
            client.close()
@fmtshcmd
def anyft_remote_rewrite(self, remote):
    """
    When copying the data using a file transfer protocol (FTP, scp, ...),
    given the format, possibly modify the remote name.

    This generic implementation is the identity: the remote name is returned
    unchanged.  NOTE(review): the ``@fmtshcmd`` decorator presumably
    dispatches to format-specific overrides when ``fmt`` is given — confirm
    in the decorator's definition.
    """
    return remote
@fmtshcmd
def ftget(self, source, destination, hostname=None, logname=None, port=DEFAULT_FTP_PORT,
          cpipeline=None):
    """Proceed to a direct ftp get on the specified target (using Vortex's FTP client).

    :param str source: the remote path to get data
    :param destination: The destination of data (either a path to file or a
        File-like object)
    :type destination: str or File-like object
    :param str hostname: The target hostname (default: *None*, see the
        :class:`~vortex.tools.net.StdFtp` class to get the effective default)
    :param str logname: the target logname (default: *None*, see the
        :class:`~vortex.tools.net.StdFtp` class to get the effective default)
    :param int port: the port number on the remote host.
    :param CompressionPipeline cpipeline: If not *None*, the object used to
        uncompress the data during the file transfer (default: *None*).
    :return: the transfer's return code, or *False* on FTP error / failed login.
    """
    if isinstance(destination, str):  # destination may be Virtual
        # Start from a clean slate: remove any pre-existing target file.
        self.rm(destination)
    hostname = self.fix_fthostname(hostname)
    ftp = self.ftp(hostname, logname, port=port)
    if ftp:
        try:
            if cpipeline is None:
                rc = ftp.get(source, destination)
            else:
                # Stream through the uncompression pipeline while downloading.
                with cpipeline.stream2uncompress(destination) as cdestination:
                    rc = ftp.get(source, cdestination)
        except ftplib.all_errors as e:
            logger.warning('An FTP error occured: %s', str(e))
            rc = False
        finally:
            ftp.close()
        return rc
    else:
        # Login failed (self.ftp already logged a warning).
        return False
@fmtshcmd
def ftput(self, source, destination, hostname=None, logname=None, port=DEFAULT_FTP_PORT,
          cpipeline=None, sync=False):
    """Proceed to a direct ftp put on the specified target (using Vortex's FTP client).

    :param source: The source of data (either a path to file or a
        File-like object)
    :type source: str or File-like object
    :param str destination: The path where to upload the data.
    :param str hostname: The target hostname (default: *None*, see the
        :class:`~vortex.tools.net.StdFtp` class to get the effective default)
    :param str logname: the target logname (default: *None*, see the
        :class:`~vortex.tools.net.StdFtp` class to get the effective default)
    :param int port: the port number on the remote host.
    :param CompressionPipeline cpipeline: If not *None*, the object used to
        compress the data during the file transfer (default: *None*).
    :param bool sync: If False, allow asynchronous transfers (currently not
        used: transfers are always synchronous).
    :return: the transfer's return code, or *False* on FTP error / failed login.
    :raises OSError: when **source** is neither an existing file nor an io stream.
    """
    rc = False
    if self.is_iofile(source):
        hostname = self.fix_fthostname(hostname)
        ftp = self.ftp(hostname, logname, port=port)
        if ftp:
            try:
                if cpipeline is None:
                    rc = ftp.put(source, destination)
                else:
                    # Compress on the fly while uploading.
                    with cpipeline.compress2stream(source, iosponge=True) as csource:
                        # csource is an IoSponge consequently the size attribute exists
                        rc = ftp.put(csource, destination, size=csource.size)
            except ftplib.all_errors as e:
                logger.warning('An FTP error occured: %s', str(e))
                rc = False
            finally:
                ftp.close()
    else:
        raise OSError('No such file or directory: {!r}'.format(source))
    return rc
def ftspool_cache(self):
    """Return the cache object for the FtSpool (lazily created on first use)."""
    cache = self._ftspool_cache
    if cache is None:
        cache = footprints.proxy.cache(kind='ftstash')
        self._ftspool_cache = cache
    return cache
def copy2ftspool(self, source, nest=False, **kwargs):
    """Make a copy of **source** to the FtSpool cache.

    :param str source: path of the file to stash in the spool cache.
    :param bool nest: when *True*, keep the source's basename under a
        uniquely-named sub-directory (instead of a flat unique name).
    :param kwargs: forwarded to the cache's ``insert`` method (``intent``
        is always forced to ``'in'``).
    :return: the full path of the spooled copy, or *False* when the cache
        insertion failed.
    """
    # Build a collision-free spool name: timestamp + pid + md5 of the path.
    h = hashlib.new('md5')
    h.update(source.encode(encoding='utf-8'))
    outputname = 'vortex_{:s}_P{:06d}_{:s}'.format(date.now().strftime('%Y%m%d%H%M%S-%f'),
                                                   self.getpid(), h.hexdigest())
    if nest:
        outputname = self.path.join(outputname, self.path.basename(source))
    kwargs['intent'] = 'in'  # Force intent=in
    if self.ftspool_cache().insert(outputname, source, **kwargs):
        return self.ftspool_cache().fullpath(outputname)
    else:
        return False
def ftserv_allowed(self, source, destination):
    """Given **source** and **destination**, is FtServ usable ?

    FtServ only deals with plain file paths: both arguments must be strings.
    """
    return all(isinstance(item, str) for item in (source, destination))
def ftserv_put(self, source, destination, hostname=None, logname=None, port=None,
               specialshell=None, sync=False):
    """Asynchronous put of a file using FtServ.

    :param str source: path to the local file to send (must exist).
    :param str destination: the remote path (leading ``~/`` or
        ``~logname/`` is stripped, FtServ resolving paths relative to home).
    :param str hostname: target host (default: glove's default, may stay unset).
    :param str logname: target logname (default: glove's default, may stay unset).
    :param port: port number, appended to **hostname** as ``host:port``.
    :param str specialshell: forwarded to the ftput command's ``-s`` option.
    :param bool sync: when *False* (the default), request an asynchronous
        (queued) transfer with ``-q``.
    :return: the spawned command's return code (*False* on execution error).
    :raises OSError: when **source** does not exist, or when source/destination
        are not plain file paths.
    """
    if self.ftserv_allowed(source, destination):
        if self.path.exists(source):
            ftcmd = self.ftputcmd or 'ftput'
            hostname = self.fix_fthostname(hostname, fatal=False)
            logname = self.fix_ftuser(hostname, logname, fatal=False)
            extras = list()
            if not sync:
                # Queued (asynchronous) transfer
                extras.extend(['-q', ])
            if hostname:
                if port is not None:
                    # BUGFIX: port is usually an int and '{:s}'.format(port)
                    # raises ValueError on non-str values; '!s' handles both.
                    hostname += ':{!s}'.format(port)
                extras.extend(['-h', hostname])
            if logname:
                extras.extend(['-u', logname])
            if specialshell:
                extras.extend(['-s', specialshell])
            # Remove ~/ and ~logname/ from the destinations' path
            actual_dest = re.sub('^~/+', '', destination)
            if logname:
                actual_dest = re.sub('^~{:s}/+'.format(logname), '', actual_dest)
            try:
                rc = self.spawn([ftcmd,
                                 '-o', 'mkdir', ] +  # Automatically create subdirectories
                                extras + [source, actual_dest], output=False)
            except ExecutionError:
                rc = False
        else:
            raise OSError('No such file or directory: {!s}'.format(source))
    else:
        raise OSError('Source or destination is not a plain file path: {!r}'.format(source))
    return rc
def ftserv_get(self, source, destination, hostname=None, logname=None, port=None):
    """Get a file using FtServ.

    :param str source: the remote path of the data to fetch.
    :param str destination: the local path where to store the data (its
        parent directory is created via :meth:`filecocoon`).
    :param str hostname: target host (default: glove's default, may stay unset).
    :param str logname: target logname (default: glove's default, may stay unset).
    :param port: port number, appended to **hostname** as ``host:port``.
    :return: the spawned command's return code (*False* on execution error).
    :raises OSError: when the destination directory cannot be created, or when
        source/destination are not plain file paths.
    """
    if self.ftserv_allowed(source, destination):
        if self.filecocoon(destination):
            hostname = self.fix_fthostname(hostname, fatal=False)
            logname = self.fix_ftuser(hostname, logname, fatal=False)
            destination = self.path.expanduser(destination)
            extras = list()
            if hostname:
                if port is not None:
                    # BUGFIX: port is usually an int and '{:s}'.format(port)
                    # raises ValueError on non-str values; '!s' handles both.
                    hostname += ':{!s}'.format(port)
                extras.extend(['-h', hostname])
            if logname:
                extras.extend(['-u', logname])
            ftcmd = self.ftgetcmd or 'ftget'
            try:
                rc = self.spawn([ftcmd, ] + extras + [source, destination], output=False)
            except ExecutionError:
                rc = False
        else:
            raise OSError('Could not cocoon: {!s}'.format(destination))
    else:
        raise OSError('Source or destination is not a plain file path: {!r}'.format(source))
    return rc
def ftserv_batchget(self, source, destination, hostname=None, logname=None, port=None):
    """Get a list of files using FtServ.

    :note: **source** and **destination** are list or tuple.

    :param source: remote paths of the data to fetch.
    :param destination: local paths where to store the data.
    :param str hostname: target host (default: glove's default, may stay unset).
    :param str logname: target logname (default: glove's default, may stay unset).
    :param port: port number, appended to **hostname** as ``host:port``.
    :return: a list of per-file return codes (*True*/*False*, or *None* when
        a file's outcome could not be found in the command's output).
    :raises OSError: when a destination cannot be cocooned, or when a
        source/destination pair is not made of plain file paths.
    """
    if all([self.ftserv_allowed(s, d) for s, d in zip(source, destination)]):
        for d in destination:
            if not self.filecocoon(d):
                raise OSError('Could not cocoon: {!s}'.format(d))
        extras = list()
        hostname = self.fix_fthostname(hostname, fatal=False)
        logname = self.fix_ftuser(hostname, logname, fatal=False)
        if hostname:
            if port is not None:
                # BUGFIX: port is usually an int and '{:s}'.format(port)
                # raises ValueError on non-str values; '!s' handles both.
                hostname += ':{!s}'.format(port)
            extras.extend(['-h', hostname])
        if logname:
            extras.extend(['-u', logname])
        ftcmd = self.ftgetcmd or 'ftget'
        plocale = locale.getlocale()[1] or 'ascii'
        # Feed the "source destination" pairs to ftget through stdin.
        with tempfile.TemporaryFile(dir=self.path.dirname(self.path.abspath(destination[0])),
                                    mode='wb') as tmpio:
            tmpio.writelines(['{:s} {:s}\n'.format(s, d).encode(plocale)
                              for s, d in zip(source, destination)])
            tmpio.seek(0)
            with tempfile.TemporaryFile(dir=self.path.dirname(self.path.abspath(destination[0])),
                                        mode='w+b') as tmpoutput:
                try:
                    rc = self.spawn([ftcmd, ] + extras, output=tmpoutput, stdin=tmpio)
                except ExecutionError:
                    rc = False
                # Process output data
                tmpoutput.seek(0)
                ft_outputs = tmpoutput.read()
                ft_outputs = ft_outputs.decode(locale.getlocale()[1] or 'ascii', 'replace')
                logger.info('Here is the ftget command output: \n%s', ft_outputs)
                # Expand the return codes
                if rc:
                    x_rc = [True, ] * len(source)
                else:
                    # Parse the FT_OK / FT_ABORT acknowledgment lines to find
                    # out which individual transfers succeeded.
                    ack_re = re.compile(r'.*FT_(OK|ABORT)\s*:\s*GET\s+(.*)$')
                    ack_lines = dict()
                    for line in ft_outputs.split('\n'):
                        ack_match = ack_re.match(line)
                        if ack_match:
                            ack_lines[ack_match.group(2)] = ack_match.group(1) == 'OK'
                    x_rc = []
                    for a_source in source:
                        my_rc = None
                        for ack_globish, ack_rc in ack_lines.items():
                            if a_source in ack_globish:
                                my_rc = ack_rc
                                break
                        x_rc.append(my_rc)
    else:
        raise OSError('Source or destination is not a plain file path: {!r}'.format(source))
    return x_rc
def rawftput_worthy(self, source, destination):
    """Is it allowed to use FtServ given **source** and **destination** ?

    FtServ is used only when raw FTP is enabled *and* both arguments are
    plain file paths.
    """
    if not self.ftraw:
        return self.ftraw
    return self.ftserv_allowed(source, destination)
@fmtshcmd
def rawftput(self, source, destination, hostname=None, logname=None, port=None,
             cpipeline=None, sync=False):
    """Proceed with some external ftput command on the specified target.

    :param str source: Path to the source filename
    :param str destination: The path where to upload the data.
    :param str hostname: The target hostname (default: *None*).
    :param str logname: the target logname (default: *None*).
    :param int port: the port number on the remote host.
    :param CompressionPipeline cpipeline: If not *None*, the object used to
        compress the data during the file transfer (default: *None*).
    :param bool sync: If False, allow asynchronous transfers.
    """
    if cpipeline is not None:
        # NOTE(review): compress2rawftp is called twice (test + argument);
        # presumably a side-effect-free query — confirm before hoisting.
        if cpipeline.compress2rawftp(source):
            # The compression scheme is supported by FtServ itself (-s option).
            return self.ftserv_put(source, destination, hostname,
                                   logname=logname, port=port,
                                   specialshell=cpipeline.compress2rawftp(source),
                                   sync=sync)
        else:
            # FtServ cannot compress this way: fall back on the plain FTP
            # client with an on-the-fly compression pipeline.
            if port is None:
                port = DEFAULT_FTP_PORT
            return self.ftput(source, destination, hostname, logname=logname,
                              port=port, cpipeline=cpipeline, sync=sync)
    else:
        return self.ftserv_put(source, destination, hostname, logname,
                               port=port, sync=sync)
def smartftput(self, source, destination, hostname=None, logname=None, port=None,
               cpipeline=None, sync=False, fmt=None):
    """Select the best alternative between ``ftput`` and ``rawftput``.

    :param source: The source of data (either a path to file or a
        File-like object)
    :type source: str or File-like object
    :param str destination: The path where to upload the data.
    :param str hostname: The target hostname (see :class:`~vortex.tools.net.StdFtp`
        for the default)
    :param str logname: the target logname (see :class:`~vortex.tools.net.StdFtp`
        for the default)
    :param int port: the port number on the remote host.
    :param CompressionPipeline cpipeline: If not *None*, the object used to
        compress the data during the file transfer.
    :param bool sync: If False, allow asynchronous transfers.
    :param str fmt: The format of data.

    ``rawftput`` is chosen when ``self.ftraw`` is *True* and both **source**
    and **destination** are plain string paths; ``ftput`` is used otherwise.
    """
    if self.rawftput_worthy(source, destination):
        return self.rawftput(source, destination, hostname=hostname, logname=logname,
                             port=port, cpipeline=cpipeline, sync=sync, fmt=fmt)
    effective_port = DEFAULT_FTP_PORT if port is None else port
    return self.ftput(source, destination, hostname=hostname, logname=logname,
                      port=effective_port, cpipeline=cpipeline, sync=sync, fmt=fmt)
def rawftget_worthy(self, source, destination, cpipeline=None):
    """Is it allowed to use FtServ given **source** and **destination** ?

    FtServ is used only when raw FTP is enabled, no compression pipeline
    is requested, and both arguments are plain file paths.
    """
    if not self.ftraw:
        return self.ftraw
    return cpipeline is None and self.ftserv_allowed(source, destination)
@fmtshcmd
def rawftget(self, source, destination, hostname=None, logname=None, port=None,
             cpipeline=None):
    """Proceed with some external ftget command on the specified target.

    :param str source: the remote path to get data
    :param str destination: path to the filename where to put the data.
    :param str hostname: the target hostname (default: *None*).
    :param str logname: the target logname (default: *None*).
    :param int port: the port number on the remote host.
    :param CompressionPipeline cpipeline: unused (kept for compatibility)
    :raises OSError: when a **cpipeline** is provided (FtServ cannot
        uncompress on the fly).
    """
    if cpipeline is not None:
        raise OSError("cpipeline is not supported by this method.")
    return self.ftserv_get(source, destination, hostname, logname, port=port)
@fmtshcmd
def batchrawftget(self, source, destination, hostname=None, logname=None,
                  port=None, cpipeline=None):
    """Proceed with some external ftget command on the specified target.

    :param source: A list of remote paths to get data
    :param destination: A list of paths to the filename where to put the data.
    :param str hostname: the target hostname (default: *None*).
    :param str logname: the target logname (default: *None*).
    :param int port: the port number on the remote host.
    :param CompressionPipeline cpipeline: unused (kept for compatibility)
    :raises OSError: when a **cpipeline** is provided (FtServ cannot
        uncompress on the fly).
    """
    if cpipeline is not None:
        raise OSError("cpipeline is not supported by this method.")
    return self.ftserv_batchget(source, destination, hostname, logname, port=port)
def smartftget(self, source, destination, hostname=None, logname=None, port=None,
               cpipeline=None, fmt=None):
    """Select the best alternative between ``ftget`` and ``rawftget``.

    :param str source: the remote path to get data
    :param destination: The destination of data (either a path to file or a
        File-like object)
    :type destination: str or File-like object
    :param str hostname: The target hostname (see :class:`~vortex.tools.net.StdFtp`
        for the default)
    :param str logname: the target logname (see :class:`~vortex.tools.net.StdFtp`
        for the default)
    :param int port: the port number on the remote host.
    :param CompressionPipeline cpipeline: If not *None*, the object used to
        uncompress the data during the file transfer.
    :param str fmt: The format of data.

    ``rawftget`` is chosen when ``self.ftraw`` is *True*, **cpipeline** is
    *None* and both **source** and **destination** are plain string paths;
    ``ftget`` is used otherwise.
    """
    if self.rawftget_worthy(source, destination, cpipeline):
        # FtServ is uninteresting when dealing with compression
        return self.rawftget(source, destination, hostname=hostname, logname=logname,
                             port=port, cpipeline=cpipeline, fmt=fmt)
    effective_port = DEFAULT_FTP_PORT if port is None else port
    return self.ftget(source, destination, hostname=hostname, logname=logname,
                      port=effective_port, cpipeline=cpipeline, fmt=fmt)
def smartbatchftget(self, source, destination, hostname=None, logname=None,
                    port=None, cpipeline=None, fmt=None):
    """
    Select the best alternative between ``ftget`` and ``batchrawftget``
    when retrieving several files.

    :param source: A list of remote paths to get data
    :param destination: A list of destinations for the data (either a path to
        file or a File-like object)
    :type destination: str or File-like object
    :param str hostname: The target hostname (see :class:`~vortex.tools.net.StdFtp`
        for the default)
    :param str logname: the target logname (see :class:`~vortex.tools.net.StdFtp`
        for the default)
    :param int port: the port number on the remote host.
    :param CompressionPipeline cpipeline: If not *None*, the object used to
        uncompress the data during the file transfer.
    :param str fmt: The format of data.
    """
    if all([self.rawftget_worthy(s, d, cpipeline) for s, d in zip(source, destination)]):
        # FtServ is uninteresting when dealing with compression
        # BUGFIX: forward the caller-supplied port (was hardcoded port=None),
        # consistently with every other smart/raw method of this class.
        return self.batchrawftget(source, destination, hostname=hostname, logname=logname,
                                  port=port, cpipeline=cpipeline, fmt=fmt)
    else:
        rc = True
        if port is None:
            port = DEFAULT_FTP_PORT
        with self.ftppool():
            # NOTE(review): the short-circuit below stops transferring after
            # the first failure — presumably intentional; confirm.
            for s, d in zip(source, destination):
                rc = rc and self.ftget(s, d, hostname=hostname, logname=logname,
                                       port=port, cpipeline=cpipeline, fmt=fmt)
        return rc
def ssh(self, hostname, logname=None, *args, **kw):
    """Return an :class:`~vortex.tools.net.AssistedSsh` object.

    :param str hostname: the remote host's name for SSH
    :param str logname: the logname on the remote host

    Parameters provided with **args** or **kw** will be passed directly to the
    :class:`~vortex.tools.net.AssistedSsh` constructor.

    See the :class:`~vortex.tools.net.AssistedSsh` class documentation for
    more information and examples.
    """
    return AssistedSsh(self, hostname, logname, *args, **kw)
@fmtshcmd
def scpput(self, source, destination, hostname, logname=None, cpipeline=None):
    """Perform an scp to the specified target.

    :param source: The source of data (either a path to file or a
        File-like object)
    :type source: str or File-like object
    :param str destination: The path where to upload the data.
    :param str hostname: The target hostname.
    :param str logname: the target logname (default: current user)
    :param CompressionPipeline cpipeline: If not *None*, the object used to
        compress the data during the file transfer (default: *None*).
    """
    logname = self.fix_ftuser(hostname, logname, fatal=False, defaults_to_user=False)
    msg = '[hostname={!s} logname={!s}]'.format(hostname, logname)
    ssh = self.ssh(hostname, logname)
    if isinstance(source, str) and cpipeline is None:
        # Plain file, no compression: direct scp of the file.
        self.stderr('scpput', source, destination, msg)
        return ssh.scpput(source, destination)
    else:
        # Stream-based transfer (File-like source and/or compression).
        self.stderr('scpput_stream', source, destination, msg)
        if cpipeline is None:
            return ssh.scpput_stream(source, destination)
        else:
            with cpipeline.compress2stream(source) as csource:
                return ssh.scpput_stream(csource, destination)
@fmtshcmd
def scpget(self, source, destination, hostname, logname=None, cpipeline=None):
    """Perform an scp to get the specified source.

    :param str source: the remote path to get data
    :param destination: The destination of data (either a path to file or a
        File-like object)
    :type destination: str or File-like object
    :param str hostname: The target hostname.
    :param str logname: the target logname (default: current user)
    :param CompressionPipeline cpipeline: If not *None*, the object used to
        uncompress the data during the file transfer (default: *None*).
    """
    logname = self.fix_ftuser(hostname, logname, fatal=False, defaults_to_user=False)
    msg = '[hostname={!s} logname={!s}]'.format(hostname, logname)
    ssh = self.ssh(hostname, logname)
    if isinstance(destination, str) and cpipeline is None:
        # Plain file, no compression: direct scp of the file.
        self.stderr('scpget', source, destination, msg)
        return ssh.scpget(source, destination)
    else:
        # Stream-based transfer (File-like destination and/or uncompression).
        self.stderr('scpget_stream', source, destination, msg)
        if cpipeline is None:
            return ssh.scpget_stream(source, destination)
        else:
            with cpipeline.stream2uncompress(destination) as cdestination:
                return ssh.scpget_stream(source, cdestination)
def softlink(self, source, destination):
|
|
2050
|
+
"""Set a symbolic link if **source** is not **destination**."""
|
|
2051
|
+
self.stderr('softlink', source, destination)
|
|
2052
|
+
if source == destination:
|
|
2053
|
+
return False
|
|
2054
|
+
else:
|
|
2055
|
+
return self.symlink(source, destination)
|
|
2056
|
+
|
|
2057
|
+
def size(self, filepath):
|
|
2058
|
+
"""Returns the actual size in bytes of the specified **filepath**."""
|
|
2059
|
+
filepath = self.path.expanduser(filepath)
|
|
2060
|
+
self.stderr('size', filepath)
|
|
2061
|
+
try:
|
|
2062
|
+
return self.stat(filepath).st_size
|
|
2063
|
+
except Exception:
|
|
2064
|
+
return -1
|
|
2065
|
+
|
|
2066
|
+
def treesize(self, objpath):
|
|
2067
|
+
"""Size in byte of the whole **objpath** directory (or file).
|
|
2068
|
+
|
|
2069
|
+
Links are not followed, and directory sizes are taken into account:
|
|
2070
|
+
should return the same as ``du -sb``.
|
|
2071
|
+
|
|
2072
|
+
Raises ``OSError`` if **objpath** does not exist.
|
|
2073
|
+
"""
|
|
2074
|
+
objpath = self.path.expanduser(objpath)
|
|
2075
|
+
if self.path.isdir(objpath):
|
|
2076
|
+
total_size = self.size(objpath)
|
|
2077
|
+
for dirpath, dirnames, filenames in self.walk(objpath):
|
|
2078
|
+
for f in filenames + dirnames:
|
|
2079
|
+
total_size += self.lstat(self.path.join(dirpath, f)).st_size
|
|
2080
|
+
return total_size
|
|
2081
|
+
return self.lstat(objpath).st_size
|
|
2082
|
+
|
|
2083
|
+
def mkdir(self, dirpath, fatal=True):
|
|
2084
|
+
"""Normalises path name of **dirpath** and recursively creates this directory."""
|
|
2085
|
+
normdir = self.path.normpath(self.path.expanduser(dirpath))
|
|
2086
|
+
if normdir and not self.path.isdir(normdir):
|
|
2087
|
+
logger.debug('Cocooning directory %s', normdir)
|
|
2088
|
+
self.stderr('mkdir', normdir)
|
|
2089
|
+
try:
|
|
2090
|
+
self.makedirs(normdir)
|
|
2091
|
+
return True
|
|
2092
|
+
except OSError:
|
|
2093
|
+
# The directory may have been created exactly at the same time
|
|
2094
|
+
# by another process...
|
|
2095
|
+
if fatal and not self.path.isdir(normdir):
|
|
2096
|
+
raise
|
|
2097
|
+
return self.path.isdir(normdir)
|
|
2098
|
+
else:
|
|
2099
|
+
return True
|
|
2100
|
+
|
|
2101
|
+
def filecocoon(self, destination):
|
|
2102
|
+
"""Normalises path name of ``destination`` and creates **destination**'s directory."""
|
|
2103
|
+
return self.mkdir(self.path.dirname(self.path.expanduser(destination)))
|
|
2104
|
+
|
|
2105
|
+
_SAFE_SUFFIX_RE = re.compile('_[a-f0-9]{32}$')
|
|
2106
|
+
|
|
2107
|
+
def safe_filesuffix(self):
|
|
2108
|
+
"""Returns a file suffix that should be unique across the system."""
|
|
2109
|
+
return '_' + uuid.uuid1().hex
|
|
2110
|
+
|
|
2111
|
+
def safe_fileaddsuffix(self, name):
|
|
2112
|
+
"""Returns a file path that will look like name + a unique suffix."""
|
|
2113
|
+
d_name = self.path.dirname(name)
|
|
2114
|
+
b_name = self.path.basename(name)
|
|
2115
|
+
b_name = self._SAFE_SUFFIX_RE.sub('', b_name)
|
|
2116
|
+
return self.path.join(d_name, b_name + self.safe_filesuffix())
|
|
2117
|
+
|
|
2118
|
+
    def _validate_symlink_below(self, symlink, valid_below):
        """
        Check that **symlink** is relative and that its target is below
        the **valid_below** directory.

        :param str symlink: path to an existing symbolic link.
        :param str valid_below: directory the target must lie under.
        :return: the relative link target when it is valid, *None* otherwise.

        :note: **valid_below** needs to be an absolute canonical path
            (this is user responsability)
        """
        link_to = self._os.readlink(symlink)
        # Is it relative ?
        # NOTE(review): the '.' characters in '..{sep}' / '.{sep}' are
        # unescaped regex metacharacters (match any char) — presumably the
        # intent is to accept targets starting with '../' or './'; confirm.
        if re.match('^([^{0:s}]|..{0:s}|.{0:s})'.format(re.escape(os.path.sep)),
                    link_to):
            # Canonical absolute directory containing the symlink
            symlink_dir = self.path.realpath(
                self.path.abspath(
                    self.path.dirname(symlink)
                )
            )
            # Absolute, normalised path of the link target
            abspath_to = self.path.normpath(
                self.path.join(symlink_dir, link_to)
            )
            # Valid ?
            # NOTE(review): commonprefix is a plain string prefix test, so
            # '/foo' would also "contain" '/foobar' — verify this is acceptable.
            valid = self.path.commonprefix([valid_below, abspath_to]) == valid_below
            return (self.path.relpath(abspath_to, start=symlink_dir)
                    if valid else None)
        else:
            # Absolute symlink: never considered safe to preserve
            return None
|
|
2144
|
+
|
|
2145
|
+
def _copydatatree(self, src, dst, keep_symlinks_below=None):
|
|
2146
|
+
"""Recursively copy a directory tree using copyfile.
|
|
2147
|
+
|
|
2148
|
+
This is a variant of shutil's copytree. But, unlike with copytree,
|
|
2149
|
+
only data are copied (the permissions, access times, ... are ignored).
|
|
2150
|
+
|
|
2151
|
+
The destination directory must not already exist.
|
|
2152
|
+
"""
|
|
2153
|
+
self.stderr('_copydatatree', src, dst)
|
|
2154
|
+
with self.mute_stderr():
|
|
2155
|
+
keep_symlinks_below = (keep_symlinks_below or
|
|
2156
|
+
self.path.realpath(self.path.abspath(src)))
|
|
2157
|
+
names = self._os.listdir(src)
|
|
2158
|
+
self._os.makedirs(dst)
|
|
2159
|
+
errors = []
|
|
2160
|
+
for name in names:
|
|
2161
|
+
srcname = self._os.path.join(src, name)
|
|
2162
|
+
dstname = self._os.path.join(dst, name)
|
|
2163
|
+
try:
|
|
2164
|
+
if self.path.isdir(srcname):
|
|
2165
|
+
self._copydatatree(srcname, dstname,
|
|
2166
|
+
keep_symlinks_below=keep_symlinks_below)
|
|
2167
|
+
elif self._os.path.islink(srcname):
|
|
2168
|
+
linkto = self._validate_symlink_below(srcname, keep_symlinks_below)
|
|
2169
|
+
if linkto is not None:
|
|
2170
|
+
self._os.symlink(linkto, dstname)
|
|
2171
|
+
else:
|
|
2172
|
+
rc = self._sh.copyfile(srcname, dstname)
|
|
2173
|
+
else:
|
|
2174
|
+
# Will raise a SpecialFileError for unsupported file types
|
|
2175
|
+
self._sh.copyfile(srcname, dstname)
|
|
2176
|
+
# catch the Error from the recursive copytree so that we can
|
|
2177
|
+
# continue with other files
|
|
2178
|
+
except CopyTreeError as err:
|
|
2179
|
+
errors.extend(err.args[0])
|
|
2180
|
+
except OSError as why:
|
|
2181
|
+
errors.append((srcname, dstname, str(why)))
|
|
2182
|
+
if errors:
|
|
2183
|
+
raise CopyTreeError(errors)
|
|
2184
|
+
return dst
|
|
2185
|
+
|
|
2186
|
+
    def rawcp(self, source, destination):
        """Perform a simple ``copyfile`` or ``copytree`` command depending on **source**.

        When copying a file, the operation is atomic. When copying a directory
        it is not (although the non-atomic portion is very limited).

        :param str source: path of the file or directory to copy.
        :param str destination: target path.
        :return: *True* on apparent success (see below for the checks made).
        """
        source = self.path.expanduser(source)
        destination = self.path.expanduser(destination)
        self.stderr('rawcp', source, destination)
        # Data is first staged into a uniquely-suffixed temporary path, then
        # moved into place, to get as close to atomicity as possible.
        tmp = self.safe_fileaddsuffix(destination)
        if self.path.isdir(source):
            self._copydatatree(source, tmp)
            # Move fails if a directory already exists ; so be careful...
            with self.secure_directory_move(destination):
                self.move(tmp, destination)
            return self.path.isdir(destination)
        else:
            self.copyfile(source, tmp)
            # Preserve the execution permissions...
            if self.xperm(source):
                self.xperm(tmp, force=True)
            self.move(tmp, destination)  # Move is atomic for a file
            # Success check: full content compare or a cheap size compare,
            # depending on the instance's _cmpaftercp setting.
            if self._cmpaftercp:
                return filecmp.cmp(source, destination)
            else:
                return bool(self.size(source) == self.size(destination))
|
|
2212
|
+
|
|
2213
|
+
    def hybridcp(self, source, destination, silent=False):
        """Copy the **source** file to a safe **destination**.

        **source** and/or **destination** may be File-like objects.

        If **destination** is a real-world file name (i.e. not a File-like
        object), the operation is atomic.

        :param bool silent: do not log an error for a missing source.
        :return: *True*/*False* (or the copyfileobj return code).
        """
        self.stderr('hybridcp', source, destination)
        if isinstance(source, str):
            if not self.path.exists(source):
                if not silent:
                    logger.error('Missing source %s', source)
                return False
            # A path was given: open it ourselves (and remember to close it)
            source = open(self.path.expanduser(source), 'rb')
            xsource = True
        else:
            xsource = False
            # File-like source: rewind it if possible before copying
            try:
                source.seek(0)
            except AttributeError:
                logger.warning('Could not rewind io source before cp: ' + str(source))
        if isinstance(destination, str):
            if self.filecocoon(destination):
                # Write to a temp file
                original_dest = self.path.expanduser(destination)
                tmp_dest = self.safe_fileaddsuffix(self.path.expanduser(destination))
                destination = open(tmp_dest, 'wb')
                xdestination = True
            else:
                logger.error('Could not create a cocoon for file %s', destination)
                return False
        else:
            # File-like destination: rewind before writing
            destination.seek(0)
            xdestination = False
        rc = self.copyfileobj(source, destination)
        # NOTE(review): copyfileobj returning None is treated as success here;
        # a falsy non-None rc would still trigger the final move below — confirm.
        if rc is None:
            rc = True
        if xsource:
            source.close()
        if xdestination:
            destination.close()
            # Move the tmp_file to the real destination
            if not self.move(tmp_dest, original_dest):  # Move is atomic for a file
                logger.error('Cannot move the tmp file to the final destination %s', original_dest)
                return False
        return rc
|
|
2260
|
+
|
|
2261
|
+
def is_samefs(self, path1, path2):
|
|
2262
|
+
"""Check whether two paths are located on the same filesystem."""
|
|
2263
|
+
st1 = self.stat(path1)
|
|
2264
|
+
st2 = self.stat(self.path.dirname(self.path.realpath(path2)))
|
|
2265
|
+
return st1.st_dev == st2.st_dev and not self.path.islink(path1)
|
|
2266
|
+
|
|
2267
|
+
def _rawcp_instead_of_hardlink(self, source, destination, securecopy=True):
|
|
2268
|
+
self.stderr('rawcp_instead_of_hardlink', source, destination)
|
|
2269
|
+
if securecopy:
|
|
2270
|
+
rc = self.rawcp(source, destination)
|
|
2271
|
+
else:
|
|
2272
|
+
# Do not bother with a temporary file, create a direct copy
|
|
2273
|
+
self.copyfile(source, destination)
|
|
2274
|
+
# Preserve the execution permissions...
|
|
2275
|
+
if self.xperm(source):
|
|
2276
|
+
self.xperm(destination, force=True)
|
|
2277
|
+
rc = bool(self.size(source) == self.size(destination))
|
|
2278
|
+
return rc
|
|
2279
|
+
|
|
2280
|
+
    def _safe_hardlink(self, source, destination, securecopy=True):
        """Create a (unique) hardlink in a secure way.

        i.e. if the "Too many links" OS error is raised, we try to replace
        the original file by a copy of itself. If that also fails because of
        the lack of file permissions, a "simple" rawcp is made.

        :param bool securecopy: while creating the copy of the source file
            (because of a "Too many links" OS error), create
            a temporary filename and move it afterward to the
            *destination*: longer but safer.
        :return: *True* when source and destination end up as the same file
            (or, in the copy fallback, when the copy succeeded).
        """
        try:
            self._os.link(source, destination)
        except OSError as e:
            if e.errno == errno.EMLINK:
                # Too many links
                logger.warning('Too many links for the source file (%s).', source)
                if self.usr_file(source):
                    # First copy the data to destination...
                    rc = self._rawcp_instead_of_hardlink(source, destination, securecopy=securecopy)
                    if rc:
                        try:
                            # ...then swap: the fresh copy becomes the new
                            # source (resetting its link count)
                            logger.warning('Replacing the orignal file with a copy...')
                            self.move(destination, source)
                        except OSError as ebis:
                            if ebis.errno == errno.EACCES:
                                # Permission denied
                                logger.warning('No permissions to create a copy of the source file (%s)',
                                               source)
                                # The earlier copy already sits at destination:
                                # keep it and report rc from the copy
                                logger.warning('Going on with the copy instead of the link...')
                            else:
                                raise
                        else:
                            # Ok, a copy was created for the source file
                            self.link(source, destination)
                            rc = self.path.samefile(source, destination)
                else:
                    # Not our file: we cannot replace it, give up
                    raise
            else:
                raise
        else:
            rc = self.path.samefile(source, destination)
        return rc
|
|
2321
|
+
|
|
2322
|
+
    def hardlink(self, source, destination,
                 link_threshold=0, readonly=True, securecopy=True,
                 keep_symlinks_below=None):
        """Create hardlinks for both single files or directories.

        :param int link_threshold: if the source file size is smaller than
            **link_threshold** a copy is made (instead
            of a hardlink)
        :param bool readonly: ensure that all of the created links are readonly
        :param bool securecopy: while creating the copy of the source file
            (because of a "Too many links" OS error or **link_threshold**),
            create a temporary filename and move it afterward to the
            *destination*: longer but safer.
        :param str keep_symlinks_below: Preserve relative symlinks that have
            a target below the **keep_symlinks_below**
            directory (if omitted, **source** is used)
        :return: *True* on success.
        """
        if self.path.isdir(source):
            self.stderr('hardlink', source, destination,
                        '#', 'directory,', 'readonly={!s}'.format(readonly))
            keep_symlinks_below = (keep_symlinks_below or
                                   self.path.realpath(self.path.abspath(source)))
            with self.mute_stderr():
                # Mimics 'cp -al'
                names = self._os.listdir(source)
                self._os.makedirs(destination)
                rc = True
                for name in names:
                    srcname = self._os.path.join(source, name)
                    dstname = self._os.path.join(destination, name)
                    if self._os.path.islink(srcname):
                        linkto = self._validate_symlink_below(srcname, keep_symlinks_below)
                        if linkto is None:
                            # Unsafe symlink: recurse on the link's target
                            link_target = self.path.join(self.path.dirname(srcname),
                                                         self._os.readlink(srcname))
                            rc = self.hardlink(link_target, dstname,
                                               link_threshold=link_threshold,
                                               readonly=readonly, securecopy=securecopy,
                                               keep_symlinks_below=keep_symlinks_below)
                        else:
                            # Safe relative symlink: re-create it as is
                            self._os.symlink(linkto, dstname)
                    elif self.path.isdir(srcname):
                        rc = self.hardlink(srcname, dstname,
                                           link_threshold=link_threshold,
                                           readonly=readonly, securecopy=securecopy,
                                           keep_symlinks_below=keep_symlinks_below)
                    else:
                        # Regular file: copy when small, hardlink otherwise
                        if link_threshold and self.size(srcname) < link_threshold:
                            rc = self._rawcp_instead_of_hardlink(srcname, dstname, securecopy=securecopy)
                        else:
                            rc = self._safe_hardlink(srcname, dstname, securecopy=securecopy)
                        if readonly and rc:
                            self.readonly(dstname)
                    if not rc:
                        logger.error('Error while processing %s (rc=%s)', srcname, str(rc))
                        break
                if rc:
                    # Replicate the directory's metadata and re-open it for writing
                    self._sh.copystat(source, destination)
                    self.wperm(destination, force=True)
                return rc
        else:
            if link_threshold and self.size(source) < link_threshold:
                rc = self._rawcp_instead_of_hardlink(source, destination, securecopy=securecopy)
            else:
                self.stderr('hardlink', source, destination)
                rc = self._safe_hardlink(source, destination, securecopy=securecopy)
            if readonly and rc:
                self.readonly(destination)
            return rc
|
|
2391
|
+
|
|
2392
|
+
    def _smartcp_cross_users_links_fallback(self, source, destination, smartcp_threshold, silent,
                                            exc, tmp_destination=None):
        """Catch errors related to Kernel configuration.

        When hardlinking someone else's file fails with EPERM (expected when
        the ``fs.protected_hardlinks`` kernel setting is 1), disable
        cross-users links on this System object and retry the whole smartcp
        (which will then fall back to a plain copy). Any other error is
        re-raised.

        :param OSError exc: the exception caught by the caller.
        :param str tmp_destination: leftover temporary path to clean up first.
        """
        if (exc.errno == errno.EPERM) and not self.usr_file(source):
            # This is expected to fail if the fs.protected_hardlinks
            # Linux kernel setting is 1.
            if tmp_destination is not None:
                self.remove(tmp_destination)
            logger.info("Force System's allow_cross_users_links to False")
            self.allow_cross_users_links = False
            logger.info("Re-running the smartcp command")
            return self.smartcp(source, destination,
                                smartcp_threshold=smartcp_threshold, silent=silent)
        else:
            raise
|
|
2407
|
+
|
|
2408
|
+
    def smartcp(self, source, destination, smartcp_threshold=0, silent=False):
        """
        Hard link the **source** file to a safe **destination** (if possible).
        Otherwise, let the standard copy do the job.

        **source** and/or **destination** may be File-like objects.

        When working on a file, the operation is atomic. When working on a
        directory some restrictions apply (see :meth:`rawcp`)

        :param int smartcp_threshold: files smaller than this (in bytes) are
            copied instead of hardlinked.
        :param bool silent: do not log an error for a missing source.
        :return: *True* on success.
        """
        self.stderr('smartcp', source, destination)
        # File-like objects cannot be hardlinked: delegate to hybridcp
        if not isinstance(source, str) or not isinstance(destination, str):
            return self.hybridcp(source, destination)
        source = self.path.expanduser(source)
        if not self.path.exists(source):
            if not silent:
                logger.error('Missing source %s', source)
            return False
        if self.filecocoon(destination):
            destination = self.path.expanduser(destination)
            if self.path.islink(source):
                # Solve the symbolic link: this may avoid a rawcp
                source = self.path.realpath(source)
            # Hardlinking requires the same filesystem and permission to
            # link the source (unless cross-users links are allowed)
            if (self.is_samefs(source, destination) and
                    (self.allow_cross_users_links or self.usr_file(source))):
                tmp_destination = self.safe_fileaddsuffix(destination)
                if self.path.isdir(source):
                    try:
                        rc = self.hardlink(source, tmp_destination,
                                           link_threshold=smartcp_threshold, securecopy=False)
                    except OSError as e:
                        rc = self._smartcp_cross_users_links_fallback(
                            source, destination,
                            smartcp_threshold, silent, e, tmp_destination=tmp_destination
                        )
                    else:
                        if rc:
                            # Move fails if a directory already exists ; so be careful...
                            with self.secure_directory_move(destination):
                                rc = self.move(tmp_destination, destination)
                            if not rc:
                                logger.error('Cannot move the tmp directory to the final destination %s',
                                             destination)
                                self.remove(tmp_destination)  # Anyway, try to clean-up things
                        else:
                            logger.error('Cannot copy the data to the tmp directory %s', tmp_destination)
                            self.remove(tmp_destination)  # Anyway, try to clean-up things
                    return rc
                else:
                    try:
                        rc = self.hardlink(source, tmp_destination,
                                           link_threshold=smartcp_threshold, securecopy=False)
                    except OSError as e:
                        rc = self._smartcp_cross_users_links_fallback(source, destination,
                                                                      smartcp_threshold, silent, e)
                    else:
                        rc = rc and self.move(tmp_destination, destination)  # Move is atomic for a file
                        # On some systems, the temporary file may remain (if the
                        # destination's inode is identical to the tmp_destination's
                        # inode). The following call to remove will remove leftovers.
                        self.remove(tmp_destination)
                    return rc
            else:
                rc = self.rawcp(source, destination)  # Rawcp is atomic as much as possible
                if rc:
                    # Make the copy readonly, like a hardlink would effectively be
                    if self.path.isdir(destination):
                        for copiedfile in self.ffind(destination):
                            if not self.path.islink(copiedfile):  # This make no sense to chmod symlinks
                                self.chmod(copiedfile, 0o444)
                    else:
                        self.readonly(destination)
                return rc
        else:
            logger.error('Could not create a cocoon for file %s', destination)
            return False
|
|
2483
|
+
|
|
2484
|
+
    @fmtshcmd
    def cp(self, source, destination, intent='inout',
           smartcp=True, smartcp_threshold=0, silent=False):
        """Copy the **source** file to a safe **destination**.

        :param source: The source of data (either a path to file or a
            File-like object)
        :type source: str or File-like object
        :param destination: The destination of data (either a path to file or a
            File-like object)
        :type destination: str or File-like object
        :param str intent: 'in' for a read-only copy. 'inout' for a read-write copy
            (default: 'inout').
        :param bool smartcp: use :meth:`smartcp` as much as possible (default: *True*)
        :param int smartcp_threshold: Should smartcp be used, it will only be activated if
            the source file size is above *smartcp_threshold* Bytes.
        :param bool silent: do not complain on error (default: *False*).

        It relies on :meth:`hybridcp`, :meth:`smartcp` or :meth:`rawcp`
        depending on **source**, **destination** and **intent**.

        The fastest option should be used...
        """
        self.stderr('cp', source, destination)
        # File-like objects: only hybridcp can deal with them
        if not isinstance(source, str) or not isinstance(destination, str):
            return self.hybridcp(source, destination, silent=silent)
        if not self.path.exists(source):
            if not silent:
                logger.error('Missing source %s', source)
            return False
        # Read-only intent allows the hardlink-based fast path
        if smartcp and intent == 'in':
            return self.smartcp(source, destination,
                                smartcp_threshold=smartcp_threshold, silent=silent)
        if self.filecocoon(destination):
            return self.rawcp(source, destination)
        else:
            logger.error('Could not create a cocoon for file %s', destination)
            return False
|
|
2522
|
+
|
|
2523
|
+
def glob(self, *args):
|
|
2524
|
+
"""Glob file system entries according to ``args``. Returns a list."""
|
|
2525
|
+
entries = []
|
|
2526
|
+
for entry in args:
|
|
2527
|
+
if entry.startswith(':'):
|
|
2528
|
+
entries.append(entry[1:])
|
|
2529
|
+
else:
|
|
2530
|
+
entries.extend(glob.glob(self.path.expanduser(entry)))
|
|
2531
|
+
return entries
|
|
2532
|
+
|
|
2533
|
+
def rmall(self, *args, **kw):
|
|
2534
|
+
"""Unlink the specified **args** objects with globbing."""
|
|
2535
|
+
rc = True
|
|
2536
|
+
for pname in args:
|
|
2537
|
+
for objpath in self.glob(pname):
|
|
2538
|
+
rc = self.remove(objpath, **kw) and rc
|
|
2539
|
+
|
|
2540
|
+
def safepath(self, thispath, safedirs):
|
|
2541
|
+
"""
|
|
2542
|
+
Boolean to check if **thispath** is a subpath of a **safedirs**
|
|
2543
|
+
with sufficient depth (or not a subpath at all)
|
|
2544
|
+
"""
|
|
2545
|
+
safe = True
|
|
2546
|
+
if len(thispath.split(self._os.sep)) < self._rmtreemin + 1:
|
|
2547
|
+
logger.warning('Unsafe starting point depth %s (min is %s)', thispath, self._rmtreemin)
|
|
2548
|
+
safe = False
|
|
2549
|
+
else:
|
|
2550
|
+
for safepack in safedirs:
|
|
2551
|
+
(safedir, d) = safepack
|
|
2552
|
+
rp = self.path.relpath(thispath, safedir)
|
|
2553
|
+
if not rp.startswith('..'):
|
|
2554
|
+
if len(rp.split(self._os.sep)) < d:
|
|
2555
|
+
logger.warning('Unsafe access to %s relative to %s', thispath, safedir)
|
|
2556
|
+
safe = False
|
|
2557
|
+
return safe
|
|
2558
|
+
|
|
2559
|
+
def rmsafe(self, pathlist, safedirs):
|
|
2560
|
+
"""
|
|
2561
|
+
Recursive unlinks of the specified **pathlist** objects (if safe according
|
|
2562
|
+
to :meth:`safepath`).
|
|
2563
|
+
"""
|
|
2564
|
+
ok = True
|
|
2565
|
+
if isinstance(pathlist, str):
|
|
2566
|
+
pathlist = [pathlist]
|
|
2567
|
+
for pname in pathlist:
|
|
2568
|
+
for entry in filter(lambda x: self.safepath(x, safedirs), self.glob(pname)):
|
|
2569
|
+
ok = self.remove(entry) and ok
|
|
2570
|
+
return ok
|
|
2571
|
+
|
|
2572
|
+
    def _globcmd(self, cmd, args, **kw):
        """Globbing files or directories as arguments before running ``cmd``.

        :param list cmd: base command (mutated in place: options and globbed
            paths are appended to it).
        :param args: mixed options (``-...``) and glob patterns.
        :return: *False* when patterns were given but none matched, otherwise
            the :meth:`spawn` result.
        """
        # Options first, then the expanded (globbed) positional arguments
        cmd.extend([opt for opt in args if opt.startswith('-')])
        cmdlen = len(cmd)
        cmdargs = False
        globtries = [self.path.expanduser(x) for x in args if not x.startswith('-')]
        for pname in globtries:
            cmdargs = True
            cmd.extend(self.glob(pname))
        # Patterns were provided but nothing matched: do not run the command
        if cmdargs and len(cmd) == cmdlen:
            logger.warning('Could not find any matching pattern %s', globtries)
            return False
        else:
            kw.setdefault('ok', [0])
            return self.spawn(cmd, **kw)
|
|
2587
|
+
|
|
2588
|
+
    @_kw2spawn
    def wc(self, *args, **kw):
        """Word count on globbed files (delegates to :meth:`_globcmd`)."""
        return self._globcmd(['wc'], args, **kw)
|
|
2592
|
+
|
|
2593
|
+
    @_kw2spawn
    def ls(self, *args, **kw):
        """Clone of the eponymous unix command (globbing through :meth:`_globcmd`)."""
        return self._globcmd(['ls'], args, **kw)
|
|
2597
|
+
|
|
2598
|
+
    @_kw2spawn
    def ll(self, *args, **kw):
        """Clone of the eponymous unix alias (ls -l).

        Prints the listing (minus any "total" summary line); returns *None*
        on success and *False* when the underlying command failed.
        """
        kw['output'] = True
        llresult = self._globcmd(['ls', '-l'], args, **kw)
        if llresult:
            for lline in [x for x in llresult if not x.startswith('total')]:
                print(lline)
        else:
            return False
|
|
2608
|
+
|
|
2609
|
+
    @_kw2spawn
    def dir(self, *args, **kw):
        """Proxy to ``ls('-l')`` (returns the spawn result, does not print)."""
        return self._globcmd(['ls', '-l'], args, **kw)
|
|
2613
|
+
|
|
2614
|
+
    @_kw2spawn
    def cat(self, *args, **kw):
        """Clone of the eponymous unix command (globbing through :meth:`_globcmd`)."""
        return self._globcmd(['cat'], args, **kw)
|
|
2618
|
+
|
|
2619
|
+
    @fmtshcmd
    @_kw2spawn
    def diff(self, *args, **kw):
        """Compare globbed files (actually runs the unix ``cmp`` command).

        Exit codes 0 (identical) and 1 (different) are both accepted as
        non-fatal.
        """
        kw.setdefault('ok', [0, 1])
        kw.setdefault('output', False)
        return self._globcmd(['cmp'], args, **kw)
|
|
2626
|
+
|
|
2627
|
+
    @_kw2spawn
    def rmglob(self, *args, **kw):
        """Wrapper of the shell's ``rm`` command through the :meth:`_globcmd` method."""
        return self._globcmd(['rm'], args, **kw)
|
|
2631
|
+
|
|
2632
|
+
@fmtshcmd
|
|
2633
|
+
def move(self, source, destination):
|
|
2634
|
+
"""Move the ``source`` file or directory (using shutil).
|
|
2635
|
+
|
|
2636
|
+
:param str source: The source object (file, directory, ...)
|
|
2637
|
+
:param str destination: The destination object (file, directory, ...)
|
|
2638
|
+
"""
|
|
2639
|
+
self.stderr('move', source, destination)
|
|
2640
|
+
try:
|
|
2641
|
+
self._sh.move(source, destination)
|
|
2642
|
+
except Exception:
|
|
2643
|
+
logger.critical('Could not move <%s> to <%s>', source, destination)
|
|
2644
|
+
raise
|
|
2645
|
+
else:
|
|
2646
|
+
return True
|
|
2647
|
+
|
|
2648
|
+
    @contextlib.contextmanager
    def secure_directory_move(self, destination):
        """Context manager that moves any pre-existing **destination** aside.

        Inside a lock directory, an existing **destination** is moved to a
        uniquely-suffixed temporary path before the body runs, and that
        temporary path is removed afterwards. Yields *True* when such a
        cleanup was set up, *False* otherwise.
        """
        # NOTE(review): the lockdir name concatenation assumes destination is
        # a str even though the isinstance test below allows otherwise — confirm.
        with self.lockdir_context(destination + '.vortex-lockdir', sloppy=True):
            do_cleanup = (isinstance(destination, str) and
                          self.path.exists(destination))
            if do_cleanup:
                # Warning: Not an atomic portion of code (sorry)
                tmp_destination = self.safe_fileaddsuffix(destination)
                self.move(destination, tmp_destination)
                yield do_cleanup
                # End of none atomic part
            else:
                yield do_cleanup
            if do_cleanup:
                self.remove(tmp_destination)
|
|
2663
|
+
|
|
2664
|
+
@fmtshcmd
|
|
2665
|
+
def mv(self, source, destination):
|
|
2666
|
+
"""Move the ``source`` file or directory (using shutil or hybridcp).
|
|
2667
|
+
|
|
2668
|
+
:param source: The source object (file, directory, File-like object, ...)
|
|
2669
|
+
:param destination: The destination object (file, directory, File-like object, ...)
|
|
2670
|
+
"""
|
|
2671
|
+
self.stderr('mv', source, destination)
|
|
2672
|
+
if not isinstance(source, str) or not isinstance(destination, str):
|
|
2673
|
+
self.hybridcp(source, destination)
|
|
2674
|
+
if isinstance(source, str):
|
|
2675
|
+
return self.remove(source)
|
|
2676
|
+
else:
|
|
2677
|
+
return self.move(source, destination)
|
|
2678
|
+
|
|
2679
|
+
@_kw2spawn
|
|
2680
|
+
def mvglob(self, *args):
|
|
2681
|
+
"""Wrapper of the shell's ``mv`` command through the :meth:`globcmd` method."""
|
|
2682
|
+
return self._globcmd(['mv'], args)
|
|
2683
|
+
|
|
2684
|
+
def listdir(self, *args):
|
|
2685
|
+
"""Proxy to standard :mod:`os` directory listing function."""
|
|
2686
|
+
if not args:
|
|
2687
|
+
args = ('.',)
|
|
2688
|
+
self.stderr('listdir', *args)
|
|
2689
|
+
return self._os.listdir(self.path.expanduser(args[0]))
|
|
2690
|
+
|
|
2691
|
+
def pyls(self, *args):
|
|
2692
|
+
"""
|
|
2693
|
+
Proxy to globbing after removing any option. A bit like the
|
|
2694
|
+
:meth:`ls` method except that that shell's ``ls`` command is not actually
|
|
2695
|
+
called.
|
|
2696
|
+
"""
|
|
2697
|
+
rl = [x for x in args if not x.startswith('-')]
|
|
2698
|
+
if not rl:
|
|
2699
|
+
rl.append('*')
|
|
2700
|
+
self.stderr('pyls', *rl)
|
|
2701
|
+
return self.glob(*rl)
|
|
2702
|
+
|
|
2703
|
+
def ldirs(self, *args):
|
|
2704
|
+
"""
|
|
2705
|
+
Proxy to directories globbing after removing any option. A bit like the
|
|
2706
|
+
:meth:`ls` method except that that shell's ``ls`` command is not actually
|
|
2707
|
+
called.
|
|
2708
|
+
"""
|
|
2709
|
+
rl = [x for x in args if not x.startswith('-')]
|
|
2710
|
+
if not rl:
|
|
2711
|
+
rl.append('*')
|
|
2712
|
+
self.stderr('ldirs', *rl)
|
|
2713
|
+
return [x for x in self.glob(*rl) if self.path.isdir(x)]
|
|
2714
|
+
|
|
2715
|
+
@_kw2spawn
|
|
2716
|
+
def gzip(self, *args, **kw):
|
|
2717
|
+
"""Simple gzip compression of a file."""
|
|
2718
|
+
cmd = ['gzip', '-vf', args[0]]
|
|
2719
|
+
cmd.extend(args[1:])
|
|
2720
|
+
return self.spawn(cmd, **kw)
|
|
2721
|
+
|
|
2722
|
+
@_kw2spawn
def gunzip(self, *args, **kw):
    """Simple gunzip of a gzip-compressed file.

    Extra positional arguments are appended to the command line.
    """
    cmd = ['gunzip', args[0], *args[1:]]
    return self.spawn(cmd, **kw)
|
|
2728
|
+
|
|
2729
|
+
def is_tarfile(self, filename):
    """Tell whether **filename** is a tar archive (see :func:`tarfile.is_tarfile`)."""
    expanded = self.path.expanduser(filename)
    return tarfile.is_tarfile(expanded)
|
|
2732
|
+
|
|
2733
|
+
def taropts(self, tarfile, opts, verbose=True, autocompress=True):
    """Build a proper string sequence of bundled tar option letters.

    :param str tarfile: archive name, used to guess the compression flag
    :param opts: iterable of base option letters (e.g. ``'cf'``)
    :param bool verbose: add (or remove) the ``v`` flag
    :param bool autocompress: add ``z``/``j`` according to the archive suffix

    The result is sorted so that the returned string is deterministic across
    runs (joining a bare ``set`` depends on hash randomisation); tar accepts
    bundled option letters in any order, and only ``f`` consumes an argument.

    NOTE(review): the **tarfile** parameter shadows the :mod:`tarfile`
    module; the name is kept for interface compatibility.
    """
    zopt = set(opts)
    if verbose:
        zopt.add('v')
    else:
        zopt.discard('v')
    if autocompress:
        if tarfile.endswith('gz'):
            # includes the conventional "*.tgz"
            zopt.add('z')
        else:
            zopt.discard('z')
        if tarfile.endswith('bz') or tarfile.endswith('bz2'):
            # includes the conventional "*.tbz"
            zopt.add('j')
        else:
            zopt.discard('j')
    return ''.join(sorted(zopt))
|
|
2752
|
+
|
|
2753
|
+
@_kw2spawn
def tar(self, *args, **kw):
    """Create a file archive (always c-something).

    :example: ``self.tar('destination.tar', 'directory1', 'directory2')``
    """
    verbose = kw.pop('verbose', True)
    autocompress = kw.pop('autocompress', True)
    cmd = ['tar', self.taropts(args[0], 'cf', verbose, autocompress), args[0]]
    # Sources are glob-expanded before being handed to tar
    cmd.extend(self.glob(*args[1:]))
    return self.spawn(cmd, **kw)
|
|
2763
|
+
|
|
2764
|
+
@_kw2spawn
def untar(self, *args, **kw):
    """Unpack a file archive (always x-something).

    :example: ``self.untar('source.tar')``
    :example: ``self.untar('source.tar', 'to_untar1', 'to_untar2')``
    """
    verbose = kw.pop('verbose', True)
    autocompress = kw.pop('autocompress', True)
    cmd = ['tar', self.taropts(args[0], 'xf', verbose, autocompress), args[0]]
    # Optional members to extract are passed through verbatim (no globbing)
    cmd.extend(args[1:])
    return self.spawn(cmd, **kw)
|
|
2775
|
+
|
|
2776
|
+
def smartuntar(self, source, destination, **kw):
    """Unpack a file archive in the appropriate directory.

    If **uniquelevel_ignore** is *True* (default: *False*) and the tar file
    contains only one directory, it will be extracted and renamed to
    **destination**. Otherwise, **destination** will be created and the tar's
    content will be extracted inside it.

    This is done in a relatively safe way since it is checked that no existing
    files/directories are overwritten.
    """
    uniquelevel_ignore = kw.pop('uniquelevel_ignore', False)
    fullsource = self.path.realpath(source)
    self.mkdir(destination)
    # Unpack inside a temporary sandbox created within **destination**
    # (cleaned up automatically when leaving the context)
    loctmp = tempfile.mkdtemp(prefix='untar_', dir=destination)
    with self.cdcontext(loctmp, clean_onexit=True):
        output_setting = kw.pop('output', True)
        output_txt = self.untar(fullsource, output=output_setting, **kw)
        if output_setting and output_txt:
            logger.info('Untar command output:\n%s', '\n'.join(output_txt))
        unpacked = self.glob('*')
        unpacked_prefix = '.'
        # If requested, ignore the first level of directory
        if (uniquelevel_ignore and len(unpacked) == 1 and
                self.path.isdir(self.path.join(unpacked[0]))):
            unpacked_prefix = unpacked[0]
            logger.info('Moving contents one level up: %s', unpacked_prefix)
            with self.cdcontext(unpacked_prefix):
                unpacked = self.glob('*')
        # Move items from the sandbox up to **destination**: the CWD is
        # still *loctmp* here, so '..' designates **destination**
        for untaritem in unpacked:
            itemtarget = self.path.join('..', self.path.basename(untaritem))
            if self.path.exists(itemtarget):
                # Refuse to overwrite anything already present in destination
                logger.error('Some previous item exists before untar [%s]', untaritem)
            else:
                self.mv(self.path.join(unpacked_prefix, untaritem),
                        itemtarget)
    return unpacked
|
|
2813
|
+
|
|
2814
|
+
def is_tarname(self, objname):
    """Check whether **objname** is a string carrying a known tar suffix."""
    tar_suffixes = ('.tar', '.tar.gz', '.tgz', '.tar.bz2', '.tbz')
    return isinstance(objname, str) and objname.endswith(tar_suffixes)
|
|
2821
|
+
|
|
2822
|
+
def tarname_radix(self, objname):
    """Strip any known tar suffix from **objname** (returned untouched otherwise)."""
    if not self.is_tarname(objname):
        return objname
    # splitext removes ".gz"/".bz2"/".tgz"/... — the ".tar" part may remain
    radix = self.path.splitext(objname)[0]
    return radix[:-4] if radix.endswith('.tar') else radix
|
|
2830
|
+
|
|
2831
|
+
def tarname_splitext(self, objname):
    """Like :func:`os.path.splitext`, but tar-aware (may return e.g. ``.tar.gz``)."""
    if not self.is_tarname(objname):
        return (objname, '')
    radix = self.tarname_radix(objname)
    extension = objname.replace(radix, '')
    return (radix, extension)
|
|
2838
|
+
|
|
2839
|
+
@fmtshcmd
def forcepack(self, source, destination=None):  # @UnusedVariable
    """Return the path to a "packed" data (i.e. a ready to send single file).

    Default implementation: a no-op that returns **source** unchanged.
    Format-specific variants may be dispatched by the :func:`fmtshcmd`
    decorator; **destination** is unused here but kept for interface
    compatibility with those variants.
    """
    return source
|
|
2843
|
+
|
|
2844
|
+
@fmtshcmd
def forceunpack(self, source):  # @UnusedVariable
    """Unpack the data "inplace" (if needed, depending on the format).

    Default implementation: a no-op that simply returns ``True``.
    Format-specific variants may be dispatched by the :func:`fmtshcmd`
    decorator.
    """
    return True
|
|
2848
|
+
|
|
2849
|
+
def blind_dump(self, gateway, obj, destination, bytesdump=False, **opts):
    """
    Use **gateway** for a blind dump of **obj** in file **destination**.

    :param gateway: any module exposing a ``dump(obj, fd, **opts)`` function
        (e.g. :mod:`json` or :mod:`pickle`)
    :param destination: an opened file-like object (anything with a ``write``
        method) or a filename
    :param bool bytesdump: open the target file in binary mode
    """
    if hasattr(destination, 'write'):
        return gateway.dump(obj, destination, **opts)
    rc = None
    if self.filecocoon(destination):
        mode = 'wb' if bytesdump else 'w'
        with open(self.path.expanduser(destination), mode) as fd:
            rc = gateway.dump(obj, fd, **opts)
    return rc
|
|
2863
|
+
|
|
2864
|
+
def pickle_dump(self, obj, destination, **opts):
    """Pickle **obj** into **destination** (file descriptor or filename).

    Thin binary-mode wrapper around :meth:`blind_dump`.
    """
    return self.blind_dump(pickle, obj, destination, bytesdump=True, **opts)
|
|
2870
|
+
|
|
2871
|
+
def json_dump(self, obj, destination, **opts):
    """Dump **obj** as JSON into **destination** (file descriptor or filename).

    Thin text-mode wrapper around :meth:`blind_dump`.
    """
    return self.blind_dump(json, obj, destination, **opts)
|
|
2877
|
+
|
|
2878
|
+
def blind_load(self, source, gateway, bytesload=False):
    """
    Use **gateway** for a blind load of the representation stored in **source**.

    :param source: an opened file-like object (anything with a ``read``
        method) or a filename
    :param gateway: any module exposing a ``load(fd)`` function
        (e.g. :mod:`json` or :mod:`pickle`)
    :param bool bytesload: open the file in binary mode
    """
    if hasattr(source, 'read'):
        return gateway.load(source)
    mode = 'rb' if bytesload else 'r'
    with open(self.path.expanduser(source), mode) as fd:
        return gateway.load(fd)
|
|
2890
|
+
|
|
2891
|
+
def pickle_load(self, source):
    """Load a pickled object from **source** (file descriptor or filename).

    Thin binary-mode wrapper around :meth:`blind_load`.
    """
    return self.blind_load(source, gateway=pickle, bytesload=True)
|
|
2897
|
+
|
|
2898
|
+
def json_load(self, source):
    """Load a JSON object from **source** (file descriptor or filename).

    Thin text-mode wrapper around :meth:`blind_load`.
    """
    return self.blind_load(source, gateway=json)
|
|
2904
|
+
|
|
2905
|
+
def pickle_clone(self, obj):
    """Deep-copy **obj** through a pickle serialise/deserialise round-trip."""
    serialized = pickle.dumps(obj)
    return pickle.loads(serialized)
|
|
2908
|
+
|
|
2909
|
+
def utlines(self, *args):
    """Return the number of significant code or configuration lines found
    in the specified directories.

    Only files with a code-like extension (``.py``, ``.ini``, ``.tpl``,
    ``.rst``) are scanned; blank lines and lines made only of quoting,
    bracket, comma or whitespace characters are not counted.
    """
    # BUGFIX: ``self.path.splitext`` was subscripted instead of being called
    # on each filename, which raised TypeError at runtime.
    lookfiles = [
        x for x in self.ffind(*args)
        if self.path.splitext(x)[1] in ('.py', '.ini', '.tpl', '.rst')
    ]
    return len([
        x for x in self.cat(*lookfiles)
        if re.search(r'\S', x) and re.search(r'[^\'\"\)\],\s]', x)
    ])
|
|
2919
|
+
|
|
2920
|
+
def _signal_intercept_init(self):
    """Initialise the signal handler object (but do not activate it).

    The handler is stored in ``self._sighandler`` and later driven by
    :meth:`signal_intercept_on` / :meth:`signal_intercept_off`.
    """
    self._sighandler = SignalInterruptHandler(emitlogs=False)
|
|
2923
|
+
|
|
2924
|
+
def signal_intercept_on(self):
    """Activate the signal's catching.

    Requires a prior call to :meth:`_signal_intercept_init` (which sets
    ``self._sighandler``).

    See :class:`bronx.system.interrupt.SignalInterruptHandler` documentation.
    """
    self._sighandler.activate()
|
|
2930
|
+
|
|
2931
|
+
def signal_intercept_off(self):
    """Deactivate the signal's catching.

    Requires a prior call to :meth:`_signal_intercept_init` (which sets
    ``self._sighandler``).

    See :class:`bronx.system.interrupt.SignalInterruptHandler` documentation.
    """
    self._sighandler.deactivate()
|
|
2937
|
+
|
|
2938
|
+
# One "libname => /path (0x...)" or "libname => not found" entry per line
_LDD_REGEX = re.compile(r'^\s*([^\s]+)\s+=>\s*(?:([^\s]+)\s+\(0x.+\)|not found)$')

def ldd(self, filename):
    """Call ldd on **filename**.

    :return: a dict mapping each library name to its physical path
        (``None`` when the library is reported as "not found")
    :raises ValueError: if **filename** is not a regular file
    """
    if not self.path.isfile(filename):
        raise ValueError('{} is not a regular file'.format(filename))
    libs = {}
    for line in self.spawn(('ldd', filename)):
        entry = self._LDD_REGEX.match(line)
        if entry is not None:
            libs[entry.group(1)] = entry.group(2) or None
    return libs
|
|
2954
|
+
|
|
2955
|
+
def generic_compress(self, pipelinedesc, source, destination=None):
    """Compress a file using the :class:`CompressionPipeline` class.

    See the :class:`CompressionPipeline` class documentation for more details.

    :example: "generic_compress('bzip2', 'toto')" will create a toto.bz2 file.
    """
    cp = CompressionPipeline(self, pipelinedesc)
    if destination is None:
        # The target name can only be derived from a string source
        if not isinstance(source, str):
            raise ValueError("If destination is omitted, source must be a filename.")
        destination = source + cp.suffix
    return cp.compress2file(source, destination)
|
|
2969
|
+
|
|
2970
|
+
def generic_uncompress(self, pipelinedesc, source, destination=None):
    """Uncompress a file using the :class:`CompressionPipeline` class.

    See the :class:`CompressionPipeline` class documentation for more details.

    :example: "generic_uncompress('bzip2', 'toto.bz2')" will create a toto file.
    """
    cp = CompressionPipeline(self, pipelinedesc)
    if destination is None:
        # The target name can only be derived from a string source ending
        # with the pipeline's compression suffix
        if not isinstance(source, str):
            raise ValueError("If destination is omitted, source must be a filename.")
        if not source.endswith(cp.suffix):
            raise ValueError("Source do not exhibit the appropriate suffix ({:s})".format(cp.suffix))
        destination = source[:-len(cp.suffix)]
    return cp.file2uncompress(source, destination)
|
|
2987
|
+
|
|
2988
|
+
def find_mount_point(self, path):
    """Return the mount point of *path*.

    :param str path: path where to look for a mount point
    :return: the path to the mount point
    :rtype: str
    """
    if not self._os.path.exists(path):
        logger.warning('Path does not exist: <%s>', path)
    # Walk up from the absolute path until a mount point is reached
    mpoint = self._os.path.abspath(path)
    while not self._os.path.ismount(mpoint):
        mpoint = self._os.path.dirname(mpoint)
    return mpoint
|
|
3003
|
+
|
|
3004
|
+
def _lockdir_create(self, ldir, blocking=False, timeout=300, sleeptime=2):
    """Pseudo-lock mechanism based on atomic directory creation: acquire lock.

    :param str ldir: The target directory that acts as a lock
    :param bool blocking: Block (at most **timeout** seconds) until the
        lock can be acquired
    :param float timeout: Block at most timeout seconds (if **blocking** is True)
    :param float sleeptime: When blocking, wait **sleeptime** seconds between two
        attempts to acquire the lock.
    :return: ``True`` when the lock was acquired, ``False`` otherwise
    """
    rc = None
    t0 = time.time()
    # rc is None on the very first pass; afterwards keep retrying only
    # while blocking is requested and the timeout has not expired
    while rc is None or (not rc and blocking and time.time() - t0 < timeout):
        if rc is not None:
            self.sleep(sleeptime)
        try:
            # Important note: os' original mkdir function is used on purpose
            # since we need to get an error if the target directory already
            # exists
            self._os.mkdir(ldir)
        except FileExistsError:
            # The lock is currently held by someone else
            # (unused exception binding removed)
            rc = False
        else:
            rc = True
    return rc
|
|
3029
|
+
|
|
3030
|
+
def _lockdir_destroy(self, ldir):
    """Pseudo-lock mechanism based on atomic directory creation: release lock.

    A missing lock directory is tolerated (a warning is emitted), so
    releasing twice is harmless.

    :param str ldir: The target directory that acts as a lock
    """
    try:
        self.rmdir(ldir)
    except FileNotFoundError:
        # Grammar fixed in the log message ("did not exists" -> "did not exist")
        logger.warning("'%s' did not exist... that's odd", ldir)
|
|
3039
|
+
|
|
3040
|
+
@contextlib.contextmanager
def lockdir_context(self, ldir,
                    sloppy=False, timeout=120, sleeptime_min=0.1, sleeptime_max=0.3):
    """Try to acquire a lock directory and after that remove it.

    :param bool sloppy: If the lock cannot be acquired after *timeout* seconds,
        go on anyway (otherwise an :class:`OSError` is raised)
    :param float timeout: Block at most timeout seconds
    :param float sleeptime_min: When blocking, wait at least **sleeptime_min** seconds
        between two attempts to acquire the lock.
    :param float sleeptime_max: When blocking, wait at most **sleeptime_max** seconds
        between two attempts to acquire the lock.
    """
    # Randomise the retry period to avoid lock-step retries among competitors
    sleeptime = sleeptime_min + (sleeptime_max - sleeptime_min) * random.random()
    self.filecocoon(ldir)
    rc = self._lockdir_create(ldir, blocking=True, timeout=timeout, sleeptime=sleeptime)
    try:
        if not rc:
            msg = "Could not acquire lockdir < {:s} >. Already exists.".format(ldir)
            if sloppy:
                logger.warning(msg + '.. but going on.')
            else:
                raise OSError(msg)
        yield
    finally:
        # Only release the lock if we actually hold it (or in sloppy mode)
        if rc or sloppy:
            self._lockdir_destroy(ldir)
|
|
3066
|
+
|
|
3067
|
+
@property
def _appwide_lockbase(self):
    """Path to the application-wide locks base directory (created on demand).

    :raises RuntimeError: when no glove is defined
    """
    if self.glove is None:
        raise RuntimeError("A glove must be defined")
    myglove = self.glove
    lockdir = self.path.join(myglove.configrc,
                             'appwide_locks',
                             '{0.vapp:s}-{0.vconf:s}'.format(myglove))
    self.mkdir(lockdir)
    return lockdir
|
|
3080
|
+
|
|
3081
|
+
def _appwide_lockdir_path(self, label):
    """Compute the path to the lock directory associated with **label**."""
    return self.path.join(self._appwide_lockbase, label)
|
|
3084
|
+
|
|
3085
|
+
def appwide_lock(self, label, blocking=False, timeout=300, sleeptime=2):
    """Pseudo-lock mechanism based on atomic directory creation: acquire lock.

    The lock is located in a directory that depends on the vapp and vconf
    attributes of the current glove. The user must provide a **label** that
    helps to identify the lock purpose (it may include the xpid, ...).

    :param str label: The name of the desired lock
    :param bool blocking: Block (at most **timeout** seconds) until the
        lock can be acquired
    :param float timeout: Block at most timeout seconds (if **blocking** is True)
    :param float sleeptime: When blocking, wait **sleeptime** seconds between two
        attempts to acquire the lock.
    """
    return self._lockdir_create(self._appwide_lockdir_path(label),
                                blocking=blocking,
                                timeout=timeout,
                                sleeptime=sleeptime)
|
|
3104
|
+
|
|
3105
|
+
def appwide_unlock(self, label):
    """Pseudo-lock mechanism based on atomic directory creation: release lock.

    :param str label: The name of the desired lock
    """
    self._lockdir_destroy(self._appwide_lockdir_path(label))
|
|
3112
|
+
|
|
3113
|
+
|
|
3114
|
+
class Python34:
    """Mixin providing features available starting from Python version 3.4."""

    def netcdf_diff(self, netcdf1, netcdf2, **kw):
        """Difference between two NetCDF files.

        Use the netCDF4 package to do so...

        :param netcdf1: first file to compare
        :param netcdf2: second file to compare
        :return: the boolean outcome of
            :func:`bronx.datagrip.netcdf.netcdf_file_diff` (``False`` when
            the netCDF support is unavailable)
        """

        # Optional, netcdf comparison tool
        # NOTE(review): the registry key is spelled 'netdcf' (sic) — kept
        # as-is since other code may look it up under that name.
        b_netcdf_checker = ExternalCodeImportChecker('netdcf')
        with b_netcdf_checker:
            # Unused "as" binding removed: the checker object itself is queried below
            from bronx.datagrip import netcdf as b_netcdf

        if not b_netcdf_checker.is_available():
            logger.error("Unable to load the 'bronx.datagrip.netcdf' package. " +
                         "The netcdf library and/or 'netCDF4' python package are probably missing.")
            return False

        # Unfortunately, the netCDF4 package seems to leak memory,
        # using multiprocessing to mitigate this mess :-(

        def _compare_function(nc1, nc2, outcome):
            """Function started by the subprocess."""
            outcome.value = int(b_netcdf.netcdf_file_diff(nc1, nc2))

        rc = multiprocessing.Value('i', 0)
        p = multiprocessing.Process(target=_compare_function,
                                    args=(netcdf1, netcdf2, rc))
        p.start()
        p.join()
        return bool(rc.value)

    # Let's make this method compatible with fmtshcmd...
    netcdf_diff.func_extern = True
|
|
3152
|
+
|
|
3153
|
+
|
|
3154
|
+
# Abstract footprint restricting candidate classes to Python interpreters
# >= 3.4 (meant to be combined with the :class:`Python34` mixin).
_python34_fp = footprints.Footprint(info='An abstract footprint to be used with the Python34 Mixin',
                                    only=dict(
                                        after_python=PythonSimplifiedVersion('3.4.0')
                                    ))
|
|
3158
|
+
|
|
3159
|
+
|
|
3160
|
+
class Garbage(OSExtended):
    """
    Default system class for weird systems.

    Hopefully an extended system will be loaded later on...
    """

    _abstract = True
    _footprint = dict(
        info = 'Garbage base system',
        attr = dict(
            sysname = dict(
                # Catch-all: any sysname except the explicitly supported ones
                outcast = ['Linux', 'Darwin', 'UnitTestLinux', 'UnitTestable']
            )
        ),
        priority = dict(
            # DEFAULT priority level — presumably a fallback selected only
            # when no better-prioritised system matches (TODO confirm)
            level = footprints.priorities.top.DEFAULT
        )
    )

    def __init__(self, *args, **kw):
        """Gateway to parent method after debug logging."""
        logger.debug('Garbage system init %s', self.__class__)
        super().__init__(*args, **kw)
|
|
3184
|
+
|
|
3185
|
+
|
|
3186
|
+
class Garbage34p(Garbage, Python34):
    """Default system class for weird systems with python version >= 3.4"""

    _footprint = [
        _python34_fp,
        # Typo fixed in the info string ("withh" -> "with")
        dict(info = 'Garbage base system with a blazing Python version')
    ]
|
|
3193
|
+
|
|
3194
|
+
|
|
3195
|
+
class Linux(OSExtended):
    """Abstract default system class for most Linux based systems."""

    _abstract = True
    _footprint = dict(
        info = 'Abstract Linux base system',
        attr = dict(
            sysname = dict(
                values = ['Linux']
            )
        )
    )

    def __init__(self, *args, **kw):
        """
        Before going through parent initialisation (see :class:`OSExtended`),
        pick up this attribute:

        * **psopts** - as default option for the ps command (default: ``-w -f -a``).
        """
        logger.debug('Linux system init %s', self.__class__)
        self._psopts = kw.pop('psopts', ['-w', '-f', '-a'])
        super().__init__(*args, **kw)
        self.__dict__['_cpusinfo'] = LinuxCpusInfo()
        try:
            self.__dict__['_numainfo'] = LibNumaNodesInfo()
        except (OSError, NotImplementedError):
            # On very few Linux systems, libnuma is not available...
            pass
        self.__dict__['_memoryinfo'] = LinuxMemInfo()
        self.__dict__['_netstatsinfo'] = LinuxNetstats()

    @property
    def realkind(self):
        return 'linux'

    def cpus_ids_per_blocks(self, blocksize=1, topology='raw', hexmask=False):
        """Get the list of CPUs IDs nicely ordered for subsequent binding.

        :param int blocksize: the number of threads consumed by one task
        :param str topology: The task distribution scheme
        :param bool hexmask: Return a list of CPU masks in hexadecimal
        :raises ValueError: when **topology** is unknown
        """
        if topology.startswith('numa'):
            # The optional '_discardsmt' marker disables the SMT-aware layout
            if topology.endswith('_discardsmt'):
                topology = topology[:-11]
                smtlayout = None
            else:
                smtlayout = self.cpus_info.physical_cores_smtthreads
            try:
                cpulist = getattr(self.numa_info,
                                  topology + '_cpulist')(blocksize,
                                                         smtlayout=smtlayout)
            except AttributeError:
                raise ValueError('Unknown topology ({:s}).'.format(topology))
        else:
            try:
                cpulist = getattr(self.cpus_info, topology + '_cpulist')(blocksize)
            except AttributeError:
                raise ValueError('Unknown topology ({:s}).'.format(topology))
        cpulist = list(cpulist)
        # Group the flat CPU list into one block of *blocksize* IDs per task
        cpulist = [[cpulist[(taskid * blocksize + i)]
                    for i in range(blocksize)]
                   for taskid in range(len(cpulist) // blocksize)]
        if hexmask:
            cpulist = [hex(sum([1 << i for i in item])) for item in cpulist]
        return cpulist

    def cpus_ids_dispenser(self, topology='raw'):
        """Get a dispenser of CPUs IDs nicely ordered for subsequent binding.

        :param str topology: The task distribution scheme
        :raises ValueError: when **topology** is unknown
        """
        if topology.startswith('numa'):
            # The optional '_discardsmt' marker disables the SMT-aware layout
            if topology.endswith('_discardsmt'):
                topology = topology[:-11]
                smtlayout = None
            else:
                smtlayout = self.cpus_info.physical_cores_smtthreads
            try:
                cpudisp = getattr(self.numa_info,
                                  topology + '_cpu_dispenser')(smtlayout=smtlayout)
            except AttributeError:
                raise ValueError('Unknown topology ({:s}).'.format(topology))
        else:
            try:
                cpudisp = getattr(self.cpus_info, topology + '_cpu_dispenser')()
            except AttributeError:
                raise ValueError('Unknown topology ({:s}).'.format(topology))
        return cpudisp

    def cpus_affinity_get(self, taskid, blocksize=1, topology='socketpacked', method='taskset'):
        """Get the necessary command/environment to set the CPUs affinity.

        :param int taskid: the task number
        :param int blocksize: the number of threads consumed by one task
        :param str method: The binding method
        :param str topology: The task distribution scheme
        :return: A 3-elements tuple. (bool: BindingPossible,
            list: Starting command prefix, dict: Environment update)
        :raises ValueError: when **method** is unknown
        """
        if method not in ('taskset', 'gomp', 'omp', 'ompverbose'):
            raise ValueError('Unknown binding method ({:s}).'.format(method))
        if method == 'taskset':
            if not self.which('taskset'):
                # BUGFIX: the log message was garbled
                # ("The taskset is program is missing. ...")
                logger.warning("The taskset program is missing. Going on without binding.")
                return (False, list(), dict())
        cpulist = self.cpus_ids_per_blocks(blocksize=blocksize, topology=topology)
        # Wrap around when there are more tasks than CPU blocks
        cpus = cpulist[taskid % len(cpulist)]
        cmdl = list()
        env = dict()
        if method == 'taskset':
            cmdl += ['taskset', '--cpu-list', ','.join([str(c) for c in cpus])]
        elif method == 'gomp':
            env['GOMP_CPU_AFFINITY'] = ' '.join([str(c) for c in cpus])
        elif method.startswith('omp'):
            env['OMP_PLACES'] = ','.join(['{{{:d}}}'.format(c) for c in cpus])
            if method.endswith('verbose'):
                env['OMP_DISPLAY_ENV'] = 'TRUE'
                env['OMP_DISPLAY_AFFINITY'] = 'TRUE'
        return (True, cmdl, env)
|
|
3316
|
+
|
|
3317
|
+
|
|
3318
|
+
class Linux34p(Linux, Python34):
    """Linux system with python version >= 3.4"""

    # Combine the Linux footprint with the >= 3.4 interpreter restriction
    _footprint = [
        _python34_fp,
        dict(info = 'Linux based system with a blazing Python version')
    ]
|
|
3325
|
+
|
|
3326
|
+
|
|
3327
|
+
class LinuxDebug(Linux34p):
    """Special system class for crude debugging on Linux based systems."""

    _footprint = dict(
        info = 'Linux debug system',
        attr = dict(
            version = dict(
                optional = False,
                values = ['dbug', 'debug'],
                # 'dbug' is accepted but normalised to 'debug'
                remap = dict(
                    dbug = 'debug'
                )
            )
        )
    )

    def __init__(self, *args, **kw):
        """Gateway to parent method after debug logging."""
        logger.debug('LinuxDebug system init %s', self.__class__)
        super().__init__(*args, **kw)

    @property
    def realkind(self):
        return 'linuxdebug'
|
|
3351
|
+
|
|
3352
|
+
|
|
3353
|
+
class Macosx(OSExtended):
    """Mac under MacOSX."""

    _abstract = True
    _footprint = dict(
        info = 'Apple Mac computer under Macosx',
        attr = dict(
            sysname = dict(
                # uname system name reported on macOS
                values = ['Darwin']
            ),
        ),
        priority = dict(
            level = footprints.priorities.top.TOOLBOX
        )
    )

    def __init__(self, *args, **kw):
        """
        Before going through parent initialisation (see :class:`OSExtended`),
        pick up this attribute:

        * **psopts** - as default option for the ps command (default: ``-w -f -a``).
        """
        logger.debug('Darwin system init %s', self.__class__)
        # Consume 'psopts' before handing the remaining kwargs to the parent
        self._psopts = kw.pop('psopts', ['-w', '-f', '-a'])
        super().__init__(*args, **kw)

    @property
    def realkind(self):
        return 'darwin'

    @property
    def default_syslog(self):
        """Address to use in logging.handler.SysLogHandler()."""
        return '/var/run/syslog'
|
|
3388
|
+
|
|
3389
|
+
|
|
3390
|
+
class Macosx34p(Macosx, Python34):
    """Mac under MacOSX with python version >= 3.4"""

    # Combine the Macosx footprint with the >= 3.4 interpreter restriction
    _footprint = [
        _python34_fp,
        dict(info = 'Apple Mac computer under Macosx with a blazing Python version')
    ]
|