vortex-nwp 2.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (144) hide show
  1. vortex/__init__.py +159 -0
  2. vortex/algo/__init__.py +13 -0
  3. vortex/algo/components.py +2462 -0
  4. vortex/algo/mpitools.py +1953 -0
  5. vortex/algo/mpitools_templates/__init__.py +1 -0
  6. vortex/algo/mpitools_templates/envelope_wrapper_default.tpl +27 -0
  7. vortex/algo/mpitools_templates/envelope_wrapper_mpiauto.tpl +29 -0
  8. vortex/algo/mpitools_templates/wrapstd_wrapper_default.tpl +18 -0
  9. vortex/algo/serversynctools.py +171 -0
  10. vortex/config.py +112 -0
  11. vortex/data/__init__.py +19 -0
  12. vortex/data/abstractstores.py +1510 -0
  13. vortex/data/containers.py +835 -0
  14. vortex/data/contents.py +622 -0
  15. vortex/data/executables.py +275 -0
  16. vortex/data/flow.py +119 -0
  17. vortex/data/geometries.ini +2689 -0
  18. vortex/data/geometries.py +799 -0
  19. vortex/data/handlers.py +1230 -0
  20. vortex/data/outflow.py +67 -0
  21. vortex/data/providers.py +487 -0
  22. vortex/data/resources.py +207 -0
  23. vortex/data/stores.py +1390 -0
  24. vortex/data/sync_templates/__init__.py +0 -0
  25. vortex/gloves.py +309 -0
  26. vortex/layout/__init__.py +20 -0
  27. vortex/layout/contexts.py +577 -0
  28. vortex/layout/dataflow.py +1220 -0
  29. vortex/layout/monitor.py +969 -0
  30. vortex/nwp/__init__.py +14 -0
  31. vortex/nwp/algo/__init__.py +21 -0
  32. vortex/nwp/algo/assim.py +537 -0
  33. vortex/nwp/algo/clim.py +1086 -0
  34. vortex/nwp/algo/coupling.py +831 -0
  35. vortex/nwp/algo/eda.py +840 -0
  36. vortex/nwp/algo/eps.py +785 -0
  37. vortex/nwp/algo/forecasts.py +886 -0
  38. vortex/nwp/algo/fpserver.py +1303 -0
  39. vortex/nwp/algo/ifsnaming.py +463 -0
  40. vortex/nwp/algo/ifsroot.py +404 -0
  41. vortex/nwp/algo/monitoring.py +263 -0
  42. vortex/nwp/algo/mpitools.py +694 -0
  43. vortex/nwp/algo/odbtools.py +1258 -0
  44. vortex/nwp/algo/oopsroot.py +916 -0
  45. vortex/nwp/algo/oopstests.py +220 -0
  46. vortex/nwp/algo/request.py +660 -0
  47. vortex/nwp/algo/stdpost.py +1641 -0
  48. vortex/nwp/data/__init__.py +30 -0
  49. vortex/nwp/data/assim.py +380 -0
  50. vortex/nwp/data/boundaries.py +314 -0
  51. vortex/nwp/data/climfiles.py +521 -0
  52. vortex/nwp/data/configfiles.py +153 -0
  53. vortex/nwp/data/consts.py +954 -0
  54. vortex/nwp/data/ctpini.py +149 -0
  55. vortex/nwp/data/diagnostics.py +209 -0
  56. vortex/nwp/data/eda.py +147 -0
  57. vortex/nwp/data/eps.py +432 -0
  58. vortex/nwp/data/executables.py +1045 -0
  59. vortex/nwp/data/fields.py +111 -0
  60. vortex/nwp/data/gridfiles.py +380 -0
  61. vortex/nwp/data/logs.py +584 -0
  62. vortex/nwp/data/modelstates.py +363 -0
  63. vortex/nwp/data/monitoring.py +193 -0
  64. vortex/nwp/data/namelists.py +696 -0
  65. vortex/nwp/data/obs.py +840 -0
  66. vortex/nwp/data/oopsexec.py +74 -0
  67. vortex/nwp/data/providers.py +207 -0
  68. vortex/nwp/data/query.py +206 -0
  69. vortex/nwp/data/stores.py +160 -0
  70. vortex/nwp/data/surfex.py +337 -0
  71. vortex/nwp/syntax/__init__.py +9 -0
  72. vortex/nwp/syntax/stdattrs.py +437 -0
  73. vortex/nwp/tools/__init__.py +10 -0
  74. vortex/nwp/tools/addons.py +40 -0
  75. vortex/nwp/tools/agt.py +67 -0
  76. vortex/nwp/tools/bdap.py +59 -0
  77. vortex/nwp/tools/bdcp.py +41 -0
  78. vortex/nwp/tools/bdm.py +24 -0
  79. vortex/nwp/tools/bdmp.py +54 -0
  80. vortex/nwp/tools/conftools.py +1661 -0
  81. vortex/nwp/tools/drhook.py +66 -0
  82. vortex/nwp/tools/grib.py +294 -0
  83. vortex/nwp/tools/gribdiff.py +104 -0
  84. vortex/nwp/tools/ifstools.py +203 -0
  85. vortex/nwp/tools/igastuff.py +273 -0
  86. vortex/nwp/tools/mars.py +68 -0
  87. vortex/nwp/tools/odb.py +657 -0
  88. vortex/nwp/tools/partitioning.py +258 -0
  89. vortex/nwp/tools/satrad.py +71 -0
  90. vortex/nwp/util/__init__.py +6 -0
  91. vortex/nwp/util/async.py +212 -0
  92. vortex/nwp/util/beacon.py +40 -0
  93. vortex/nwp/util/diffpygram.py +447 -0
  94. vortex/nwp/util/ens.py +279 -0
  95. vortex/nwp/util/hooks.py +139 -0
  96. vortex/nwp/util/taskdeco.py +85 -0
  97. vortex/nwp/util/usepygram.py +697 -0
  98. vortex/nwp/util/usetnt.py +101 -0
  99. vortex/proxy.py +6 -0
  100. vortex/sessions.py +374 -0
  101. vortex/syntax/__init__.py +9 -0
  102. vortex/syntax/stdattrs.py +867 -0
  103. vortex/syntax/stddeco.py +185 -0
  104. vortex/toolbox.py +1117 -0
  105. vortex/tools/__init__.py +20 -0
  106. vortex/tools/actions.py +523 -0
  107. vortex/tools/addons.py +316 -0
  108. vortex/tools/arm.py +96 -0
  109. vortex/tools/compression.py +325 -0
  110. vortex/tools/date.py +27 -0
  111. vortex/tools/ddhpack.py +10 -0
  112. vortex/tools/delayedactions.py +782 -0
  113. vortex/tools/env.py +541 -0
  114. vortex/tools/folder.py +834 -0
  115. vortex/tools/grib.py +738 -0
  116. vortex/tools/lfi.py +953 -0
  117. vortex/tools/listings.py +423 -0
  118. vortex/tools/names.py +637 -0
  119. vortex/tools/net.py +2124 -0
  120. vortex/tools/odb.py +10 -0
  121. vortex/tools/parallelism.py +368 -0
  122. vortex/tools/prestaging.py +210 -0
  123. vortex/tools/rawfiles.py +10 -0
  124. vortex/tools/schedulers.py +480 -0
  125. vortex/tools/services.py +940 -0
  126. vortex/tools/storage.py +996 -0
  127. vortex/tools/surfex.py +61 -0
  128. vortex/tools/systems.py +3976 -0
  129. vortex/tools/targets.py +440 -0
  130. vortex/util/__init__.py +9 -0
  131. vortex/util/config.py +1122 -0
  132. vortex/util/empty.py +24 -0
  133. vortex/util/helpers.py +216 -0
  134. vortex/util/introspection.py +69 -0
  135. vortex/util/iosponge.py +80 -0
  136. vortex/util/roles.py +49 -0
  137. vortex/util/storefunctions.py +129 -0
  138. vortex/util/structs.py +26 -0
  139. vortex/util/worker.py +162 -0
  140. vortex_nwp-2.0.0.dist-info/METADATA +67 -0
  141. vortex_nwp-2.0.0.dist-info/RECORD +144 -0
  142. vortex_nwp-2.0.0.dist-info/WHEEL +5 -0
  143. vortex_nwp-2.0.0.dist-info/licenses/LICENSE +517 -0
  144. vortex_nwp-2.0.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,3976 @@
1
+ """
2
+ This package handles system interfaces objects that are in charge of
3
+ system interaction. Systems objects use the :mod:`footprints` mechanism.
4
+
5
+ The current active System object should be retrieved using the session's Ticket
6
+ (*i.e.* System classes should not be instantiated directly) ::
7
+
8
+ t = vortex.ticket()
9
+ sh = t.sh
10
+
11
+ The System retrieved by this property will always be an instance of subclasses of
12
+ :class:`OSExtended`. Consequently, you can safely assume that all attributes,
13
+ properties and methods available in :class:`OSExtended` and :class:`System` are
14
+ available to you.
15
+
16
+ When working with System objects, preferentially use high-level methods such as
17
+ :meth:`~OSExtended.cp`, :meth:`~OSExtended.mv`, :meth:`~OSExtended.rm`,
18
+ :meth:`~OSExtended.smartftput`, :meth:`~OSExtended.smartftget`, ...
19
+
20
+ """
21
+
22
+ import contextlib
23
+ import errno
24
+ import filecmp
25
+ import ftplib
26
+ import functools
27
+ import glob
28
+ import hashlib
29
+ import importlib
30
+ import io
31
+ import json
32
+ import locale
33
+ import multiprocessing
34
+ import os
35
+ import pickle
36
+ import platform
37
+ import pwd as passwd
38
+ import random
39
+ import re
40
+ import resource
41
+ import shutil
42
+ import signal
43
+ import socket
44
+ import stat
45
+ import subprocess
46
+ import sys
47
+ import tarfile
48
+ import tempfile
49
+ import threading
50
+ import time
51
+ import uuid
52
+ from collections import namedtuple
53
+
54
+ import footprints
55
+ from bronx.fancies import loggers
56
+ from bronx.stdtypes import date
57
+ from bronx.stdtypes.history import History
58
+ from bronx.syntax.decorators import nicedeco_plusdoc, secure_getattr
59
+ from bronx.syntax.externalcode import ExternalCodeImportChecker
60
+ from bronx.system.cpus import LinuxCpusInfo
61
+ from bronx.system.interrupt import SignalInterruptError, SignalInterruptHandler
62
+ from bronx.system.memory import LinuxMemInfo
63
+ from bronx.system.numa import LibNumaNodesInfo
64
+ from vortex.gloves import Glove
65
+ from vortex.syntax.stdattrs import DelayedInit
66
+ from vortex.tools.compression import CompressionPipeline
67
+ from vortex.tools.env import Environment
68
+ from vortex.tools.net import AssistedSsh, AutoRetriesFtp, DEFAULT_FTP_PORT
69
+ from vortex.tools.net import FtpConnectionPool, LinuxNetstats, StdFtp
70
+ import vortex.tools.storage
71
+
72
#: No automatic export
__all__ = []

logger = loggers.getLogger(__name__)

#: Pre-compiled regex to check a none str value
isnonedef = re.compile(r"\s*none\s*$", re.IGNORECASE)

#: Pre-compiled regex to check a boolean true str value
istruedef = re.compile(r"\s*(on|true|ok)\s*$", re.IGNORECASE)

#: Pre-compiled regex to check a boolean false str value
isfalsedef = re.compile(r"\s*(off|false|ko)\s*$", re.IGNORECASE)

#: Global lock to protect temporary locale changes
LOCALE_LOCK = threading.Lock()

# Documentation snippet appended (by :func:`fmtshcmd` via nicedeco_plusdoc) to
# every decorated method's docstring.  Fixed typo: "**ftm**" -> "**fmt**".
_fmtshcmd_docbonus = """

    This method is decorated by :func:`fmtshcmd`, consequently it accepts
    an additional **fmt** attribute that might alter this method behaviour
    (*i.e.* if a ``thefmt_{name:s}`` method exists (where ``thefmt`` is the
    value of the **fmt** attribute), it will be executed instead of the
    present one).
    """

# Constant items

#: Definition of a named tuple ftpflavour
FtpFlavourTuple = namedtuple(
    "FtpFlavourTuple", ["STD", "RETRIES", "CONNECTION_POOLS"]
)

#: Predefined FTP_FLAVOUR values STD, RETRIES and CONNECTION_POOLS.
FTP_FLAVOUR = FtpFlavourTuple(STD=0, RETRIES=1, CONNECTION_POOLS=2)
107
+
108
+
109
@nicedeco_plusdoc(_fmtshcmd_docbonus)
def fmtshcmd(func):
    """This decorator gives a try to the equivalent formatted command.

    Example: let ``decomethod`` be a method decorated with the present decorator.
    When a user calls ``decomethod(..., fmt='toto')``, the decorator looks for
    a method called ``toto_decomethod``: if such a method exists it is used,
    otherwise the original method is called unchanged.
    """

    def formatted_method(self, *args, **kw):
        # The "fmt" keyword is consumed here and never forwarded.
        fmt = kw.pop("fmt", None)
        # Addon objects delegate to their shell; System objects handle it themselves.
        target = self if isinstance(self, System) else self.sh
        effective = getattr(
            target, str(fmt).lower() + "_" + func.__name__, func
        )
        # Externally-proxied callables are already bound: no explicit "self".
        if getattr(effective, "func_extern", False):
            return effective(*args, **kw)
        return effective(self, *args, **kw)

    return formatted_method
131
+
132
+
133
+ def _kw2spawn(func):
134
+ """This decorator justs update the docstring of a class...
135
+
136
+ It will state that all **kw** arguments will be passed directly to the
137
+ ```spawn`` method.
138
+
139
+ (Because laziness is good and cut&paste is bad)
140
+ """
141
+ func.__doc__ += """
142
+
143
+ At some point, all of the **kw** arguments will be passed directly to the
144
+ :meth:`spawn` method. Please see refer to the :meth:`spawn` method
145
+ documentation for more details.
146
+ """
147
+ return func
148
+
149
+
150
class ExecutionError(RuntimeError):
    """Go through exception for internal :meth:`OSExtended.spawn` errors."""
154
+
155
+
156
class CopyTreeError(OSError):
    """An error raised during the recursive copy of a directory."""
160
+
161
+
162
class CdContext:
    """
    Context manager for temporarily changing the working directory.

    Returns to the initial directory, even when an exception is raised.
    Has the syntax of the :meth:`~OSExtended.cd` call, and can be used through an :class:`OSExtended` object::

        with sh.cdcontext(newpath, create=True):
            # work in newpath
        # back to the original path
    """

    def __init__(self, sh, newpath, create=False, clean_onexit=False):
        self.sh = sh
        # Expand "~" right away so enter/exit see the very same path.
        self.newpath = self.sh.path.expanduser(newpath)
        self.create = create
        self.clean_onexit = clean_onexit

    @property
    def _is_noop(self):
        # Both "" and "." mean "stay where we are": nothing to do at all.
        return self.newpath in ("", ".")

    def __enter__(self):
        if not self._is_noop:
            # Remember where we came from, then jump to the target directory.
            self.oldpath = self.sh.getcwd()
            self.sh.cd(self.newpath, create=self.create)

    def __exit__(self, etype, value, traceback):  # @UnusedVariable
        if not self._is_noop:
            self.sh.cd(self.oldpath)
            if self.clean_onexit:
                self.sh.rm(self.newpath)
190
+
191
+
192
@contextlib.contextmanager
def NullContext():
    """A do-nothing context manager, useful as a placeholder for a real one."""
    yield None
196
+
197
+
198
@contextlib.contextmanager
def LocaleContext(category, localename=None, uselock=False):
    """Context used to locally change the Locale.

    This is used like the :func:`~locale.setlocale` function::

        with LocaleContext(locale.LC_TIME, 'fr_FR.UTF-8'):
            strtime = date.now().strftime('%X')

    The ``locale`` is changed at the process level ; to avoid conflicting changes
    in a multithread context, use *with care* the additional ``uselock`` argument.
    """
    # Optionally serialise locale changes across threads (the locale is a
    # process-wide setting).
    with (LOCALE_LOCK if uselock else NullContext()):
        saved = locale.setlocale(category)
        try:
            yield locale.setlocale(category, localename)
        finally:
            # Always restore the previous locale, whatever happened inside.
            locale.setlocale(category, saved)
217
+
218
+
219
@functools.total_ordering
class PythonSimplifiedVersion:
    """
    Type that holds a simplified ``major.minor.micro`` view of Python's version.

    Comparison operators are provided (via :func:`functools.total_ordering`)
    so that two versions can be ordered chronologically.

    It can be used in a footprint specification.
    """

    _VERSION_RE = re.compile(r"(\d+)\.(\d+)\.(\d+)")

    def __init__(self, versionstr):
        found = self._VERSION_RE.match(versionstr)
        if found is None:
            raise ValueError("Malformed version string: {}".format(versionstr))
        # Stored as a tuple of ints so that comparisons are purely numerical.
        self._version = tuple(int(digits) for digits in found.groups())

    @property
    def version(self):
        """The version as a ``(major, minor, micro)`` tuple of ints."""
        return self._version

    def __hash__(self):
        return hash(self._version)

    def __eq__(self, other):
        # Strings like "3.10.4" are accepted and coerced on the fly;
        # anything un-coercible simply compares unequal.
        if not isinstance(other, self.__class__):
            try:
                other = self.__class__(other)
            except (ValueError, TypeError):
                return False
        return self.version == other.version

    def __gt__(self, other):
        if not isinstance(other, self.__class__):
            other = self.__class__(other)
        return self.version > other.version

    def __str__(self):
        return ".".join(str(digit) for digit in self._version)

    def __repr__(self):
        return "<{} | {!s}>".format(
            object.__repr__(self).lstrip("<").rstrip(">"), self
        )

    def export_dict(self):
        """The pure dict/json representation is the dotted version string."""
        return str(self)
270
+
271
+
272
class System(footprints.FootprintBase):
    """Abstract root class for any :class:`System` subclasses.

    It contains basic generic methods and redefinition of some of the usual
    Python's system methods.
    """

    _abstract = True
    _explicit = False
    _collector = ("system",)

    _footprint = dict(
        info="Default system interface",
        attr=dict(
            hostname=dict(
                info="The computer's network name",
                optional=True,
                default=platform.node(),
                alias=("nodename",),
            ),
            sysname=dict(
                info="The underlying system/OS name (e.g. Linux, Darwin, ...)",
                optional=True,
                default=platform.system(),
            ),
            arch=dict(
                info="The underlying machine type (e.g. i386, x86_64, ...)",
                optional=True,
                default=platform.machine(),
                alias=("machine",),
            ),
            release=dict(
                info="The underlying system's release, (e.g. 2.2.0, NT, ...)",
                optional=True,
                default=platform.release(),
            ),
            version=dict(
                info="The underlying system's release version",
                optional=True,
                default=platform.version(),
            ),
            python=dict(
                info="The Python's version (e.g 2.7.5)",
                type=PythonSimplifiedVersion,
                optional=True,
                default=platform.python_version(),
            ),
            glove=dict(
                info="The session's Glove object",
                optional=True,
                type=Glove,
            ),
        ),
    )

    def __init__(self, *args, **kw):
        """
        In addition to footprint's attributes, the following attributes may be added:

        * **prompt** - as a starting comment line in :meth:`title` like methods.
        * **trace** - if *True* or *"log"* mimic ``set -x`` behaviour (default: *False*).
          With trace="log", the information is sent through the logger.
        * **timer** - time all the calls to external commands (default: *False*).
        * **output** - as a default value for any external spawning command (default: *True*).

        The following attributes are also picked from ``kw`` (by default the
        usual Python's modules are used):

        * **os** - as an alternative to :mod:`os`.
        * **rlimit** - as an alternative to :mod:`resource`.
        * **sh** or **shutil** - as an alternative to :mod:`shutil`.

        **The proxy concept:**

        The :class:`System` class acts as a proxy for the :mod:`os`, :mod:`resource`
        and :mod:`shutil` modules. *i.e.* if a method or attribute
        is not defined in the :class:`System` class, the :mod:`os`, :mod:`resource`
        and :mod:`shutil` modules are looked-up (in turn): if one of them has
        the desired attribute/method, it is returned.

        Example: let ``sh`` be an object of class :class:`System`, calling
        ``sh.path.exists`` is equivalent to calling ``os.path.exists`` since
        ``path`` is not redefined in the :class:`System` class.

        In vortex, it is mandatory to use the :class:`System` class (and not the
        official Python modules) even for attributes/methods that are not
        redefined. This is not pointless since, in the future, we may decide to
        redefine a given attribute/method either globally or for a specific
        architecture.

        **Addons:**

        Using the :meth:`extend` method, a :class:`System` object can be extended
        by any object. This mechanism is used by classes deriving from
        :class:`vortex.tools.addons.Addon`.

        Example: let ``sh`` be an object of class :class:`System` and ``MyAddon``
        a subclass of :class:`~vortex.tools.addons.Addon` (of kind 'myaddon') that
        defines the ``greatstuff`` attribute; creating an object of class
        ``MyAddon`` using ``footprints.proxy.addon(kind='myaddon', shell=sh)``
        will extend the ``sh`` with the ``greatstuff`` attribute (*e.g.* any
        user will be able to call ``sh.greatstuff``).

        """
        logger.debug("Abstract System init %s", self.__class__)
        # Attributes are stored through self.__dict__ on purpose: __setattr__
        # would otherwise go through the footprint machinery.
        self.__dict__["_os"] = kw.pop("os", os)
        self.__dict__["_rl"] = kw.pop("rlimit", resource)
        self.__dict__["_sh"] = kw.pop("shutil", kw.pop("sh", shutil))
        # The ordered list of modules/objects probed by __getattr__.
        self.__dict__["_search"] = [
            self.__dict__["_os"],
            self.__dict__["_sh"],
            self.__dict__["_rl"],
        ]
        # Maps each proxied attribute name to the object that provided it.
        self.__dict__["_xtrack"] = dict()
        self.__dict__["_history"] = History(tag="shell")
        self.__dict__["_rclast"] = 0
        self.__dict__["prompt"] = str(kw.pop("prompt", ""))
        for flag in ("trace", "timer"):
            self.__dict__[flag] = kw.pop(flag, False)
        for flag in ("output",):
            self.__dict__[flag] = kw.pop(flag, True)
        super().__init__(*args, **kw)

    @property
    def realkind(self):
        """The object/class realkind."""
        return "system"

    @property
    def history(self):
        """The :class:`History` object associated with all :class:`System` objects."""
        return self._history

    @property
    def rclast(self):
        """The last return-code (for external commands)."""
        return self._rclast

    @property
    def search(self):
        """A list of Python's modules that are looked up when an attribute is not found.

        At startup, :mod:`os`, :mod:`resource` and :mod:`shutil` are looked up but
        additional Addon classes may be added to this list (see the :meth:`extend`
        method).
        """
        return self._search

    @property
    def default_syslog(self):
        """Address to use in logging.handler.SysLogHandler()."""
        return "/dev/log"

    def extend(self, obj=None):
        """Extend the current external attribute resolution to **obj** (module or object).

        :return: the new length of the search list.
        """
        if obj is not None:
            if hasattr(obj, "kind"):
                # An Addon of the same kind may already be registered: drop any
                # cached proxied attributes and remove the previous instance.
                for k, v in self._xtrack.items():
                    if hasattr(v, "kind"):
                        if hasattr(self, k):
                            delattr(self, k)
                for addon in self.search:
                    if hasattr(addon, "kind") and addon.kind == obj.kind:
                        self.search.remove(addon)
            self.search.append(obj)
        return len(self.search)

    def loaded_addons(self):
        """
        Kind of all the loaded :class:`~vortex.tools.addons.Addon` objects
        (*i.e.* :class:`~vortex.tools.addons.Addon` objects previously
        loaded with the :meth:`extend` method).
        """
        return [addon.kind for addon in self.search if hasattr(addon, "kind")]

    def external(self, key):
        """Return effective module object reference if any, or *None*."""
        try:
            # Trigger the __getattr__ machinery so that _xtrack gets populated.
            getattr(self, key)
        except AttributeError:
            pass
        return self._xtrack.get(key, None)

    @secure_getattr
    def __getattr__(self, key):
        """Gateway to undefined methods or attributes.

        This is the place where the ``self.search`` list is looked for...
        """
        actualattr = None
        if key.startswith("_"):
            # Do not attempt to look for hidden attributes
            raise AttributeError("Method or attribute " + key + " not found")
        for shxobj in self.search:
            if hasattr(shxobj, key):
                if isinstance(
                    shxobj, footprints.FootprintBase
                ) and shxobj.footprint_has_attribute(key):
                    # Ignore footprint attributes
                    continue
                if actualattr is None:
                    # First match wins; remember where it came from.
                    actualattr = getattr(shxobj, key)
                    self._xtrack[key] = shxobj
                else:
                    # Do not warn for a restricted list of keys
                    if key not in ("stat",):
                        logger.warning(
                            'System: duplicate entry while looking for key="%s". '
                            + "First result in %s but also available in %s.",
                            key,
                            self._xtrack[key],
                            shxobj,
                        )
        if actualattr is None:
            raise AttributeError("Method or attribute " + key + " not found")
        if callable(actualattr):
            # Wrap the callable so that every call is traced through stderr().
            def osproxy(*args, **kw):
                cmd = [key]
                cmd.extend(args)
                cmd.extend(
                    ["{:s}={:s}".format(x, str(kw[x])) for x in kw.keys()]
                )
                self.stderr(*cmd)
                return actualattr(*args, **kw)

            osproxy.func_name = str(key)
            osproxy.__name__ = str(key)
            osproxy.func_doc = actualattr.__doc__
            # Marks the proxy as already-bound (see fmtshcmd).
            osproxy.func_extern = True
            # Cache the proxy on the instance: next access bypasses __getattr__.
            setattr(self, key, osproxy)
            return osproxy
        else:
            return actualattr

    def stderr(self, *args):
        """Write a formatted message to standard error (if ``self.trace == True``)."""
        count, justnow = self.history.append(*args)
        if self.trace:
            if self.trace == "log":
                logger.info(
                    "[sh:#%d] %s", count, " ".join([str(x) for x in args])
                )
            else:
                sys.stderr.write(
                    "* [{:s}][{:d}] {:s}\n".format(
                        justnow.strftime("%Y/%m/%d-%H:%M:%S"),
                        count,
                        " ".join([str(x) for x in args]),
                    )
                )

    def flush_stdall(self):
        """Flush stdout and stderr."""
        sys.stdout.flush()
        sys.stderr.flush()

    @contextlib.contextmanager
    def mute_stderr(self):
        """Temporarily disable tracing (see :meth:`stderr`)."""
        oldtrace = self.trace
        self.trace = False
        try:
            yield
        finally:
            self.trace = oldtrace

    def echo(self, *args):
        """Joined **args** are echoed."""
        print(">>>", " ".join([str(arg) for arg in args]))

    def title(self, textlist, tchar="=", autolen=96):
        """Formatted title output.

        :param list|str textlist: A list of strings that contains the title's text
        :param str tchar: The character used to frame the title text
        :param int autolen: The title width
        """
        if isinstance(textlist, str):
            textlist = (textlist,)
        if autolen:
            nbc = autolen
        else:
            nbc = max([len(text) for text in textlist])
        print()
        print(tchar * (nbc + 4))
        for text in textlist:
            print(
                "{0:s} {1:^{size}s} {0:s}".format(
                    tchar, text.upper(), size=nbc
                )
            )
        print(tchar * (nbc + 4))
        print()
        self.flush_stdall()

    def subtitle(self, text="", tchar="-", autolen=96):
        """Formatted subtitle output.

        :param str text: The subtitle's text
        :param str tchar: The character used to frame the title text
        :param int autolen: The title width
        """
        if autolen:
            nbc = autolen
        else:
            nbc = len(text)
        print()
        print(tchar * (nbc + 4))
        if text:
            print("# {0:{size}s} #".format(text, size=nbc))
            print(tchar * (nbc + 4))
        self.flush_stdall()

    def header(
        self, text="", tchar="-", autolen=False, xline=True, prompt=None
    ):
        """Formatted header output.

        :param str text: The subtitle's text
        :param str tchar: The character used to frame the title text
        :param bool autolen: If True the header width will match the text width (100 otherwise)
        :param bool xline: Adds a line of **tchar** after the header text
        :param str prompt: A customised prompt (otherwise ``self.prompt`` is used)
        """
        # Bugfix: resolve the prompt *before* computing the width.  Previously
        # it was resolved only when text was printed, so ``autolen=True`` with
        # the default ``prompt=None`` crashed (len(None + text) -> TypeError).
        if not prompt:
            prompt = self.prompt
        prompt = str(prompt) + " " if prompt else ""
        if autolen:
            nbc = len(prompt + str(text)) + 1
        else:
            nbc = 100
        print()
        print(tchar * nbc)
        if text:
            print(prompt + str(text))
            if xline:
                print(tchar * nbc)
        self.flush_stdall()

    def highlight(
        self, text="", hchar="----", bchar="#", bline=False, bline0=True
    ):
        """Highlight some text.

        :param str text: The text to be highlighted
        :param str hchar: The characters used to frame the text
        :param str bchar: The characters used at the beginning
        :param bool bline: Adds a blank line after
        :param bool bline0: Adds a blank line before
        """
        if bline0:
            print()
        print(
            "{0:s} {1:s} {2:s} {1:s} {3:s}".format(
                bchar.rstrip(), hchar, text, bchar.lstrip()
            )
        )
        if bline:
            print()
        self.flush_stdall()

    @property
    def executable(self):
        """Return the actual ``sys.executable``."""
        self.stderr("executable")
        return sys.executable

    def pythonpath(self, output=None):
        """Return or print actual ``sys.path``."""
        if output is None:
            output = self.output
        self.stderr("pythonpath")
        if output:
            return sys.path[:]
        else:
            self.subtitle("Python PATH")
            for pypath in sys.path:
                print(pypath)
            return True

    @property
    def env(self):
        """Returns the current active environment."""
        return Environment.current()

    def guess_job_identifier(self):
        """Try to determine an identification string for the current script."""
        # PBS scheduler        SLURM scheduler       Good-old PID
        env = self.env
        label = env.PBS_JOBID or env.SLURM_JOB_ID or "localpid"
        if label == "localpid":
            label = str(self.getpid())
        return label

    def vortex_modules(self, only="."):
        """Return a filtered list of modules in the vortex package.

        :param str only: The regex used to filter the modules list.
        :raises RuntimeError: when no glove is attached to this System.
        """
        if self.glove is not None:
            g = self.glove
            # Keep only files living in a proper Python package (i.e. next to
            # an __init__.py file), relative to the src/site roots.
            mfiles = [
                re.sub(r"^" + mroot + r"/", "", x)
                for mroot in (g.siteroot + "/src", g.siteroot + "/site")
                for x in self.ffind(mroot)
                if self.path.isfile(
                    self.path.join(self.path.dirname(x), "__init__.py")
                )
            ]
            return [
                re.sub(r"(?:/__init__)?\.py$", "", x).replace("/", ".")
                for x in mfiles
                if (
                    not x.startswith(".")
                    and re.search(only, x, re.IGNORECASE)
                    and x.endswith(".py")
                )
            ]
        else:
            raise RuntimeError("A glove must be defined")

    def vortex_loaded_modules(self, only=".", output=None):
        """Check loaded modules, producing either a dump or a list of tuple (status, modulename).

        :param str only: The regex used to filter the modules list.
        """
        checklist = list()
        if output is None:
            output = self.output
        for modname in self.vortex_modules(only):
            checklist.append((modname, modname in sys.modules))
        if not output:
            for m, s in checklist:
                print(str(s).ljust(8), m)
            print("--")
            return True
        else:
            return checklist

    def systems_reload(self):
        """Load extra systems modules not yet loaded.

        :return: the list of module names actually imported.
        """
        extras = list()
        for modname in self.vortex_modules(only="systems"):
            if modname not in sys.modules:
                try:
                    self.import_module(modname)
                    extras.append(modname)
                # NOTE(review): only ValueError is caught here; a plain
                # ImportError would propagate -- confirm this is intended.
                except ValueError as err:
                    logger.critical(
                        "systems_reload: cannot import module %s (%s)",
                        modname,
                        str(err),
                    )
        return extras

    # Redefinition of methods of the resource package...

    def numrlimit(self, r_id):
        """
        Convert actual resource id (**r_id**) in some acceptable *int* for the
        :mod:`resource` module.

        :raises ValueError: when **r_id** does not name a known resource.
        """
        if type(r_id) is not int:
            # Accept e.g. "nofile" or "RLIMIT_NOFILE" (case-insensitive).
            r_id = r_id.upper()
            if not r_id.startswith("RLIMIT_"):
                r_id = "RLIMIT_" + r_id
            r_id = getattr(self._rl, r_id, None)
        if r_id is None:
            raise ValueError("Invalid resource specified")
        return r_id

    def setrlimit(self, r_id, r_limits):
        """Proxy to :mod:`resource` function of the same name."""
        self.stderr("setrlimit", r_id, r_limits)
        try:
            r_limits = tuple(r_limits)
        except TypeError:
            # A scalar was given: use it for both soft and hard limits.
            r_limits = (r_limits, r_limits)
        return self._rl.setrlimit(self.numrlimit(r_id), r_limits)

    def getrlimit(self, r_id):
        """Proxy to :mod:`resource` function of the same name."""
        self.stderr("getrlimit", r_id)
        return self._rl.getrlimit(self.numrlimit(r_id))

    def getrusage(self, pid=None):
        """Proxy to :mod:`resource` function of the same name with current process as default."""
        if pid is None:
            pid = self._rl.RUSAGE_SELF
        self.stderr("getrusage", pid)
        return self._rl.getrusage(pid)

    def import_module(self, modname):
        """Import the module named **modname** with :mod:`importlib` package."""
        importlib.import_module(modname)
        return sys.modules.get(modname)

    def import_function(self, funcname):
        """Import the function named **funcname** qualified by a proper module name package.

        :return: the function object, or *None* when it cannot be resolved.
        """
        thisfunc = None
        if "." in funcname:
            thismod = self.import_module(".".join(funcname.split(".")[:-1]))
            if thismod:
                thisfunc = getattr(thismod, funcname.split(".")[-1], None)
        else:
            logger.error("Bad function path name <%s>", funcname)
        return thisfunc
785
+
786
+
787
class OSExtended(System):
    """Abstract extended base system.

    It contains many useful Vortex's additions to the usual Python's shell.
    """

    # Footprint-style class metadata: the class is abstract (cannot be
    # instantiated directly) and only carries a description string.
    _abstract = True
    _footprint = dict(info="Abstract extended base system")
795
+
796
    def __init__(self, *args, **kw):
        """
        Before going through parent initialisation (see :class:`System`),
        pickle these attributes:

        * **rmtreemin** - as the minimal depth needed for a :meth:`rmsafe`.
        * **cmpaftercp** - as a boolean for activating full comparison after plain cp (default: *True*).
        * **ftraw** - allows ``smartft*`` methods to use the raw FTP commands
          (e.g. ftget, ftput) instead of the internal Vortex's FTP client
          (default: *False*).
        * **ftputcmd** - The name of the raw FTP command for the "put" action
          (default: ftput).
        * **ftgetcmd** - The name of the raw FTP command for the "get" action
          (default: ftget).
        * **ftpflavour** - The default Vortex's FTP client behaviour
          (default: `FTP_FLAVOUR.CONNECTION_POOLS`). See the :meth:`ftp` method
          for more details.
        """
        logger.debug("Abstract System init %s", self.__class__)
        # Consume our own keyword options before handing **kw to the parent
        self._rmtreemin = kw.pop("rmtreemin", 3)
        self._cmpaftercp = kw.pop("cmpaftercp", True)
        # Switches for rawft* methods
        self._ftraw = kw.pop("ftraw", None)
        self.ftputcmd = kw.pop("ftputcmd", None)
        self.ftgetcmd = kw.pop("ftgetcmd", None)
        # FTP stuff again
        self.ftpflavour = kw.pop("ftpflavour", FTP_FLAVOUR.CONNECTION_POOLS)
        self._current_ftppool = None
        # Some internal variables used by particular methods
        self._ftspool_cache = None
        self._frozen_target = None
        # Hardlinks behaviour...
        self.allow_cross_users_links = True
        # Go for the superclass' constructor
        super().__init__(*args, **kw)
        # Initialise possibly missing objects
        # NOTE(review): writing through __dict__ presumably bypasses a custom
        # __setattr__ defined in the System base class — confirm.
        self.__dict__["_cpusinfo"] = None
        self.__dict__["_numainfo"] = None
        self.__dict__["_memoryinfo"] = None
        self.__dict__["_netstatsinfo"] = None

        # Initialise the signal handler object
        self._signal_intercept_init()
839
+
840
+ @property
841
+ def ftraw(self):
842
+ """Use the system's FTP service (e.g. ftserv)."""
843
+ if self._ftraw is None:
844
+ return self.default_target.ftraw_default
845
+ else:
846
+ return self._ftraw
847
+
848
+ @ftraw.setter
849
+ def ftraw(self, value):
850
+ """Use the system's FTP service (e.g. ftserv)."""
851
+ self._ftraw = bool(value)
852
+
853
+ @ftraw.deleter
854
+ def ftraw(self):
855
+ """Use the system's FTP service (e.g. ftserv)."""
856
+ self._ftraw = None
857
+
858
+ def target(self, **kw):
859
+ """
860
+ Provide a default :class:`~vortex.tools.targets.Target` according
861
+ to System's own attributes.
862
+
863
+ * Extra or alternative attributes may still be provided using **kw**.
864
+ * The object returned by this method will be used in subsequent calls
865
+ to ::attr:`default_target` (this is the concept of frozen target).
866
+ """
867
+ desc = dict(hostname=self.hostname, sysname=self.sysname)
868
+ desc.update(kw)
869
+ self._frozen_target = footprints.proxy.targets.default(**desc)
870
+ return self._frozen_target
871
+
872
    @property
    def default_target(self):
        """Return the latest frozen target.

        The result is wrapped in a :class:`DelayedInit` object which
        presumably calls :meth:`target` lazily when no target has been
        frozen yet — confirm with the :class:`DelayedInit` implementation.
        """
        return DelayedInit(self._frozen_target, self.target)
876
+
877
+ def fmtspecific_mtd(self, method, fmt):
878
+ """Check if a format specific implementation is available for a given format."""
879
+ return hasattr(self, "{:s}_{:s}".format(fmt, method))
880
+
881
    def popen(
        self,
        args,
        stdin=None,
        stdout=None,
        stderr=None,
        shell=False,
        output=False,
        bufsize=0,
    ):  # @UnusedVariable
        """Return an open pipe for the **args** command.

        :param str|list args: The command (+ its command-line arguments) to be
            executed. When **shell** is *False* it should be a list. When **shell**
            is *True*, it should be a string.
        :param stdin: Specify the input stream characteristics:

            * If *None*, the standard input stream will be opened.
            * If *True*, a pipe is created and data may be sent to the process
              using the :meth:`~subprocess.Popen.communicate` method of the
              returned object.
            * If a File-like object is provided, it will be used as an input stream

        :param stdout: Specify the output stream characteristics:

            * If *None*, the standard output stream is used.
            * If *True*, a pipe is created and standard outputs may be retrieved
              using the :meth:`~subprocess.Popen.communicate` method of the
              returned object.
            * If a File-like object is provided, standard outputs will be written there.

        :param stderr: Specify the error stream characteristics:

            * If *None*, the standard error stream is used.
            * If *True*, a pipe is created and standard errors may be retrieved
              using the :meth:`~subprocess.Popen.communicate` method of the
              returned object.
            * If a File-like object is provided, standard errors will be written there.

        :param bool shell: If *True*, the specified command will be executed
            through the system shell. (It is usually considered a security hazard:
            see the :mod:`subprocess` documentation for more details).
        :param bool output: unused (kept for backward compatibility).
        :param int bufsize: The default buffer size for new pipes (``0`` means unbuffered)
        :return subprocess.Popen: A Python's :class:`~subprocess.Popen` object
            handling the process.
        """
        # NOTE(review): when shell=True, args is a string and "*args" splats
        # its individual characters into the trace — confirm this is intended
        # (spawn() special-cases strings for its own stderr() call).
        self.stderr(*args)
        # Map the boolean "True" convention onto subprocess pipes
        if stdout is True:
            stdout = subprocess.PIPE
        if stdin is True:
            stdin = subprocess.PIPE
        if stderr is True:
            stderr = subprocess.PIPE
        return subprocess.Popen(
            args,
            bufsize=bufsize,
            stdin=stdin,
            stdout=stdout,
            stderr=stderr,
            shell=shell,
        )
943
+
944
+ def pclose(self, p, ok=None):
945
+ """Do its best to nicely shutdown the process started by **p**.
946
+
947
+ :param subprocess.Popen p: The process to be shutdown
948
+ :param list[int] ok: The shell return codes considered as successful
949
+ (if *None*, only 0 is considered successful)
950
+ :return bool: Returns *True* if the process return code is within the
951
+ **ok** list.
952
+ """
953
+ if p.stdin is not None:
954
+ p.stdin.close()
955
+ p.wait()
956
+ if p.stdout is not None:
957
+ p.stdout.close()
958
+ if p.stderr is not None:
959
+ p.stderr.close()
960
+
961
+ try:
962
+ p.terminate()
963
+ except OSError as e:
964
+ if e.errno == 3:
965
+ logger.debug("Processus %s alreaded terminated." % str(p))
966
+ else:
967
+ raise
968
+
969
+ self._rclast = p.returncode
970
+ if ok is None:
971
+ ok = [0]
972
+ if p.returncode in ok:
973
+ return True
974
+ else:
975
+ return False
976
+
977
    def spawn(
        self,
        args,
        ok=None,
        shell=False,
        stdin=None,
        output=None,
        outmode="a+b",
        outsplit=True,
        silent=False,
        fatal=True,
        taskset=None,
        taskset_id=0,
        taskset_bsize=1,
    ):
        """Subprocess call of **args**.

        :param str|list[str] args: The command (+ its command-line arguments) to be
            executed. When **shell** is *False* it should be a list. When **shell**
            is *True*, it should be a string.
        :param list[int] ok: The shell return codes considered as successful
            (if *None*, only 0 is considered successful)
        :param bool shell: If *True*, the specified command will be executed
            through the system shell. (It is usually considered a security hazard:
            see the :mod:`subprocess` documentation for more details).
        :param stdin: Specify the input stream characteristics:

            * If *None*, the standard input stream will be opened.
            * If *True*, no standard input will be sent.
            * If a File-like object is provided, it will be used as an input stream.

        :param output: Specify the standard and error stream characteristics:

            * If *None*, ``self.output`` (that defaults to *True*) will be used.
            * If *True*, *stderr* and *stdout* will be captured and *stdout*
              will be returned by the method if the execution goes well
              according to the **ok** list. (see the **outsplit** argument).
            * If *False*, the standard output and error streams will be used.
            * If a File-like object is provided, outputs will be written there.
            * If a string is provided, it's considered to be a filename. The
              file will be opened (see the **outmode** argument) and used to
              redirect *stdout* and *stderr*

        :param str outmode: The open mode of the file output file
            (meaningful only when **output** is a filename).
        :param bool outsplit: If *True*, the captured standard output will be split
            on line-breaks and a list of lines will be returned (with all the
            line-breaks stripped out). Otherwise, the raw standard output text
            is returned. (meaningful only when **output** is *True*).
        :param bool silent: If *True*, in case a bad return-code is encountered
            (according to the **ok** list), the standard error stream is not printed
            out.
        :param bool fatal: If *True*, exceptions will be raised in case of failures
            (more precisely, if a bad return-code is detected (according to the
            **ok** list), an :class:`ExecutionError` is raised). Otherwise, the
            method just returns *False*.
        :param str taskset: If *None*, process will not be binded to a specific
            CPU core (this is usually what we want). Otherwise, **taskset** can be
            a string describing the wanted *topology* and the *method* used to
            bind the process (the string should looks like ``topology_method``).
            (see the :meth:`cpus_affinity_get` method and the
            :mod:`bronx.system.cpus` module for more details).
        :param int taskset_id: The task id for this process
        :param int taskset_bsize: The number of CPU that will be used (usually 1,
            but possibly more when using threaded programs).
        :note: When a signal is caught by the Python script, the TERM signal is
            sent to the spawned process and then the signal Exception is re-raised
            (the **fatal** argument has no effect on that).
        :note: If **output** = True, the results is a Unicode string decoded
            assuming the **locale.getpreferredencoding(False)**
            encoding.
        """
        rc = False
        if ok is None:
            ok = [0]
        if output is None:
            output = self.output
        if stdin is True:
            stdin = subprocess.PIPE
        localenv = self._os.environ.copy()
        if taskset is not None:
            # NOTE(review): the split pieces are passed as (method, topology)
            # positionals of cpus_affinity_get, while the docstring says the
            # string looks like "topology_method" — confirm the actual order.
            taskset_def = taskset.split("_")
            taskset, taskset_cmd, taskset_env = self.cpus_affinity_get(
                taskset_id, taskset_bsize, *taskset_def
            )
            if taskset:
                localenv.update(taskset_env)
            else:
                logger.warning("CPU binding is not available on this platform")
        if isinstance(args, str):
            # shell=True style: the command is a single string
            if taskset:
                args = taskset_cmd + " " + args
            if self.timer:
                args = "time " + args
            self.stderr(args)
        else:
            # list style: prepend prefixes in-place
            if taskset:
                args[:0] = taskset_cmd
            if self.timer:
                args[:0] = ["time"]
            self.stderr(*args)
        if isinstance(output, bool):
            if output:
                cmdout, cmderr = subprocess.PIPE, subprocess.PIPE
            else:
                cmdout, cmderr = None, None
        else:
            # A filename was given: open it; otherwise use the stream as-is
            if isinstance(output, str):
                output = open(output, outmode)
            cmdout, cmderr = output, output
        p = None
        try:
            p = subprocess.Popen(
                args,
                stdin=stdin,
                stdout=cmdout,
                stderr=cmderr,
                shell=shell,
                env=localenv,
            )
            p_out, p_err = p.communicate()
        except ValueError as e:
            logger.critical(
                "Weird arguments to Popen ({!s}, stdout={!s}, stderr={!s}, shell={!s})".format(
                    args, cmdout, cmderr, shell
                )
            )
            logger.critical("Caught exception: %s", e)
            if fatal:
                raise
            else:
                logger.warning("Carry on because fatal is off")
        except OSError:
            logger.critical("Could not call %s", str(args))
            if fatal:
                raise
            else:
                logger.warning("Carry on because fatal is off")
        except Exception as perr:
            logger.critical("System returns %s", str(perr))
            if fatal:
                raise RuntimeError(
                    "System {!s} spawned {!s} got [{!s}]: {!s}".format(
                        self, args, p.returncode, perr
                    )
                )
            else:
                logger.warning("Carry on because fatal is off")
        except (SignalInterruptError, KeyboardInterrupt) as perr:
            # NOTE(review): if SignalInterruptError subclasses Exception, the
            # generic handler above would catch it first, making this branch
            # reachable only through KeyboardInterrupt — confirm.
            logger.critical(
                "The python process was killed: %s. Trying to terminate the subprocess.",
                str(perr),
            )
            if p:
                if shell:
                    # Kill the process group: apparently it's the only way when shell=T
                    self.killpg(self.getpgid(p.pid), signal.SIGTERM)
                else:
                    p.terminate()
                p.wait()
            raise  # Fatal has no effect on that !
        else:
            plocale = locale.getlocale()[1] or "ascii"
            if p.returncode in ok:
                if isinstance(output, bool) and output:
                    # Decode captured stdout (replacing undecodable bytes)
                    rc = p_out.decode(plocale, "replace")
                    if outsplit:
                        rc = rc.rstrip("\n").split("\n")
                    p.stdout.close()
                else:
                    rc = not bool(p.returncode)
            else:
                if not silent:
                    logger.warning(
                        "Bad return code [%d] for %s", p.returncode, str(args)
                    )
                    if isinstance(output, bool) and output:
                        sys.stderr.write(p_err.decode(plocale, "replace"))
                if fatal:
                    raise ExecutionError()
                else:
                    logger.warning("Carry on because fatal is off")
        finally:
            # Keep track of the last return code (1 when Popen itself failed)
            self._rclast = p.returncode if p else 1
            if isinstance(output, bool) and p:
                if output:
                    if p.stdout:
                        p.stdout.close()
                    if p.stderr:
                        p.stderr.close()
            elif isinstance(output, str):
                output.close()
            elif isinstance(output, io.IOBase):
                output.flush()
            del p

        return rc
1174
+
1175
+ def getlogname(self):
1176
+ """Be sure to get the actual login name."""
1177
+ return passwd.getpwuid(self._os.getuid())[0]
1178
+
1179
+ def getfqdn(self, name=None):
1180
+ """
1181
+ Return a fully qualified domain name for **name**. Default is to
1182
+ check for current *hostname**
1183
+ """
1184
+ if name is None:
1185
+ name = self.default_target.inetname
1186
+ return socket.getfqdn(name)
1187
+
1188
+ def pwd(self, output=None):
1189
+ """Current working directory."""
1190
+ if output is None:
1191
+ output = self.output
1192
+ self.stderr("pwd")
1193
+ try:
1194
+ realpwd = self._os.getcwd()
1195
+ except OSError as e:
1196
+ logger.error("getcwdu failed: %s.", str(e))
1197
+ return None
1198
+ if output:
1199
+ return realpwd
1200
+ else:
1201
+ print(realpwd)
1202
+ return True
1203
+
1204
+ def cd(self, pathtogo, create=False):
1205
+ """Change the current working directory to **pathtogo**."""
1206
+ pathtogo = self.path.expanduser(pathtogo)
1207
+ self.stderr("cd", pathtogo, create)
1208
+ if create:
1209
+ self.mkdir(pathtogo)
1210
+ self._os.chdir(pathtogo)
1211
+ return True
1212
+
1213
    def cdcontext(self, path, create=False, clean_onexit=False):
        """
        Returns a new :class:`CdContext` context manager initialised with the
        **path**, **create** and **clean_onexit** arguments.

        The arguments' semantics are handled by :class:`CdContext` (see
        :meth:`cd` for the meaning of **create**).
        """
        return CdContext(self, path, create, clean_onexit)
1219
+
1220
+ @contextlib.contextmanager
1221
+ def temporary_dir_context(self, suffix=None, prefix=None, dir=None):
1222
+ """Creates a temporary directory and remove it when exiting the context.
1223
+
1224
+ :param suffix: The temporary directory name will end with that suffix
1225
+ :param prefix: The temporary directory name will start with that suffix
1226
+ :param dir: The temporary directory will be created in that directory
1227
+ """
1228
+ self.stderr("temporary_dir_context starts", suffix)
1229
+ self.stderr("tempfile.TemporaryDirectory", suffix, prefix, dir)
1230
+ with tempfile.TemporaryDirectory(
1231
+ suffix=suffix, prefix=prefix, dir=dir
1232
+ ) as tmp_dir:
1233
+ yield tmp_dir
1234
+ self.stderr("tempfile.TemporaryDirectory cleanup", tmp_dir)
1235
+
1236
+ @contextlib.contextmanager
1237
+ def temporary_dir_cdcontext(self, suffix=None, prefix=None, dir=None):
1238
+ """Change to a temporary directory, and remove it when exiting the context.
1239
+
1240
+ For a description of the context's arguments, see :func:`temporary_dir_context`.
1241
+ """
1242
+ with self.temporary_dir_context(
1243
+ suffix=suffix, prefix=prefix, dir=dir
1244
+ ) as tmp_dir:
1245
+ with self.cdcontext(tmp_dir, create=False, clean_onexit=False):
1246
+ yield tmp_dir
1247
+
1248
+ def ffind(self, *args):
1249
+ """Recursive file find. Arguments are starting paths."""
1250
+ if not args:
1251
+ args = ["*"]
1252
+ else:
1253
+ args = [self.path.expanduser(x) for x in args]
1254
+ files = []
1255
+ self.stderr("ffind", *args)
1256
+ for pathtogo in self.glob(*args):
1257
+ if self.path.isfile(pathtogo):
1258
+ files.append(pathtogo)
1259
+ else:
1260
+ for root, u_dirs, filenames in self._os.walk(
1261
+ pathtogo
1262
+ ): # @UnusedVariable
1263
+ files.extend([self.path.join(root, f) for f in filenames])
1264
+ return sorted(files)
1265
+
1266
+ def xperm(self, filename, force=False):
1267
+ """Return whether a **filename** exists and is executable or not.
1268
+
1269
+ If **force** is set to *True*, the file's permission will be modified
1270
+ so that the file becomes executable.
1271
+ """
1272
+ if self._os.path.exists(filename):
1273
+ is_x = bool(self._os.stat(filename).st_mode & 1)
1274
+ if not is_x and force:
1275
+ self.chmod(
1276
+ filename,
1277
+ self._os.stat(filename).st_mode
1278
+ | stat.S_IXUSR
1279
+ | stat.S_IXGRP
1280
+ | stat.S_IXOTH,
1281
+ )
1282
+ is_x = True
1283
+ return is_x
1284
+ else:
1285
+ return False
1286
+
1287
+ def rperm(self, filename, force=False):
1288
+ """Return whether a **filename** exists and is readable by all or not.
1289
+
1290
+ If **force** is set to *True*, the file's permission will be modified
1291
+ so that the file becomes readable for all.
1292
+ """
1293
+ if self._os.path.exists(filename):
1294
+ mode = self._os.stat(filename).st_mode
1295
+ is_r = all(
1296
+ [
1297
+ bool(mode & i)
1298
+ for i in [stat.S_IRUSR, stat.S_IRGRP, stat.S_IROTH]
1299
+ ]
1300
+ )
1301
+ if not is_r and force:
1302
+ self.chmod(
1303
+ filename, mode | stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
1304
+ )
1305
+ is_r = True
1306
+ return is_r
1307
+ else:
1308
+ return False
1309
+
1310
+ def wperm(self, filename, force=False):
1311
+ """Return whether a **filename** exists and is writable by owner or not.
1312
+
1313
+ If **force** is set to *True*, the file's permission will be modified
1314
+ so that the file becomes writable.
1315
+ """
1316
+ if self._os.path.exists(filename):
1317
+ st = self._os.stat(filename).st_mode
1318
+ is_w = bool(st & stat.S_IWUSR)
1319
+ if not is_w and force:
1320
+ self.chmod(filename, st | stat.S_IWUSR)
1321
+ is_w = True
1322
+ return is_w
1323
+ else:
1324
+ return False
1325
+
1326
+ def wpermtree(self, objpath, force=False):
1327
+ """Return whether all items are owner-writeable in a hierarchy.
1328
+
1329
+ If **force** is set to *True*, the file's permission of all files in the
1330
+ hierarchy will be modified so that they writable.
1331
+ """
1332
+ rc = self.wperm(objpath, force)
1333
+ for dirpath, dirnames, filenames in self.walk(objpath):
1334
+ for item in filenames + dirnames:
1335
+ rc = self.wperm(self.path.join(dirpath, item), force) and rc
1336
+ return rc
1337
+
1338
+ def readonly(self, inodename):
1339
+ """Set permissions of the ``inodename`` object to read-only."""
1340
+ inodename = self.path.expanduser(inodename)
1341
+ self.stderr("readonly", inodename)
1342
+ rc = None
1343
+ if self._os.path.exists(inodename):
1344
+ if self._os.path.isdir(inodename):
1345
+ rc = self.chmod(inodename, 0o555)
1346
+ else:
1347
+ st = self.stat(inodename).st_mode
1348
+ if st & stat.S_IWUSR or st & stat.S_IWGRP or st & stat.S_IWOTH:
1349
+ rc = self.chmod(
1350
+ inodename,
1351
+ st & ~(stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH),
1352
+ )
1353
+ else:
1354
+ rc = True
1355
+ return rc
1356
+
1357
+ def readonlytree(self, objpath):
1358
+ """Recursively set permissions of the **objpath** object to read-only."""
1359
+ rc = self.readonly(objpath)
1360
+ for dirpath, dirnames, filenames in self.walk(objpath):
1361
+ for item in filenames + dirnames:
1362
+ rc = self.readonly(self.path.join(dirpath, item)) and rc
1363
+ return rc
1364
+
1365
+ def usr_file(self, filename):
1366
+ """Return whether or not **filename** belongs to the current user."""
1367
+ return self._os.stat(filename).st_uid == self._os.getuid()
1368
+
1369
+ def touch(self, filename):
1370
+ """Clone of the eponymous unix command."""
1371
+ filename = self.path.expanduser(filename)
1372
+ self.stderr("touch", filename)
1373
+ rc = True
1374
+ if self.path.exists(filename):
1375
+ # Note: "filename" might as well be a directory...
1376
+ try:
1377
+ os.utime(filename, None)
1378
+ except Exception:
1379
+ rc = False
1380
+ else:
1381
+ fh = open(filename, "a")
1382
+ fh.close()
1383
+ return rc
1384
+
1385
+ @fmtshcmd
1386
+ def remove(self, objpath):
1387
+ """Unlink the specified object (file, directory or directory tree).
1388
+
1389
+ :param str objpath: Path to the object to unlink
1390
+ """
1391
+ objpath = self.path.expanduser(objpath)
1392
+ if self._os.path.exists(objpath):
1393
+ self.stderr("remove", objpath)
1394
+ if self._os.path.isdir(objpath):
1395
+ self.rmtree(objpath)
1396
+ else:
1397
+ self.unlink(objpath)
1398
+ else:
1399
+ self.stderr("clear", objpath)
1400
+ return not self._os.path.exists(objpath)
1401
+
1402
    @fmtshcmd
    def rm(self, objpath):
        """Shortcut to :meth:`remove` method.

        :param str objpath: Path to the object to unlink
        """
        # Simple delegate; the @fmtshcmd decorator presumably dispatches to
        # format-specific variants first — confirm with fmtshcmd's definition.
        return self.remove(objpath)
1409
+
1410
+ def ps(self, opts=None, search=None, pscmd=None):
1411
+ """
1412
+ Performs a standard process inquiry through :class:`subprocess.Popen`
1413
+ and filter the output if a **search** expression is provided (regular
1414
+ expressions are used).
1415
+ """
1416
+ if not pscmd:
1417
+ pscmd = ["ps"]
1418
+ if opts is None:
1419
+ opts = []
1420
+ pscmd.extend(self._psopts)
1421
+ pscmd.extend(opts)
1422
+ self.stderr(*pscmd)
1423
+ psall = (
1424
+ subprocess.Popen(pscmd, stdout=subprocess.PIPE)
1425
+ .communicate()[0]
1426
+ .split("\n")
1427
+ )
1428
+ if search:
1429
+ psall = filter(lambda x: re.search(search, x), psall)
1430
+ return [x.strip() for x in psall]
1431
+
1432
    def sleep(self, nbsecs):
        """Clone of the unix eponymous command.

        :param nbsecs: Number of seconds to sleep (may be fractional).
        """
        self.stderr("sleep", nbsecs)
        time.sleep(nbsecs)
1436
+
1437
+ def setulimit(self, r_id):
1438
+ """Set an unlimited value to the specified resource (**r_id**)."""
1439
+ self.stderr("setulimit", r_id)
1440
+ u_soft, hard = self.getrlimit(r_id) # @UnusedVariable
1441
+ if hard != self._rl.RLIM_INFINITY:
1442
+ logger.info(
1443
+ 'Unable to raise the %s soft limit to "unlimited", '
1444
+ + "using the hard limit instead (%s).",
1445
+ str(r_id),
1446
+ str(hard),
1447
+ )
1448
+ return self.setrlimit(r_id, (hard, hard))
1449
+
1450
+ def ulimit(self):
1451
+ """Dump the user limits currently defined."""
1452
+ for limit in [r for r in dir(self._rl) if r.startswith("RLIMIT_")]:
1453
+ print(
1454
+ " ",
1455
+ limit.ljust(16),
1456
+ ":",
1457
+ self._rl.getrlimit(getattr(self._rl, limit)),
1458
+ )
1459
+
1460
    @property
    def cpus_info(self):
        """Return an object of a subclass of :class:`bronx.system.cpus.CpusInfo`.

        Such objects are designed to get informations on the platform's CPUs.

        :note: *None* might be returned on some platforms (if cpusinfo is not
            implemented)
        """
        # Backed by self._cpusinfo (initialised to None in __init__,
        # presumably populated by platform-specific subclasses — confirm).
        return self._cpusinfo
1470
+
1471
+ def cpus_ids_per_blocks(
1472
+ self, blocksize=1, topology="raw", hexmask=False
1473
+ ): # @UnusedVariable
1474
+ """Get the list of CPUs IDs ordered for subsequent binding.
1475
+
1476
+ :param int blocksize: The number of thread consumed by one task
1477
+ :param str topology: The task distribution scheme
1478
+ :param bool hexmask: Return a list of CPU masks in hexadecimal
1479
+ """
1480
+ return []
1481
+
1482
+ def cpus_ids_dispenser(self, topology="raw"):
1483
+ """Get a dispenser of CPUs IDs for nicely ordered for subsequent binding.
1484
+
1485
+ :param str topology: The task distribution scheme
1486
+ """
1487
+ return None
1488
+
1489
+ def cpus_affinity_get(
1490
+ self, taskid, blocksize=1, method="default", topology="raw"
1491
+ ): # @UnusedVariable
1492
+ """Get the necessary command/environment to set the CPUs affinity.
1493
+
1494
+ :param int taskid: the task number
1495
+ :param int blocksize: the number of thread consumed by one task
1496
+ :param str method: The binding method
1497
+ :param str topology: The task distribution scheme
1498
+ :return: A 3-elements tuple. (bool: BindingPossible,
1499
+ list: Starting command prefix, dict: Environment update)
1500
+ """
1501
+ return (False, list(), dict())
1502
+
1503
    @property
    def numa_info(self):
        """Return an object of a subclass of :class:`bronx.system.numa.NumaNodesInfo`.

        Such objects are designed to get informations on the platform's NUMA layout

        :note: *None* might be returned on some platforms (if numainfo is not
            implemented)
        """
        # Backed by self._numainfo (initialised to None in __init__).
        return self._numainfo
1513
+
1514
    @property
    def memory_info(self):
        """Return an object of a subclass of :class:`bronx.system.memory.MemInfo`.

        Such objects are designed to get informations on the platform's RAM.

        :note: *None* might be returned on some platforms (if meminfo is not
            implemented)
        """
        # Backed by self._memoryinfo (initialised to None in __init__).
        return self._memoryinfo
1524
+
1525
    @property
    def netstatsinfo(self):
        """Return an object of a subclass of :class:`vortex.tools.net.AbstractNetstats`.

        Such objects are designed to get informations on the platform's network
        connection (both TCP and UDP)

        :note: *None* might be returned on some platforms (if netstat is not
            implemented)
        """
        # Backed by self._netstatsinfo (initialised to None in __init__).
        return self._netstatsinfo
1536
+
1537
+ def available_localport(self):
1538
+ """Returns an available port number for a new TCP or UDP connection.
1539
+
1540
+ :note: The ``NotImplementedError`` might be raised on some platforms since
1541
+ netstat may not be implemented.
1542
+ """
1543
+ if self.netstatsinfo is None:
1544
+ raise NotImplementedError(
1545
+ "This function is not implemented on this system."
1546
+ )
1547
+ return self.netstatsinfo.available_localport()
1548
+
1549
+ def check_localport(self, port):
1550
+ """Check if a **port** number is currently being used.
1551
+
1552
+ :note: The ``NotImplementedError`` might be raised on some platforms since
1553
+ netstat may not be implemented.
1554
+ """
1555
+ if self.netstatsinfo is None:
1556
+ raise NotImplementedError(
1557
+ "This function is not implemented on this system."
1558
+ )
1559
+ return self.netstatsinfo.check_localport(port)
1560
+
1561
    def clear(self):
        """Clone of the unix eponymous command."""
        # Delegates screen clearing to the external "clear" shell command.
        self._os.system("clear")
1564
+
1565
    @property
    def cls(self):
        """Property shortcut to :meth:`clear` screen.

        Accessing the property has a side effect (clearing the terminal)
        and always evaluates to *None*.
        """
        self.clear()
        return None
1570
+
1571
+ def rawopts(
1572
+ self,
1573
+ cmdline=None,
1574
+ defaults=None,
1575
+ isnone=isnonedef,
1576
+ istrue=istruedef,
1577
+ isfalse=isfalsedef,
1578
+ ):
1579
+ """Parse a simple options command line that looks like `` key=value``.
1580
+
1581
+ :param str cmdline: The command line to be processed (if *None*, ``sys.argv``
1582
+ is used to get the script's command line)
1583
+ :param dict defaults: defaults values for any missing option.
1584
+ :param re.sre_compile isnone: Regex that detects ``None`` values.
1585
+ :param re.sre_compile isnone: Regex that detects ``True`` values.
1586
+ :param re.sre_compile isnone: Regex that detects ``False`` values.
1587
+ :return dict: a dictionary that contains the parsed options (or their defaults)
1588
+ """
1589
+ opts = dict()
1590
+ if defaults:
1591
+ try:
1592
+ opts.update(defaults)
1593
+ except (ValueError, TypeError):
1594
+ logger.warning(
1595
+ "Could not update options default: %s", defaults
1596
+ )
1597
+
1598
+ if cmdline is None:
1599
+ cmdline = sys.argv[1:]
1600
+ opts.update(dict([x.split("=") for x in cmdline]))
1601
+ for k, v in opts.items():
1602
+ if v not in (None, True, False):
1603
+ if istrue.match(v):
1604
+ opts[k] = True
1605
+ if isfalse.match(v):
1606
+ opts[k] = False
1607
+ if isnone.match(v):
1608
+ opts[k] = None
1609
+ return opts
1610
+
1611
+ def is_iofile(self, iocandidate):
1612
+ """Check if actual **iocandidate** is a valid filename or io stream."""
1613
+ return iocandidate is not None and (
1614
+ (isinstance(iocandidate, str) and self.path.exists(iocandidate))
1615
+ or isinstance(iocandidate, io.IOBase)
1616
+ or isinstance(iocandidate, io.BytesIO)
1617
+ or isinstance(iocandidate, io.StringIO)
1618
+ )
1619
+
1620
    @contextlib.contextmanager
    def ftppool(self, nrcfile=None):
        """Create a context manager that initialises the FTP connection pool.

        Within this context manager, if `self.ftpflavour==FTP_FLAVOUR.CONNECTION_POOLS`,
        the :meth:`ftp` method will use the FTP connection pool initialised by this
        context manager (see the :class:`~vortex.tools.net.FtpConnectionPool` class)
        in order to dispense FTP clients.

        When the context manager is exited, the FTP connection pool is destroyed
        (and all the spare FTP clients are closed).
        """
        # Only the outermost context owns the pool: nested calls detect the
        # already-active pool and leave its lifecycle alone.
        pool_control = self._current_ftppool is None
        if pool_control:
            self._current_ftppool = FtpConnectionPool(self, nrcfile=nrcfile)
        try:
            yield self._current_ftppool
        finally:
            if pool_control:
                self._current_ftppool.clear()
                self._current_ftppool = None
1641
+
1642
+ def fix_fthostname(self, hostname, fatal=True):
1643
+ """If *hostname* is None, tries to find a default value for it."""
1644
+ if hostname is None:
1645
+ hostname = self.glove.default_fthost
1646
+ if not hostname:
1647
+ if fatal:
1648
+ raise ValueError(
1649
+ "An *hostname* must be provided one way or another"
1650
+ )
1651
+ return hostname
1652
+
1653
+ def fix_ftuser(self, hostname, logname, fatal=True, defaults_to_user=True):
1654
+ """Given *hostname*, if *logname* is None, tries to find a default value for it."""
1655
+ if logname is None:
1656
+ if self.glove is not None:
1657
+ logname = self.glove.getftuser(
1658
+ hostname, defaults_to_user=defaults_to_user
1659
+ )
1660
+ else:
1661
+ if fatal:
1662
+ raise ValueError(
1663
+ "Either a *logname* or a glove must be set-up"
1664
+ )
1665
+ return logname
1666
+
1667
    def ftp(
        self, hostname, logname=None, delayed=False, port=DEFAULT_FTP_PORT
    ):
        """Return an FTP client object.

        :param str hostname: the remote host's name for FTP.
        :param str logname: the logname on the remote host.
        :param bool delayed: delay the actual connection attempt as much as possible.
        :param int port: the port number on the remote host.

        The returned object is an instance of :class:`~vortex.tools.net.StdFtp`
        or of one of its subclasses. Consequently, see the :class:`~vortex.tools.net.StdFtp`
        class documentation to get more information on the client's capabilities.

        The type and behaviour of the returned object depends of the `self.ftpflavour`
        setting. Possible values are:

        * `FTP_FLAVOUR.STD`: a :class:`~vortex.tools.net.StdFtp` object is returned.
        * `FTP_FLAVOUR.RETRIES`: a :class:`~vortex.tools.net.AutoRetriesFtp` object
          is returned (consequently multiple retries will be made if something
          goes wrong with any FTP command).
        * `FTP_FLAVOUR.CONNECTION_POOLS`: a :class:`~vortex.tools.net.AutoRetriesFtp`
          or a :class:`~vortex.tools.net.PooledResetableAutoRetriesFtp` object
          is returned. If the :meth:`ftp` method is called from within a context
          manager created by the :meth:`ftppool`, a
          :class:`~vortex.tools.net.FtpConnectionPool` object is used in order
          to create and re-use FTP connections; Otherwise a "usual"
          :class:`~vortex.tools.net.AutoRetriesFtp` is returned.

        :return: The FTP client, or *None* when the fast login attempt fails.
        """
        logname = self.fix_ftuser(hostname, logname)
        if port is None:
            # Callers may explicitly pass port=None: normalise it here
            port = DEFAULT_FTP_PORT
        if (
            self.ftpflavour == FTP_FLAVOUR.CONNECTION_POOLS
            and self._current_ftppool is not None
        ):
            # Inside a ftppool() context: let the pool dispense the client
            return self._current_ftppool.deal(
                hostname, logname, port=port, delayed=delayed
            )
        else:
            ftpclass = (
                AutoRetriesFtp
                if self.ftpflavour != FTP_FLAVOUR.STD
                else StdFtp
            )
            ftpbox = ftpclass(self, hostname, port=port)
            rc = ftpbox.fastlogin(logname, delayed=delayed)
            if rc:
                return ftpbox
            else:
                logger.warning(
                    "Could not login on %s as %s [%s]",
                    hostname,
                    logname,
                    str(rc),
                )
                return None
1724
+
1725
@contextlib.contextmanager
def ftpcontext(
    self, hostname, logname=None, delayed=False, port=DEFAULT_FTP_PORT
):
    """Create an FTP object and close it when the context exits.

    For a description of the context's arguments, see :func:`ftp`.
    """
    client = self.ftp(hostname, logname=logname, delayed=delayed, port=port)
    if not client:
        # Login failed: hand the falsy client out as-is (nothing to close)
        yield client
    else:
        try:
            yield client
        finally:
            # Always release the connection, whatever happened in the body
            client.close()
1741
+
1742
@fmtshcmd
def anyft_remote_rewrite(self, remote):
    """
    When copying the data using a file transfer protocol (FTP, scp, ...),
    given the format, possibly modify the remote name.

    This default implementation is format-agnostic and returns *remote*
    untouched; format-specific overrides are dispatched through @fmtshcmd.
    """
    return remote
1749
+
1750
@fmtshcmd
def ftget(
    self,
    source,
    destination,
    hostname=None,
    logname=None,
    port=DEFAULT_FTP_PORT,
    cpipeline=None,
):
    """Proceed to a direct ftp get on the specified target (using Vortex's FTP client).

    :param str source: the remote path to get data
    :param destination: The destination of data (either a path to file or a
        File-like object)
    :type destination: str or File-like object
    :param str hostname: The target hostname (default: *None*, see the
        :class:`~vortex.tools.net.StdFtp` class to get the effective default)
    :param str logname: the target logname (default: *None*, see the
        :class:`~vortex.tools.net.StdFtp` class to get the effective default)
    :param int port: the port number on the remote host.
    :param CompressionPipeline cpipeline: If not *None*, the object used to
        uncompress the data during the file transfer (default: *None*).
    :return: *True* on success, *False* on login or transfer failure.
    """
    if isinstance(destination, str):  # destination may be Virtual
        # Start from a clean slate: remove any pre-existing target
        self.rm(destination)
    hostname = self.fix_fthostname(hostname)
    ftp = self.ftp(hostname, logname, port=port)
    if ftp:
        try:
            if cpipeline is None:
                rc = ftp.get(source, destination)
            else:
                # Uncompress on the fly while the data is fetched
                with cpipeline.stream2uncompress(
                    destination
                ) as cdestination:
                    rc = ftp.get(source, cdestination)
        except ftplib.all_errors as e:
            logger.warning("An FTP error occured: %s", str(e))
            rc = False
        finally:
            # Always release the FTP connection
            ftp.close()
        return rc
    else:
        # self.ftp() returned None: login failed
        return False
1795
+
1796
@fmtshcmd
def ftput(
    self,
    source,
    destination,
    hostname=None,
    logname=None,
    port=DEFAULT_FTP_PORT,
    cpipeline=None,
    sync=False,
):  # @UnusedVariable
    """Proceed to a direct ftp put on the specified target (using Vortex's FTP client).

    :param source: The source of data (either a path to file or a
        File-like object)
    :type source: str or File-like object
    :param str destination: The path where to upload the data.
    :param str hostname: The target hostname (default: *None*, see the
        :class:`~vortex.tools.net.StdFtp` class to get the effective default)
    :param str logname: the target logname (default: *None*, see the
        :class:`~vortex.tools.net.StdFtp` class to get the effective default)
    :param int port: the port number on the remote host.
    :param CompressionPipeline cpipeline: If not *None*, the object used to
        compress the data during the file transfer (default: *None*).
    :param bool sync: If False, allow asynchronous transfers (currently not
        used: transfers are always synchronous).
    :return: *True* on success, *False* on login or transfer failure.
    :raises OSError: when *source* is not an existing file nor a file-like object.
    """
    rc = False
    if self.is_iofile(source):
        hostname = self.fix_fthostname(hostname)
        ftp = self.ftp(hostname, logname, port=port)
        if ftp:
            try:
                if cpipeline is None:
                    rc = ftp.put(source, destination)
                else:
                    # Compress on the fly while the data is uploaded
                    with cpipeline.compress2stream(
                        source, iosponge=True
                    ) as csource:
                        # csource is an IoSponge consequently the size attribute exists
                        rc = ftp.put(
                            csource, destination, size=csource.size
                        )
            except ftplib.all_errors as e:
                logger.warning("An FTP error occured: %s", str(e))
                rc = False
            finally:
                # Always release the FTP connection
                ftp.close()
    else:
        raise OSError("No such file or directory: {!r}".format(source))
    return rc
1847
+
1848
def ftspool_cache(self):
    """Return a cache object for the FtSpool.

    The cache object is created on first use and memoised in
    ``self._ftspool_cache`` for subsequent calls.
    """
    if self._ftspool_cache is not None:
        return self._ftspool_cache
    # Lazy creation: the cache lives in the "ftspool" subdirectory of the
    # configured cache location
    self._ftspool_cache = footprints.proxy.cache(
        entry=os.path.join(
            vortex.data.stores.get_cache_location(), "ftspool"
        ),
    )
    return self._ftspool_cache
1858
+
1859
def copy2ftspool(self, source, nest=False, **kwargs):
    """Make a copy of **source** to the FtSpool cache.

    :param str source: path of the data to spool.
    :param bool nest: keep the original basename under a unique directory.
    :return: the full path of the spooled copy, or *False* on failure.
    """
    # Build a system-wide unique name: timestamp + pid + md5 of the path
    digest = hashlib.new("md5")
    digest.update(source.encode(encoding="utf-8"))
    target = "vortex_{:s}_P{:06d}_{:s}".format(
        date.now().strftime("%Y%m%d%H%M%S-%f"),
        self.getpid(),
        digest.hexdigest(),
    )
    if nest:
        target = self.path.join(target, self.path.basename(source))
    kwargs["intent"] = "in"  # Force intent=in
    spool = self.ftspool_cache()
    if spool.insert(target, source, **kwargs):
        return spool.fullpath(target)
    return False
1875
+
1876
def ftserv_allowed(self, source, destination):
    """Given **source** and **destination**, is FtServ usable ?

    FtServ only handles plain file paths: both ends must be strings
    (file-like objects are not supported).
    """
    return all(isinstance(item, str) for item in (source, destination))
1879
+
1880
def ftserv_put(
    self,
    source,
    destination,
    hostname=None,
    logname=None,
    port=None,
    specialshell=None,
    sync=False,
):
    """Asynchronous put of a file using FtServ.

    :param str source: path to the local file to upload.
    :param str destination: the remote path where to upload the data.
    :param str hostname: the target hostname (optional).
    :param str logname: the target logname (optional).
    :param int port: the port number on the remote host (optional).
    :param str specialshell: alternative transfer shell (ftput's ``-s`` option).
    :param bool sync: if *False*, queue the transfer (ftput's ``-q`` option).
    :return: the ftput command's return code, or *False* on execution error.
    :raises OSError: when source/destination are not plain file paths or
        *source* does not exist.
    """
    if not self.ftserv_allowed(source, destination):
        raise OSError(
            "Source or destination is not a plain file path: {!r}".format(
                source
            )
        )
    if not self.path.exists(source):
        raise OSError("No such file or directory: {!s}".format(source))
    ftcmd = self.ftputcmd or "ftput"
    hostname = self.fix_fthostname(hostname, fatal=False)
    logname = self.fix_ftuser(hostname, logname, fatal=False)
    extras = list()
    if not sync:
        # Queued (asynchronous) transfer
        extras.append("-q")
    if hostname:
        if port is not None:
            # Bugfix: *port* is documented as an int, so the "{:s}" format
            # code used to raise ValueError; "{!s}" works for both.
            hostname += ":{!s}".format(port)
        extras.extend(["-h", hostname])
    if logname:
        extras.extend(["-u", logname])
    if specialshell:
        extras.extend(["-s", specialshell])
    # Remove ~/ and ~logname/ from the destinations' path
    actual_dest = re.sub("^~/+", "", destination)
    if logname:
        actual_dest = re.sub(
            "^~{:s}/+".format(logname), "", actual_dest
        )
    try:
        rc = self.spawn(
            # "-o mkdir": automatically create remote subdirectories
            [
                ftcmd,
                "-o",
                "mkdir",
            ]
            + extras
            + [source, actual_dest],
            output=False,
        )
    except ExecutionError:
        rc = False
    return rc
1939
+
1940
def ftserv_get(
    self, source, destination, hostname=None, logname=None, port=None
):
    """Get a file using FtServ.

    :param str source: the remote path of the data.
    :param str destination: the local path where to store the data.
    :param str hostname: the target hostname (optional).
    :param str logname: the target logname (optional).
    :param int port: the port number on the remote host (optional).
    :return: the ftget command's return code, or *False* on execution error.
    :raises OSError: when source/destination are not plain file paths or
        the destination's directory cannot be created.
    """
    if not self.ftserv_allowed(source, destination):
        raise OSError(
            "Source or destination is not a plain file path: {!r}".format(
                source
            )
        )
    if not self.filecocoon(destination):
        raise OSError("Could not cocoon: {!s}".format(destination))
    hostname = self.fix_fthostname(hostname, fatal=False)
    logname = self.fix_ftuser(hostname, logname, fatal=False)
    destination = self.path.expanduser(destination)
    extras = list()
    if hostname:
        if port is not None:
            # Bugfix: *port* is documented as an int, so the "{:s}" format
            # code used to raise ValueError; "{!s}" works for both.
            hostname += ":{!s}".format(port)
        extras.extend(["-h", hostname])
    if logname:
        extras.extend(["-u", logname])
    ftcmd = self.ftgetcmd or "ftget"
    try:
        rc = self.spawn(
            [
                ftcmd,
            ]
            + extras
            + [source, destination],
            output=False,
        )
    except ExecutionError:
        rc = False
    return rc
1977
+
1978
def ftserv_batchget(
    self, source, destination, hostname=None, logname=None, port=None
):
    """Get a list of files using FtServ.

    :param source: the remote paths of the data.
    :param destination: the local paths where to store the data.
    :param str hostname: the target hostname (optional).
    :param str logname: the target logname (optional).
    :param int port: the port number on the remote host (optional).
    :return: a list of per-file return codes (entries may be *None* when no
        acknowledgment line could be matched in the ftget output).
    :raises OSError: when any source/destination is not a plain file path or
        a destination's directory cannot be created.

    :note: **source** and **destination** are list or tuple.
    """
    if not all(
        [self.ftserv_allowed(s, d) for s, d in zip(source, destination)]
    ):
        raise OSError(
            "Source or destination is not a plain file path: {!r}".format(
                source
            )
        )
    for d in destination:
        if not self.filecocoon(d):
            raise OSError("Could not cocoon: {!s}".format(d))
    extras = list()
    hostname = self.fix_fthostname(hostname, fatal=False)
    logname = self.fix_ftuser(hostname, logname, fatal=False)
    if hostname:
        if port is not None:
            # Bugfix: *port* is documented as an int, so the "{:s}" format
            # code used to raise ValueError; "{!s}" works for both.
            hostname += ":{!s}".format(port)
        extras.extend(["-h", hostname])
    if logname:
        extras.extend(["-u", logname])
    ftcmd = self.ftgetcmd or "ftget"
    plocale = locale.getlocale()[1] or "ascii"
    with tempfile.TemporaryFile(
        dir=self.path.dirname(self.path.abspath(destination[0])),
        mode="wb",
    ) as tmpio:
        # Feed the "source destination" pairs to ftget through stdin
        tmpio.writelines(
            [
                "{:s} {:s}\n".format(s, d).encode(plocale)
                for s, d in zip(source, destination)
            ]
        )
        tmpio.seek(0)
        with tempfile.TemporaryFile(
            dir=self.path.dirname(self.path.abspath(destination[0])),
            mode="w+b",
        ) as tmpoutput:
            try:
                rc = self.spawn(
                    [
                        ftcmd,
                    ]
                    + extras,
                    output=tmpoutput,
                    stdin=tmpio,
                )
            except ExecutionError:
                rc = False
            # Process output data (re-using the locale computed above
            # instead of querying it a second time)
            tmpoutput.seek(0)
            ft_outputs = tmpoutput.read().decode(plocale, "replace")
    logger.info("Here is the ftget command output: \n%s", ft_outputs)
    # Expand the return codes
    if rc:
        x_rc = [
            True,
        ] * len(source)
    else:
        # Global failure: try to match per-file FT_OK/FT_ABORT
        # acknowledgment lines in the command output
        ack_re = re.compile(r".*FT_(OK|ABORT)\s*:\s*GET\s+(.*)$")
        ack_lines = dict()
        for line in ft_outputs.split("\n"):
            ack_match = ack_re.match(line)
            if ack_match:
                ack_lines[ack_match.group(2)] = (
                    ack_match.group(1) == "OK"
                )
        x_rc = []
        for a_source in source:
            my_rc = None
            for ack_globish, ack_rc in ack_lines.items():
                if a_source in ack_globish:
                    my_rc = ack_rc
                    break
            x_rc.append(my_rc)
    return x_rc
2064
+
2065
def rawftput_worthy(self, source, destination):
    """Is it allowed to use FtServ given **source** and **destination**."""
    # FtServ must be enabled, and both ends must be plain file paths
    if not self.ftraw:
        return self.ftraw
    return self.ftserv_allowed(source, destination)
2068
+
2069
@fmtshcmd
def rawftput(
    self,
    source,
    destination,
    hostname=None,
    logname=None,
    port=None,
    cpipeline=None,
    sync=False,
):
    """Proceed with some external ftput command on the specified target.

    :param str source: Path to the source filename
    :param str destination: The path where to upload the data.
    :param str hostname: The target hostname (default: *None*).
    :param str logname: the target logname (default: *None*).
    :param int port: the port number on the remote host.
    :param CompressionPipeline cpipeline: If not *None*, the object used to
        compress the data during the file transfer (default: *None*).
    :param bool sync: If False, allow asynchronous transfers.
    """
    if cpipeline is not None:
        # Call compress2rawftp only once (it used to be evaluated twice:
        # once for the truth test and once for the actual argument)
        rawftp_shell = cpipeline.compress2rawftp(source)
        if rawftp_shell:
            # The compression can be delegated to FtServ's special shell
            return self.ftserv_put(
                source,
                destination,
                hostname,
                logname=logname,
                port=port,
                specialshell=rawftp_shell,
                sync=sync,
            )
        else:
            # FtServ cannot handle this compression: fall back to the
            # internal FTP client
            if port is None:
                port = DEFAULT_FTP_PORT
            return self.ftput(
                source,
                destination,
                hostname,
                logname=logname,
                port=port,
                cpipeline=cpipeline,
                sync=sync,
            )
    else:
        return self.ftserv_put(
            source, destination, hostname, logname, port=port, sync=sync
        )
2118
+
2119
def smartftput(
    self,
    source,
    destination,
    hostname=None,
    logname=None,
    port=None,
    cpipeline=None,
    sync=False,
    fmt=None,
):
    """Select the best alternative between ``ftput`` and ``rawftput``.

    :param source: The source of data (either a path to file or a
        File-like object)
    :type source: str or File-like object
    :param str destination: The path where to upload the data.
    :param str hostname: The target hostname (see :class:`~vortex.tools.net.StdFtp`
        for the default)
    :param str logname: the target logname (see :class:`~vortex.tools.net.StdFtp`
        for the default)
    :param int port: the port number on the remote host.
    :param CompressionPipeline cpipeline: If not *None*, the object used to
        compress the data during the file transfer.
    :param bool sync: If False, allow asynchronous transfers.
    :param str fmt: The format of data.

    ``rawftput`` will be used if all of the following conditions are met:

    * ``self.ftraw`` is *True*
    * **source** is a string (as opposed to a File like object)
    * **destination** is a string (as opposed to a File like object)
    """
    use_raw = self.rawftput_worthy(source, destination)
    if not use_raw and port is None:
        # The internal FTP client needs an actual port number
        port = DEFAULT_FTP_PORT
    transfer = self.rawftput if use_raw else self.ftput
    return transfer(
        source,
        destination,
        hostname=hostname,
        logname=logname,
        port=port,
        cpipeline=cpipeline,
        sync=sync,
        fmt=fmt,
    )
2176
+
2177
def rawftget_worthy(self, source, destination, cpipeline=None):
    """Is it allowed to use FtServ given **source** and **destination**."""
    if not self.ftraw:
        return self.ftraw
    # FtServ cannot deal with on-the-fly (un)compression
    if cpipeline is not None:
        return False
    return self.ftserv_allowed(source, destination)
2184
+
2185
@fmtshcmd
def rawftget(
    self,
    source,
    destination,
    hostname=None,
    logname=None,
    port=None,
    cpipeline=None,
):
    """Proceed with some external ftget command on the specified target.

    :param str source: the remote path to get data
    :param str destination: path to the filename where to put the data.
    :param str hostname: the target hostname (default: *None*).
    :param str logname: the target logname (default: *None*).
    :param int port: the port number on the remote host.
    :param CompressionPipeline cpipeline: unused (kept for compatibility)
    :return: the :meth:`ftserv_get` return code.
    :raises OSError: when a compression pipeline is provided (unsupported).
    """
    if cpipeline is not None:
        raise OSError("cpipeline is not supported by this method.")
    return self.ftserv_get(
        source, destination, hostname, logname, port=port
    )
2209
+
2210
@fmtshcmd
def batchrawftget(
    self,
    source,
    destination,
    hostname=None,
    logname=None,
    port=None,
    cpipeline=None,
):
    """Proceed with some external ftget command on the specified target.

    :param source: A list of remote paths to get data
    :param destination: A list of paths to the filename where to put the data.
    :param str hostname: the target hostname (default: *None*).
    :param str logname: the target logname (default: *None*).
    :param int port: the port number on the remote host.
    :param CompressionPipeline cpipeline: unused (kept for compatibility)
    :return: the :meth:`ftserv_batchget` result (a list of per-file codes).
    :raises OSError: when a compression pipeline is provided (unsupported).
    """
    if cpipeline is not None:
        raise OSError("cpipeline is not supported by this method.")
    return self.ftserv_batchget(
        source, destination, hostname, logname, port=port
    )
2234
+
2235
def smartftget(
    self,
    source,
    destination,
    hostname=None,
    logname=None,
    port=None,
    cpipeline=None,
    fmt=None,
):
    """Select the best alternative between ``ftget`` and ``rawftget``.

    :param str source: the remote path to get data
    :param destination: The destination of data (either a path to file or a
        File-like object)
    :type destination: str or File-like object
    :param str hostname: The target hostname (see :class:`~vortex.tools.net.StdFtp`
        for the default)
    :param str logname: the target logname (see :class:`~vortex.tools.net.StdFtp`
        for the default)
    :param int port: the port number on the remote host.
    :param CompressionPipeline cpipeline: If not *None*, the object used to
        uncompress the data during the file transfer.
    :param str fmt: The format of data.

    ``rawftget`` will be used if all of the following conditions are met:

    * ``self.ftraw`` is *True*
    * **cpipeline** is None
    * **source** is a string (as opposed to a File like object)
    * **destination** is a string (as opposed to a File like object)
    """
    # FtServ is uninteresting when dealing with compression
    use_raw = self.rawftget_worthy(source, destination, cpipeline)
    if not use_raw and port is None:
        # The internal FTP client needs an actual port number
        port = DEFAULT_FTP_PORT
    transfer = self.rawftget if use_raw else self.ftget
    return transfer(
        source,
        destination,
        hostname=hostname,
        logname=logname,
        port=port,
        cpipeline=cpipeline,
        fmt=fmt,
    )
2290
+
2291
def smartbatchftget(
    self,
    source,
    destination,
    hostname=None,
    logname=None,
    port=None,
    cpipeline=None,
    fmt=None,
):
    """
    Select the best alternative between ``ftget`` and ``batchrawftget``
    when retrieving several files.

    :param source: A list of remote paths to get data
    :param destination: A list of destinations for the data (either a path to
        file or a File-like object)
    :type destination: str or File-like object
    :param str hostname: The target hostname (see :class:`~vortex.tools.net.StdFtp`
        for the default)
    :param str logname: the target logname (see :class:`~vortex.tools.net.StdFtp`
        for the default)
    :param int port: the port number on the remote host.
    :param CompressionPipeline cpipeline: If not *None*, the object used to
        uncompress the data during the file transfer.
    :param str fmt: The format of data.
    """
    if all(
        [
            self.rawftget_worthy(s, d, cpipeline)
            for s, d in zip(source, destination)
        ]
    ):
        # FtServ is uninteresting when dealing with compression
        return self.batchrawftget(
            source,
            destination,
            hostname=hostname,
            logname=logname,
            # Bugfix: honour the caller-provided *port* (it used to be
            # silently discarded and replaced by None, unlike smartftget)
            port=port,
            cpipeline=cpipeline,
            fmt=fmt,
        )
    else:
        rc = True
        if port is None:
            port = DEFAULT_FTP_PORT
        # Share a single FTP connection pool across all the transfers
        with self.ftppool():
            for s, d in zip(source, destination):
                # NOTE: "rc and ..." short-circuits, i.e. remaining
                # transfers are skipped after the first failure
                rc = rc and self.ftget(
                    s,
                    d,
                    hostname=hostname,
                    logname=logname,
                    port=port,
                    cpipeline=cpipeline,
                    fmt=fmt,
                )
        return rc
2350
+
2351
def ssh(self, hostname, logname=None, *args, **kw):
    """Return an :class:`~vortex.tools.net.AssistedSsh` object.

    :param str hostname: the remote host's name for SSH
    :param str logname: the logname on the remote host

    Parameters provided with **args** or **kw** will be passed directly to the
    :class:`~vortex.tools.net.AssistedSsh` constructor.

    See the :class:`~vortex.tools.net.AssistedSsh` class documentation for
    more information and examples.
    """
    return AssistedSsh(self, hostname, logname, *args, **kw)
2364
+
2365
@fmtshcmd
def scpput(
    self, source, destination, hostname, logname=None, cpipeline=None
):
    """Perform an scp to the specified target.

    :param source: The source of data (either a path to file or a
        File-like object)
    :type source: str or File-like object
    :param str destination: The path where to upload the data.
    :param str hostname: The target hostname.
    :param str logname: the target logname (default: current user)
    :param CompressionPipeline cpipeline: If not *None*, the object used to
        compress the data during the file transfer (default: *None*).
    :return: the scp client's return code.
    """
    logname = self.fix_ftuser(
        hostname, logname, fatal=False, defaults_to_user=False
    )
    msg = "[hostname={!s} logname={!s}]".format(hostname, logname)
    ssh = self.ssh(hostname, logname)
    if isinstance(source, str) and cpipeline is None:
        # Plain file to file copy
        self.stderr("scpput", source, destination, msg)
        return ssh.scpput(source, destination)
    else:
        # File-like source and/or on-the-fly compression: stream the data
        self.stderr("scpput_stream", source, destination, msg)
        if cpipeline is None:
            return ssh.scpput_stream(source, destination)
        else:
            with cpipeline.compress2stream(source) as csource:
                return ssh.scpput_stream(csource, destination)
2395
+
2396
@fmtshcmd
def scpget(
    self, source, destination, hostname, logname=None, cpipeline=None
):
    """Perform an scp to get the specified source.

    :param str source: the remote path to get data
    :param destination: The destination of data (either a path to file or a
        File-like object)
    :type destination: str or File-like object
    :param str hostname: The target hostname.
    :param str logname: the target logname (default: current user)
    :param CompressionPipeline cpipeline: If not *None*, the object used to
        uncompress the data during the file transfer (default: *None*).
    :return: the scp client's return code.
    """
    logname = self.fix_ftuser(
        hostname, logname, fatal=False, defaults_to_user=False
    )
    msg = "[hostname={!s} logname={!s}]".format(hostname, logname)
    ssh = self.ssh(hostname, logname)
    if isinstance(destination, str) and cpipeline is None:
        # Plain file to file copy
        self.stderr("scpget", source, destination, msg)
        return ssh.scpget(source, destination)
    else:
        # File-like destination and/or on-the-fly uncompression: stream
        self.stderr("scpget_stream", source, destination, msg)
        if cpipeline is None:
            return ssh.scpget_stream(source, destination)
        else:
            with cpipeline.stream2uncompress(destination) as cdestination:
                return ssh.scpget_stream(source, cdestination)
2426
+
2427
def softlink(self, source, destination):
    """Set a symbolic link if **source** is not **destination**.

    :return: *False* when source and destination are the same path,
        otherwise the :meth:`symlink` return code.
    """
    self.stderr("softlink", source, destination)
    # Guard against self-referencing links
    if source == destination:
        return False
    return self.symlink(source, destination)
2434
+
2435
def size(self, filepath):
    """Returns the actual size in bytes of the specified **filepath**.

    :return: the file size, or ``-1`` when the file cannot be stat-ed.
    """
    target = self.path.expanduser(filepath)
    self.stderr("size", target)
    try:
        nbytes = self.stat(target).st_size
    except Exception:
        # Missing file, permission problem, ... report -1 instead of raising
        return -1
    return nbytes
2443
+
2444
def treesize(self, objpath):
    """Size in byte of the whole **objpath** directory (or file).

    Links are not followed, and directory sizes are taken into account:
    should return the same as ``du -sb``.

    Raises ``OSError`` if **objpath** does not exist.
    """
    objpath = self.path.expanduser(objpath)
    if self.path.isdir(objpath):
        # Start with the top directory's own size...
        total_size = self.size(objpath)
        # ...then add every file, subdirectory and symlink below it
        # (lstat: symlinks are counted, not followed)
        for dirpath, dirnames, filenames in self.walk(objpath):
            for f in filenames + dirnames:
                total_size += self.lstat(
                    self.path.join(dirpath, f)
                ).st_size
        return total_size
    # Plain file (or symlink): lstat raises OSError when missing
    return self.lstat(objpath).st_size
2462
+
2463
def mkdir(self, dirpath, fatal=True):
    """Normalises path name of **dirpath** and recursively creates this directory.

    :param str dirpath: the directory path to create.
    :param bool fatal: re-raise the OSError when the creation genuinely failed
        (i.e. the directory still does not exist).
    :return: *True* when the directory exists on return.
    """
    normdir = self.path.normpath(self.path.expanduser(dirpath))
    if normdir and not self.path.isdir(normdir):
        logger.debug("Cocooning directory %s", normdir)
        self.stderr("mkdir", normdir)
        try:
            self.makedirs(normdir)
            return True
        except OSError:
            # The directory may have been created exactly at the same time
            # by another process...
            if fatal and not self.path.isdir(normdir):
                raise
            return self.path.isdir(normdir)
    else:
        # Nothing to do: empty path or the directory already exists
        return True
2480
+
2481
def filecocoon(self, destination):
    """Normalises path name of ``destination`` and creates **destination**'s directory."""
    parent = self.path.dirname(self.path.expanduser(destination))
    return self.mkdir(parent)
2484
+
2485
# Matches the suffixes generated by safe_filesuffix (32 hex digits)
_SAFE_SUFFIX_RE = re.compile("_[a-f0-9]{32}$")

def safe_filesuffix(self):
    """Returns a file suffix that should be unique across the system."""
    # uuid1 mixes host id and timestamp, hence system-wide uniqueness
    return "_{:s}".format(uuid.uuid1().hex)
2490
+
2491
def safe_fileaddsuffix(self, name):
    """Returns a file path that will look like name + a unique suffix.

    Any pre-existing "safe" suffix is stripped first, so that repeated
    calls do not pile suffixes up.
    """
    dirname = self.path.dirname(name)
    radix = self._SAFE_SUFFIX_RE.sub("", self.path.basename(name))
    return self.path.join(dirname, radix + self.safe_filesuffix())
2497
+
2498
+ def _validate_symlink_below(self, symlink, valid_below):
2499
+ """
2500
+ Check that **symlink** is relative and that its target is below
2501
+ the **valid_below** directory.
2502
+
2503
+ :note: **valid_below** needs to be an absolute canonical path
2504
+ (this is user responsability)
2505
+ """
2506
+ link_to = self._os.readlink(symlink)
2507
+ # Is it relative ?
2508
+ if re.match(
2509
+ "^([^{0:s}]|..{0:s}|.{0:s})".format(re.escape(os.path.sep)),
2510
+ link_to,
2511
+ ):
2512
+ symlink_dir = self.path.realpath(
2513
+ self.path.abspath(self.path.dirname(symlink))
2514
+ )
2515
+ abspath_to = self.path.normpath(
2516
+ self.path.join(symlink_dir, link_to)
2517
+ )
2518
+ # Valid ?
2519
+ valid = (
2520
+ self.path.commonprefix([valid_below, abspath_to])
2521
+ == valid_below
2522
+ )
2523
+ return (
2524
+ self.path.relpath(abspath_to, start=symlink_dir)
2525
+ if valid
2526
+ else None
2527
+ )
2528
+ else:
2529
+ return None
2530
+
2531
def _copydatatree(self, src, dst, keep_symlinks_below=None):
    """Recursively copy a directory tree using copyfile.

    This is a variant of shutil's copytree. But, unlike with copytree,
    only data are copied (the permissions, access times, ... are ignored).

    The destination directory must not already exist.

    :param str src: the directory to copy.
    :param str dst: the destination directory (created by this call).
    :param keep_symlinks_below: symlinks whose target lies below this
        directory are re-created as symlinks instead of being copied
        (defaults to the canonical path of *src* on the first call).
    :raises CopyTreeError: with the accumulated list of per-file errors.
    """
    self.stderr("_copydatatree", src, dst)
    with self.mute_stderr():
        keep_symlinks_below = keep_symlinks_below or self.path.realpath(
            self.path.abspath(src)
        )
        names = self._os.listdir(src)
        self._os.makedirs(dst)
        # Accumulate errors so that one failure does not abort the copy
        errors = []
        for name in names:
            srcname = self._os.path.join(src, name)
            dstname = self._os.path.join(dst, name)
            try:
                if self.path.isdir(srcname):
                    # Recurse, keeping the original symlink validity root
                    self._copydatatree(
                        srcname,
                        dstname,
                        keep_symlinks_below=keep_symlinks_below,
                    )
                elif self._os.path.islink(srcname):
                    # Internal relative symlinks are preserved as symlinks;
                    # anything else is materialised as a plain copy
                    linkto = self._validate_symlink_below(
                        srcname, keep_symlinks_below
                    )
                    if linkto is not None:
                        self._os.symlink(linkto, dstname)
                    else:
                        self._sh.copyfile(srcname, dstname)
                else:
                    # Will raise a SpecialFileError for unsupported file types
                    self._sh.copyfile(srcname, dstname)
            # catch the Error from the recursive copytree so that we can
            # continue with other files
            except CopyTreeError as err:
                errors.extend(err.args[0])
            except OSError as why:
                errors.append((srcname, dstname, str(why)))
        if errors:
            raise CopyTreeError(errors)
    return dst
2577
+
2578
def rawcp(self, source, destination):
    """Perform a simple ``copyfile`` or ``copytree`` command depending on **source**.

    When copying a file, the operation is atomic. When copying a directory
    it is not (although the non-atomic portion is very limited).

    :return: *True* when the copy succeeded (for files, the result of the
        source/destination comparison).
    """
    source = self.path.expanduser(source)
    destination = self.path.expanduser(destination)
    self.stderr("rawcp", source, destination)
    # Copy to a uniquely-suffixed temporary first, then move into place
    tmp = self.safe_fileaddsuffix(destination)
    if self.path.isdir(source):
        self._copydatatree(source, tmp)
        # Move fails if a directory already exists ; so be careful...
        with self.secure_directory_move(destination):
            self.move(tmp, destination)
        return self.path.isdir(destination)
    else:
        self.copyfile(source, tmp)
        # Preserve the execution permissions...
        if self.xperm(source):
            self.xperm(tmp, force=True)
        self.move(tmp, destination)  # Move is atomic for a file
        if self._cmpaftercp:
            # Full content comparison when configured to do so
            return filecmp.cmp(source, destination)
        else:
            # Cheap sanity check: sizes must match
            return bool(self.size(source) == self.size(destination))
2604
+
2605
def hybridcp(self, source, destination, silent=False):
    """Copy the **source** file to a safe **destination**.

    **source** and/or **destination** may be File-like objects.

    If **destination** is a real-world file name (i.e. not a File-like object),
    the operation is atomic.

    :param source: the data to copy (path or File-like object).
    :param destination: where to copy the data (path or File-like object).
    :param bool silent: do not log an error when *source* is missing.
    :return: *True* (or the copyfileobj result) on success, *False* otherwise.
    """
    self.stderr("hybridcp", source, destination)
    if isinstance(source, str):
        if not self.path.exists(source):
            if not silent:
                logger.error("Missing source %s", source)
            return False
        source = open(self.path.expanduser(source), "rb")
        # We opened the stream ourselves -> we are in charge of closing it
        xsource = True
    else:
        xsource = False
        try:
            # Read the file-like object from its very beginning
            source.seek(0)
        except AttributeError:
            logger.warning(
                "Could not rewind io source before cp: " + str(source)
            )
    if isinstance(destination, str):
        if self.filecocoon(destination):
            # Write to a temp file
            original_dest = self.path.expanduser(destination)
            tmp_dest = self.safe_fileaddsuffix(
                self.path.expanduser(destination)
            )
            destination = open(tmp_dest, "wb")
            xdestination = True
        else:
            logger.error(
                "Could not create a cocoon for file %s", destination
            )
            return False
    else:
        destination.seek(0)
        xdestination = False
    rc = self.copyfileobj(source, destination)
    if rc is None:
        rc = True
    if xsource:
        source.close()
    if xdestination:
        destination.close()
        # Move the tmp_file to the real destination
        if not self.move(
            tmp_dest, original_dest
        ):  # Move is atomic for a file
            logger.error(
                "Cannot move the tmp file to the final destination %s",
                original_dest,
            )
            return False
    return rc
2663
+
2664
+ def is_samefs(self, path1, path2):
2665
+ """Check whether two paths are located on the same filesystem."""
2666
+ st1 = self.stat(path1)
2667
+ st2 = self.stat(self.path.dirname(self.path.realpath(path2)))
2668
+ return st1.st_dev == st2.st_dev and not self.path.islink(path1)
2669
+
2670
+ def _rawcp_instead_of_hardlink(self, source, destination, securecopy=True):
2671
+ self.stderr("rawcp_instead_of_hardlink", source, destination)
2672
+ if securecopy:
2673
+ rc = self.rawcp(source, destination)
2674
+ else:
2675
+ # Do not bother with a temporary file, create a direct copy
2676
+ self.copyfile(source, destination)
2677
+ # Preserve the execution permissions...
2678
+ if self.xperm(source):
2679
+ self.xperm(destination, force=True)
2680
+ rc = bool(self.size(source) == self.size(destination))
2681
+ return rc
2682
+
2683
    def _safe_hardlink(self, source, destination, securecopy=True):
        """Create a (unique) hardlink in a secure way.

        i.e. if the "Too many links" OS error is raised, we try to replace
        the original file by a copy of itself. If that also fails because of
        the lack of file permissions, a "simple" rawcp is made.

        :param bool securecopy: while creating the copy of the source file
                                (because of a "Too many links" OS error), create
                                a temporary filename and move it afterward to the
                                *destination*: longer but safer.
        :return: *True* when *destination* was successfully created
        """
        try:
            self._os.link(source, destination)
        except OSError as e:
            if e.errno == errno.EMLINK:
                # Too many links
                logger.warning(
                    "Too many links for the source file (%s).", source
                )
                if self.usr_file(source):
                    # The source belongs to the current user: make a plain
                    # copy at *destination* first...
                    rc = self._rawcp_instead_of_hardlink(
                        source, destination, securecopy=securecopy
                    )
                    if rc:
                        try:
                            logger.warning(
                                "Replacing the orignal file with a copy..."
                            )
                            # ...then swap the copy in place of the over-linked
                            # source (this resets the source's link count)
                            self.move(destination, source)
                        except OSError as ebis:
                            if ebis.errno == errno.EACCES:
                                # Permission denied
                                logger.warning(
                                    "No permissions to create a copy of the source file (%s)",
                                    source,
                                )
                                logger.warning(
                                    "Going on with the copy instead of the link..."
                                )
                            else:
                                raise
                        else:
                            # Ok, a copy was created for the source file
                            self.link(source, destination)
                            rc = self.path.samefile(source, destination)
                else:
                    # Not our file: we cannot rewrite it, give up
                    raise
            else:
                raise
        else:
            # The plain link worked: double-check both names share one inode
            rc = self.path.samefile(source, destination)
        return rc
2734
+
2735
    def hardlink(
        self,
        source,
        destination,
        link_threshold=0,
        readonly=True,
        securecopy=True,
        keep_symlinks_below=None,
    ):
        """Create hardlinks for both single files or directories.

        :param int link_threshold: if the source file size is smaller than
                                   **link_threshold** a copy is made (instead
                                   of a hardlink)
        :param bool readonly: ensure that all of the created links are readonly
        :param bool securecopy: while creating the copy of the source file
            (because of a "Too many links" OS error or **link_threshold**),
            create a temporary filename and move it afterward to the
            *destination*: longer but safer.
        :param str keep_symlinks_below: Preserve relative symlinks that have
                                        a target below the **keep_symlinks_below**
                                        directory (if omitted, **source** is used)
        :return: *True* on success
        """
        if self.path.isdir(source):
            self.stderr(
                "hardlink",
                source,
                destination,
                "#",
                "directory,",
                "readonly={!s}".format(readonly),
            )
            # Symlinks pointing below this directory will be kept as symlinks
            keep_symlinks_below = keep_symlinks_below or self.path.realpath(
                self.path.abspath(source)
            )
            with self.mute_stderr():
                # Mimics 'cp -al'
                names = self._os.listdir(source)
                self._os.makedirs(destination)
                rc = True
                for name in names:
                    srcname = self._os.path.join(source, name)
                    dstname = self._os.path.join(destination, name)
                    if self._os.path.islink(srcname):
                        linkto = self._validate_symlink_below(
                            srcname, keep_symlinks_below
                        )
                        if linkto is None:
                            # Symlink escapes keep_symlinks_below: follow it
                            # and hardlink its target instead
                            link_target = self.path.join(
                                self.path.dirname(srcname),
                                self._os.readlink(srcname),
                            )
                            rc = self.hardlink(
                                link_target,
                                dstname,
                                link_threshold=link_threshold,
                                readonly=readonly,
                                securecopy=securecopy,
                                keep_symlinks_below=keep_symlinks_below,
                            )
                        else:
                            # Safe relative symlink: reproduce it verbatim
                            self._os.symlink(linkto, dstname)
                    elif self.path.isdir(srcname):
                        # Recurse into sub-directories
                        rc = self.hardlink(
                            srcname,
                            dstname,
                            link_threshold=link_threshold,
                            readonly=readonly,
                            securecopy=securecopy,
                            keep_symlinks_below=keep_symlinks_below,
                        )
                    else:
                        # Plain file: copy below the threshold, link otherwise
                        if (
                            link_threshold
                            and self.size(srcname) < link_threshold
                        ):
                            rc = self._rawcp_instead_of_hardlink(
                                srcname, dstname, securecopy=securecopy
                            )
                        else:
                            rc = self._safe_hardlink(
                                srcname, dstname, securecopy=securecopy
                            )
                        if readonly and rc:
                            self.readonly(dstname)
                    if not rc:
                        logger.error(
                            "Error while processing %s (rc=%s)",
                            srcname,
                            str(rc),
                        )
                        break
                if rc:
                    # Replicate the source directory's metadata, but keep the
                    # destination directory itself writable
                    self._sh.copystat(source, destination)
                    self.wperm(destination, force=True)
            return rc
        else:
            if link_threshold and self.size(source) < link_threshold:
                rc = self._rawcp_instead_of_hardlink(
                    source, destination, securecopy=securecopy
                )
            else:
                self.stderr("hardlink", source, destination)
                rc = self._safe_hardlink(
                    source, destination, securecopy=securecopy
                )
            if readonly and rc:
                self.readonly(destination)
            return rc
2844
+
2845
    def _smartcp_cross_users_links_fallback(
        self,
        source,
        destination,
        smartcp_threshold,
        silent,
        exc,
        tmp_destination=None,
    ):
        """Catch errors related to Kernel configuration.

        NOTE: this helper must be called from within an ``except`` block
        (the final bare ``raise`` re-raises the active exception).

        :param OSError exc: the exception caught by the caller
        :param tmp_destination: leftover temporary path to clean up (if any)
        """
        if (exc.errno == errno.EPERM) and not self.usr_file(source):
            # This is expected to fail if the fs.protected_hardlinks
            # Linux kernel setting is 1.
            if tmp_destination is not None:
                self.remove(tmp_destination)
            logger.info("Force System's allow_cross_users_links to False")
            self.allow_cross_users_links = False
            logger.info("Re-running the smartcp command")
            # With cross-user links disabled, smartcp will fall back to rawcp
            return self.smartcp(
                source,
                destination,
                smartcp_threshold=smartcp_threshold,
                silent=silent,
            )
        else:
            # Unrelated error: propagate the caller's exception
            raise
2871
+
2872
    def smartcp(self, source, destination, smartcp_threshold=0, silent=False):
        """
        Hard link the **source** file to a safe **destination** (if possible).
        Otherwise, let the standard copy do the job.

        **source** and/or **destination** may be File-like objects.

        When working on a file, the operation is atomic. When working on a
        directory some restrictions apply (see :meth:`rawcp`)

        :param int smartcp_threshold: files smaller than this are copied
                                      instead of hardlinked
        :param bool silent: do not log an error when **source** is missing
        """
        self.stderr("smartcp", source, destination)
        if not isinstance(source, str) or not isinstance(destination, str):
            # File-like objects cannot be hardlinked: plain hybrid copy
            return self.hybridcp(source, destination)
        source = self.path.expanduser(source)
        if not self.path.exists(source):
            if not silent:
                logger.error("Missing source %s", source)
            return False
        if self.filecocoon(destination):
            destination = self.path.expanduser(destination)
            if self.path.islink(source):
                # Solve the symbolic link: this may avoid a rawcp
                source = self.path.realpath(source)
            # Hardlinks only work within one filesystem, and cross-user
            # links may be forbidden by the kernel
            if self.is_samefs(source, destination) and (
                self.allow_cross_users_links or self.usr_file(source)
            ):
                tmp_destination = self.safe_fileaddsuffix(destination)
                if self.path.isdir(source):
                    try:
                        rc = self.hardlink(
                            source,
                            tmp_destination,
                            link_threshold=smartcp_threshold,
                            securecopy=False,
                        )
                    except OSError as e:
                        # Possibly fs.protected_hardlinks: retry without links
                        rc = self._smartcp_cross_users_links_fallback(
                            source,
                            destination,
                            smartcp_threshold,
                            silent,
                            e,
                            tmp_destination=tmp_destination,
                        )
                    else:
                        if rc:
                            # Move fails if a directory already exists ; so be careful...
                            with self.secure_directory_move(destination):
                                rc = self.move(tmp_destination, destination)
                            if not rc:
                                logger.error(
                                    "Cannot move the tmp directory to the final destination %s",
                                    destination,
                                )
                                self.remove(
                                    tmp_destination
                                )  # Anyway, try to clean-up things
                        else:
                            logger.error(
                                "Cannot copy the data to the tmp directory %s",
                                tmp_destination,
                            )
                            self.remove(
                                tmp_destination
                            )  # Anyway, try to clean-up things
                    return rc
                else:
                    try:
                        rc = self.hardlink(
                            source,
                            tmp_destination,
                            link_threshold=smartcp_threshold,
                            securecopy=False,
                        )
                    except OSError as e:
                        rc = self._smartcp_cross_users_links_fallback(
                            source, destination, smartcp_threshold, silent, e
                        )
                    else:
                        rc = rc and self.move(
                            tmp_destination, destination
                        )  # Move is atomic for a file
                        # On some systems, the temporary file may remain (if the
                        # destination's inode is identical to the tmp_destination's
                        # inode). The following call to remove will remove leftovers.
                        self.remove(tmp_destination)
                    return rc
            else:
                rc = self.rawcp(
                    source, destination
                )  # Rawcp is atomic as much as possible
                if rc:
                    # Mimic the hardlink behaviour: the result is read-only
                    if self.path.isdir(destination):
                        for copiedfile in self.ffind(destination):
                            if not self.path.islink(
                                copiedfile
                            ):  # This make no sense to chmod symlinks
                                self.chmod(copiedfile, 0o444)
                    else:
                        self.readonly(destination)
                return rc
        else:
            logger.error("Could not create a cocoon for file %s", destination)
            return False
2976
+
2977
+ @fmtshcmd
2978
+ def cp(
2979
+ self,
2980
+ source,
2981
+ destination,
2982
+ intent="inout",
2983
+ smartcp=True,
2984
+ smartcp_threshold=0,
2985
+ silent=False,
2986
+ ):
2987
+ """Copy the **source** file to a safe **destination**.
2988
+
2989
+ :param source: The source of data (either a path to file or a
2990
+ File-like object)
2991
+ :type source: str or File-like object
2992
+ :param destination: The destination of data (either a path to file or a
2993
+ File-like object)
2994
+ :type destination: str or File-like object
2995
+ :param str intent: 'in' for a read-only copy. 'inout' for a read-write copy
2996
+ (default: 'inout').
2997
+ :param bool smartcp: use :meth:`smartcp` as much as possible (default: *True*)
2998
+ :param int smartcp_threshold: Should smartcp be used, it will only be activated if
2999
+ the source file size is above *smartcp_threshold* Bytes.
3000
+ :param bool silent: do not complain on error (default: *False*).
3001
+
3002
+ It relies on :meth:`hybridcp`, :meth:`smartcp` or :meth:`rawcp`
3003
+ depending on **source**, **destination** and **intent**.
3004
+
3005
+ The fastest option should be used...
3006
+ """
3007
+ self.stderr("cp", source, destination)
3008
+ if not isinstance(source, str) or not isinstance(destination, str):
3009
+ return self.hybridcp(source, destination, silent=silent)
3010
+ if not self.path.exists(source):
3011
+ if not silent:
3012
+ logger.error("Missing source %s", source)
3013
+ return False
3014
+ if smartcp and intent == "in":
3015
+ return self.smartcp(
3016
+ source,
3017
+ destination,
3018
+ smartcp_threshold=smartcp_threshold,
3019
+ silent=silent,
3020
+ )
3021
+ if self.filecocoon(destination):
3022
+ return self.rawcp(source, destination)
3023
+ else:
3024
+ logger.error("Could not create a cocoon for file %s", destination)
3025
+ return False
3026
+
3027
+ def glob(self, *args):
3028
+ """Glob file system entries according to ``args``. Returns a list."""
3029
+ entries = []
3030
+ for entry in args:
3031
+ if entry.startswith(":"):
3032
+ entries.append(entry[1:])
3033
+ else:
3034
+ entries.extend(glob.glob(self.path.expanduser(entry)))
3035
+ return entries
3036
+
3037
+ def rmall(self, *args, **kw):
3038
+ """Unlink the specified **args** objects with globbing."""
3039
+ rc = True
3040
+ for pname in args:
3041
+ for objpath in self.glob(pname):
3042
+ rc = self.remove(objpath, **kw) and rc
3043
+
3044
+ def safepath(self, thispath, safedirs):
3045
+ """
3046
+ Boolean to check if **thispath** is a subpath of a **safedirs**
3047
+ with sufficient depth (or not a subpath at all)
3048
+ """
3049
+ safe = True
3050
+ if len(thispath.split(self._os.sep)) < self._rmtreemin + 1:
3051
+ logger.warning(
3052
+ "Unsafe starting point depth %s (min is %s)",
3053
+ thispath,
3054
+ self._rmtreemin,
3055
+ )
3056
+ safe = False
3057
+ else:
3058
+ for safepack in safedirs:
3059
+ (safedir, d) = safepack
3060
+ rp = self.path.relpath(thispath, safedir)
3061
+ if not rp.startswith(".."):
3062
+ if len(rp.split(self._os.sep)) < d:
3063
+ logger.warning(
3064
+ "Unsafe access to %s relative to %s",
3065
+ thispath,
3066
+ safedir,
3067
+ )
3068
+ safe = False
3069
+ return safe
3070
+
3071
+ def rmsafe(self, pathlist, safedirs):
3072
+ """
3073
+ Recursive unlinks of the specified **pathlist** objects (if safe according
3074
+ to :meth:`safepath`).
3075
+ """
3076
+ ok = True
3077
+ if isinstance(pathlist, str):
3078
+ pathlist = [pathlist]
3079
+ for pname in pathlist:
3080
+ for entry in filter(
3081
+ lambda x: self.safepath(x, safedirs), self.glob(pname)
3082
+ ):
3083
+ ok = self.remove(entry) and ok
3084
+ return ok
3085
+
3086
+ def _globcmd(self, cmd, args, **kw):
3087
+ """Globbing files or directories as arguments before running ``cmd``."""
3088
+ cmd.extend([opt for opt in args if opt.startswith("-")])
3089
+ cmdlen = len(cmd)
3090
+ cmdargs = False
3091
+ globtries = [
3092
+ self.path.expanduser(x) for x in args if not x.startswith("-")
3093
+ ]
3094
+ for pname in globtries:
3095
+ cmdargs = True
3096
+ cmd.extend(self.glob(pname))
3097
+ if cmdargs and len(cmd) == cmdlen:
3098
+ logger.warning("Could not find any matching pattern %s", globtries)
3099
+ return False
3100
+ else:
3101
+ kw.setdefault("ok", [0])
3102
+ return self.spawn(cmd, **kw)
3103
+
3104
+ @_kw2spawn
3105
+ def wc(self, *args, **kw):
3106
+ """Word count on globbed files."""
3107
+ return self._globcmd(["wc"], args, **kw)
3108
+
3109
+ @_kw2spawn
3110
+ def ls(self, *args, **kw):
3111
+ """Clone of the eponymous unix command."""
3112
+ return self._globcmd(["ls"], args, **kw)
3113
+
3114
+ @_kw2spawn
3115
+ def ll(self, *args, **kw):
3116
+ """Clone of the eponymous unix alias (ls -l)."""
3117
+ kw["output"] = True
3118
+ llresult = self._globcmd(["ls", "-l"], args, **kw)
3119
+ if llresult:
3120
+ for lline in [x for x in llresult if not x.startswith("total")]:
3121
+ print(lline)
3122
+ else:
3123
+ return False
3124
+
3125
+ @_kw2spawn
3126
+ def dir(self, *args, **kw):
3127
+ """Proxy to ``ls('-l')``."""
3128
+ return self._globcmd(["ls", "-l"], args, **kw)
3129
+
3130
+ @_kw2spawn
3131
+ def cat(self, *args, **kw):
3132
+ """Clone of the eponymous unix command."""
3133
+ return self._globcmd(["cat"], args, **kw)
3134
+
3135
+ @fmtshcmd
3136
+ @_kw2spawn
3137
+ def diff(self, *args, **kw):
3138
+ """Clone of the eponymous unix command."""
3139
+ kw.setdefault("ok", [0, 1])
3140
+ kw.setdefault("output", False)
3141
+ return self._globcmd(["cmp"], args, **kw)
3142
+
3143
+ @_kw2spawn
3144
+ def rmglob(self, *args, **kw):
3145
+ """Wrapper of the shell's ``rm`` command through the :meth:`globcmd` method."""
3146
+ return self._globcmd(["rm"], args, **kw)
3147
+
3148
+ @fmtshcmd
3149
+ def move(self, source, destination):
3150
+ """Move the ``source`` file or directory (using shutil).
3151
+
3152
+ :param str source: The source object (file, directory, ...)
3153
+ :param str destination: The destination object (file, directory, ...)
3154
+ """
3155
+ self.stderr("move", source, destination)
3156
+ try:
3157
+ self._sh.move(source, destination)
3158
+ except Exception:
3159
+ logger.critical("Could not move <%s> to <%s>", source, destination)
3160
+ raise
3161
+ else:
3162
+ return True
3163
+
3164
    @contextlib.contextmanager
    def secure_directory_move(self, destination):
        """Temporarily set **destination** aside so that a move can succeed.

        A lock directory (``<destination>.vortex-lockdir``) serialises
        concurrent attempts on the same **destination**. If **destination**
        already exists, it is first moved to a temporary name, the caller's
        body is executed (it usually moves the new data in place), and the
        temporary copy is removed afterwards.

        Yields *True* when such a cleanup was scheduled, *False* otherwise.
        """
        with self.lockdir_context(
            destination + ".vortex-lockdir", sloppy=True
        ):
            do_cleanup = isinstance(destination, str) and self.path.exists(
                destination
            )
            if do_cleanup:
                # Warning: Not an atomic portion of code (sorry)
                tmp_destination = self.safe_fileaddsuffix(destination)
                self.move(destination, tmp_destination)
                yield do_cleanup
                # End of none atomic part
            else:
                yield do_cleanup
            if do_cleanup:
                # Drop the set-aside copy of the previous destination
                self.remove(tmp_destination)
3182
+
3183
+ @fmtshcmd
3184
+ def mv(self, source, destination):
3185
+ """Move the ``source`` file or directory (using shutil or hybridcp).
3186
+
3187
+ :param source: The source object (file, directory, File-like object, ...)
3188
+ :param destination: The destination object (file, directory, File-like object, ...)
3189
+ """
3190
+ self.stderr("mv", source, destination)
3191
+ if not isinstance(source, str) or not isinstance(destination, str):
3192
+ self.hybridcp(source, destination)
3193
+ if isinstance(source, str):
3194
+ return self.remove(source)
3195
+ else:
3196
+ return self.move(source, destination)
3197
+
3198
+ @_kw2spawn
3199
+ def mvglob(self, *args):
3200
+ """Wrapper of the shell's ``mv`` command through the :meth:`globcmd` method."""
3201
+ return self._globcmd(["mv"], args)
3202
+
3203
+ def listdir(self, *args):
3204
+ """Proxy to standard :mod:`os` directory listing function."""
3205
+ if not args:
3206
+ args = (".",)
3207
+ self.stderr("listdir", *args)
3208
+ return self._os.listdir(self.path.expanduser(args[0]))
3209
+
3210
+ def pyls(self, *args):
3211
+ """
3212
+ Proxy to globbing after removing any option. A bit like the
3213
+ :meth:`ls` method except that that shell's ``ls`` command is not actually
3214
+ called.
3215
+ """
3216
+ rl = [x for x in args if not x.startswith("-")]
3217
+ if not rl:
3218
+ rl.append("*")
3219
+ self.stderr("pyls", *rl)
3220
+ return self.glob(*rl)
3221
+
3222
+ def ldirs(self, *args):
3223
+ """
3224
+ Proxy to directories globbing after removing any option. A bit like the
3225
+ :meth:`ls` method except that that shell's ``ls`` command is not actually
3226
+ called.
3227
+ """
3228
+ rl = [x for x in args if not x.startswith("-")]
3229
+ if not rl:
3230
+ rl.append("*")
3231
+ self.stderr("ldirs", *rl)
3232
+ return [x for x in self.glob(*rl) if self.path.isdir(x)]
3233
+
3234
+ @_kw2spawn
3235
+ def gzip(self, *args, **kw):
3236
+ """Simple gzip compression of a file."""
3237
+ cmd = ["gzip", "-vf", args[0]]
3238
+ cmd.extend(args[1:])
3239
+ return self.spawn(cmd, **kw)
3240
+
3241
+ @_kw2spawn
3242
+ def gunzip(self, *args, **kw):
3243
+ """Simple gunzip of a gzip-compressed file."""
3244
+ cmd = ["gunzip", args[0]]
3245
+ cmd.extend(args[1:])
3246
+ return self.spawn(cmd, **kw)
3247
+
3248
+ def is_tarfile(self, filename):
3249
+ """Return a boolean according to the tar status of the **filename**."""
3250
+ return tarfile.is_tarfile(self.path.expanduser(filename))
3251
+
3252
+ def taropts(self, tarfile, opts, verbose=True, autocompress=True):
3253
+ """Build a proper string sequence of tar options."""
3254
+ zopt = set(opts)
3255
+ if verbose:
3256
+ zopt.add("v")
3257
+ else:
3258
+ zopt.discard("v")
3259
+ if autocompress:
3260
+ if tarfile.endswith("gz"):
3261
+ # includes the conventional "*.tgz"
3262
+ zopt.add("z")
3263
+ else:
3264
+ zopt.discard("z")
3265
+ if tarfile.endswith("bz") or tarfile.endswith("bz2"):
3266
+ # includes the conventional "*.tbz"
3267
+ zopt.add("j")
3268
+ else:
3269
+ zopt.discard("j")
3270
+ return "".join(zopt)
3271
+
3272
+ @_kw2spawn
3273
+ def tar(self, *args, **kw):
3274
+ """Create a file archive (always c-something).
3275
+
3276
+ :example: ``self.tar('destination.tar', 'directory1', 'directory2')``
3277
+ """
3278
+ opts = self.taropts(
3279
+ args[0],
3280
+ "cf",
3281
+ kw.pop("verbose", True),
3282
+ kw.pop("autocompress", True),
3283
+ )
3284
+ cmd = ["tar", opts, args[0]]
3285
+ cmd.extend(self.glob(*args[1:]))
3286
+ return self.spawn(cmd, **kw)
3287
+
3288
+ @_kw2spawn
3289
+ def untar(self, *args, **kw):
3290
+ """Unpack a file archive (always x-something).
3291
+
3292
+ :example: ``self.untar('source.tar')``
3293
+ :example: ``self.untar('source.tar', 'to_untar1', 'to_untar2')``
3294
+ """
3295
+ opts = self.taropts(
3296
+ args[0],
3297
+ "xf",
3298
+ kw.pop("verbose", True),
3299
+ kw.pop("autocompress", True),
3300
+ )
3301
+ cmd = ["tar", opts, args[0]]
3302
+ cmd.extend(args[1:])
3303
+ return self.spawn(cmd, **kw)
3304
+
3305
    def smartuntar(self, source, destination, **kw):
        """Unpack a file archive in the appropriate directory.

        If **uniquelevel_ignore** is *True* (default: *False*) and the tar file
        contains only one directory, it will be extracted and renamed to
        **destination**. Otherwise, **destination** will be created and the tar's
        content will be extracted inside it.

        This is done in a relatively safe way since it is checked that no existing
        files/directories are overwritten.

        :return: the list of unpacked top-level entries
        """
        uniquelevel_ignore = kw.pop("uniquelevel_ignore", False)
        fullsource = self.path.realpath(source)
        self.mkdir(destination)
        # Extract inside a brand new temporary directory (below destination)
        loctmp = tempfile.mkdtemp(prefix="untar_", dir=destination)
        with self.cdcontext(loctmp, clean_onexit=True):
            output_setting = kw.pop("output", True)
            output_txt = self.untar(fullsource, output=output_setting, **kw)
            if output_setting and output_txt:
                logger.info("Untar command output:\n%s", "\n".join(output_txt))
            unpacked = self.glob("*")
            unpacked_prefix = "."
            # If requested, ignore the first level of directory
            if (
                uniquelevel_ignore
                and len(unpacked) == 1
                and self.path.isdir(self.path.join(unpacked[0]))
            ):
                unpacked_prefix = unpacked[0]
                logger.info(
                    "Moving contents one level up: %s", unpacked_prefix
                )
                with self.cdcontext(unpacked_prefix):
                    unpacked = self.glob("*")
            # Move each unpacked item from the temporary directory to the
            # destination ('..' is relative to the temporary directory)
            for untaritem in unpacked:
                itemtarget = self.path.join(
                    "..", self.path.basename(untaritem)
                )
                if self.path.exists(itemtarget):
                    # Never overwrite pre-existing data
                    logger.error(
                        "Some previous item exists before untar [%s]",
                        untaritem,
                    )
                else:
                    self.mv(
                        self.path.join(unpacked_prefix, untaritem), itemtarget
                    )
        return unpacked
3353
+
3354
+ def is_tarname(self, objname):
3355
+ """Check if a ``objname`` is a string with ``.tar`` suffix."""
3356
+ return isinstance(objname, str) and (
3357
+ objname.endswith(".tar")
3358
+ or objname.endswith(".tar.gz")
3359
+ or objname.endswith(".tgz")
3360
+ or objname.endswith(".tar.bz2")
3361
+ or objname.endswith(".tbz")
3362
+ )
3363
+
3364
+ def tarname_radix(self, objname):
3365
+ """Remove any ``.tar`` specific suffix."""
3366
+ if not self.is_tarname(objname):
3367
+ return objname
3368
+ radix = self.path.splitext(objname)[0]
3369
+ if radix.endswith(".tar"):
3370
+ radix = radix[:-4]
3371
+ return radix
3372
+
3373
+ def tarname_splitext(self, objname):
3374
+ """Like os.path.splitext, but for tar names (e.g. might return ``.tar.gz``)."""
3375
+ if not self.is_tarname(objname):
3376
+ return (objname, "")
3377
+ radix = self.tarname_radix(objname)
3378
+ ext = objname.replace(radix, "")
3379
+ return (radix, ext)
3380
+
3381
+ @fmtshcmd
3382
+ def forcepack(self, source, destination=None): # @UnusedVariable
3383
+ """Return the path to a "packed" data (i.e. a ready to send single file)."""
3384
+ return source
3385
+
3386
+ @fmtshcmd
3387
+ def forceunpack(self, source): # @UnusedVariable
3388
+ """Unpack the data "inplace" (if needed, depending on the format)."""
3389
+ return True
3390
+
3391
+ def blind_dump(self, gateway, obj, destination, bytesdump=False, **opts):
3392
+ """
3393
+ Use **gateway** for a blind dump of the **obj** in file **destination**,
3394
+ (either a file descriptor or a filename).
3395
+ """
3396
+ rc = None
3397
+ if hasattr(destination, "write"):
3398
+ rc = gateway.dump(obj, destination, **opts)
3399
+ else:
3400
+ if self.filecocoon(destination):
3401
+ with open(
3402
+ self.path.expanduser(destination),
3403
+ "w" + ("b" if bytesdump else ""),
3404
+ ) as fd:
3405
+ rc = gateway.dump(obj, fd, **opts)
3406
+ return rc
3407
+
3408
+ def pickle_dump(self, obj, destination, **opts):
3409
+ """
3410
+ Dump a pickled representation of specified **obj** in file **destination**,
3411
+ (either a file descriptor or a filename).
3412
+ """
3413
+ return self.blind_dump(
3414
+ pickle, obj, destination, bytesdump=True, **opts
3415
+ )
3416
+
3417
+ def json_dump(self, obj, destination, **opts):
3418
+ """
3419
+ Dump a json representation of specified **obj** in file **destination**,
3420
+ (either a file descriptor or a filename).
3421
+ """
3422
+ return self.blind_dump(json, obj, destination, **opts)
3423
+
3424
+ def blind_load(self, source, gateway, bytesload=False):
3425
+ """
3426
+ Use **gateway** for a blind load the representation stored in file **source**,
3427
+ (either a file descriptor or a filename).
3428
+ """
3429
+ if hasattr(source, "read"):
3430
+ obj = gateway.load(source)
3431
+ else:
3432
+ with open(
3433
+ self.path.expanduser(source), "r" + ("b" if bytesload else "")
3434
+ ) as fd:
3435
+ obj = gateway.load(fd)
3436
+ return obj
3437
+
3438
+ def pickle_load(self, source):
3439
+ """
3440
+ Load from a pickled representation stored in file **source**,
3441
+ (either a file descriptor or a filename).
3442
+ """
3443
+ return self.blind_load(source, gateway=pickle, bytesload=True)
3444
+
3445
+ def json_load(self, source):
3446
+ """
3447
+ Load from a json representation stored in file **source**,
3448
+ (either a file descriptor or a filename).
3449
+ """
3450
+ return self.blind_load(source, gateway=json)
3451
+
3452
+ def pickle_clone(self, obj):
3453
+ """Clone an object (**obj**) through pickling / unpickling."""
3454
+ return pickle.loads(pickle.dumps(obj))
3455
+
3456
+ def utlines(self, *args):
3457
+ """Return number of significant code or configuration lines in specified directories."""
3458
+ lookfiles = [
3459
+ x
3460
+ for x in self.ffind(*args)
3461
+ if self.path.splitext[1] in [".py", ".ini", ".tpl", ".rst"]
3462
+ ]
3463
+ return len(
3464
+ [
3465
+ x
3466
+ for x in self.cat(*lookfiles)
3467
+ if re.search(r"\S", x) and re.search(r"[^\'\"\)\],\s]", x)
3468
+ ]
3469
+ )
3470
+
3471
+ def _signal_intercept_init(self):
3472
+ """Initialise the signal handler object (but do not activate it)."""
3473
+ self._sighandler = SignalInterruptHandler(emitlogs=False)
3474
+
3475
+ def signal_intercept_on(self):
3476
+ """Activate the signal's catching.
3477
+
3478
+ See :class:`bronx.system.interrupt.SignalInterruptHandler` documentation.
3479
+ """
3480
+ self._sighandler.activate()
3481
+
3482
+ def signal_intercept_off(self):
3483
+ """Deactivate the signal's catching.
3484
+
3485
+ See :class:`bronx.system.interrupt.SignalInterruptHandler` documentation.
3486
+ """
3487
+ self._sighandler.deactivate()
3488
+
3489
    # Parses one 'ldd' output line of the form "libname => /path (0xADDR)"
    # or "libname => not found"; group(1) is the library name, group(2) the
    # resolved path (absent when the library is not found).
    _LDD_REGEX = re.compile(
        r"^\s*([^\s]+)\s+=>\s*(?:([^\s]+)\s+\(0x.+\)|not found)$"
    )
3492
+
3493
+ def ldd(self, filename):
3494
+ """Call ldd on **filename**.
3495
+
3496
+ Return the mapping between the library name and its physical path.
3497
+ """
3498
+ if self.path.isfile(filename):
3499
+ ldd_out = self.spawn(("ldd", filename))
3500
+ libs = dict()
3501
+ for ldd_match in [self._LDD_REGEX.match(l) for l in ldd_out]:
3502
+ if ldd_match is not None:
3503
+ libs[ldd_match.group(1)] = ldd_match.group(2) or None
3504
+ return libs
3505
+ else:
3506
+ raise ValueError("{} is not a regular file".format(filename))
3507
+
3508
+ def generic_compress(self, pipelinedesc, source, destination=None):
3509
+ """Compress a file using the :class:`CompressionPipeline` class.
3510
+
3511
+ See the :class:`CompressionPipeline` class documentation for more details.
3512
+
3513
+ :example: "generic_compress('bzip2', 'toto')" will create a toto.bz2 file.
3514
+ """
3515
+ cp = CompressionPipeline(self, pipelinedesc)
3516
+ if destination is None:
3517
+ if isinstance(source, str):
3518
+ destination = source + cp.suffix
3519
+ else:
3520
+ raise ValueError(
3521
+ "If destination is omitted, source must be a filename."
3522
+ )
3523
+ return cp.compress2file(source, destination)
3524
+
3525
+ def generic_uncompress(self, pipelinedesc, source, destination=None):
3526
+ """Uncompress a file using the :class:`CompressionPipeline` class.
3527
+
3528
+ See the :class:`CompressionPipeline` class documentation for more details.
3529
+
3530
+ :example: "generic_uncompress('bzip2', 'toto.bz2')" will create a toto file.
3531
+ """
3532
+ cp = CompressionPipeline(self, pipelinedesc)
3533
+ if destination is None:
3534
+ if isinstance(source, str):
3535
+ if source.endswith(cp.suffix):
3536
+ destination = source[: -len(cp.suffix)]
3537
+ else:
3538
+ raise ValueError(
3539
+ "Source do not exhibit the appropriate suffix ({:s})".format(
3540
+ cp.suffix
3541
+ )
3542
+ )
3543
+ else:
3544
+ raise ValueError(
3545
+ "If destination is omitted, source must be a filename."
3546
+ )
3547
+ return cp.file2uncompress(source, destination)
3548
+
3549
+ def find_mount_point(self, path):
3550
+ """Return the mount point of *path*.
3551
+
3552
+ :param str path: path where to look for a mount point
3553
+ :return: the path to the mount point
3554
+ :rtype: str
3555
+ """
3556
+ if not self._os.path.exists(path):
3557
+ logger.warning("Path does not exist: <%s>", path)
3558
+
3559
+ path = self._os.path.abspath(path)
3560
+ while not self._os.path.ismount(path):
3561
+ path = self._os.path.dirname(path)
3562
+
3563
+ return path
3564
+
3565
    def _lockdir_create(self, ldir, blocking=False, timeout=300, sleeptime=2):
        """Pseudo-lock mechanism based on atomic directory creation: acquire lock.

        :param str ldir: The target directory that acts as a lock
        :param bool blocking: Block (at most **timeout** seconds) until the
                              lock can be acquired
        :param float timeout: Block at most timeout seconds (if **blocking** is True)
        :param float sleeptime: When blocking, wait **sleeptime** seconds between two
                                attempts to acquire the lock.
        :return: *True* when the lock was acquired, *False* otherwise
        """
        rc = None  # None means "no attempt made yet" (always try at least once)
        t0 = time.time()
        while rc is None or (
            not rc and blocking and time.time() - t0 < timeout
        ):
            if rc is not None:
                # A previous attempt failed: wait before retrying
                self.sleep(sleeptime)
            try:
                # Important note: os' original mkdir function is used on purpose
                # since we need to get an error if the target directory already
                # exists
                self._os.mkdir(ldir)
            except FileExistsError:
                rc = False
            else:
                rc = True
        return rc
3592
+
3593
+ def _lockdir_destroy(self, ldir):
3594
+ """Pseudo-lock mechanism based on atomic directory creation: release lock.
3595
+
3596
+ :param str ldir: The target directory that acts as a lock
3597
+ """
3598
+ try:
3599
+ self.rmdir(ldir)
3600
+ except FileNotFoundError:
3601
+ logger.warning("'%s' did not exists... that's odd", ldir)
3602
+
3603
    @contextlib.contextmanager
    def lockdir_context(
        self,
        ldir,
        sloppy=False,
        timeout=120,
        sleeptime_min=0.1,
        sleeptime_max=0.3,
    ):
        """Try to acquire a lock directory and after that remove it.

        :param str ldir: The target directory that acts as a lock
        :param bool sloppy: If the lock cannot be acquired after *timeout*
            seconds, go on anyway (instead of raising :class:`OSError`)
        :param float timeout: Block at most timeout seconds
        :param float sleeptime_min: When blocking, wait at least **sleeptime_min** seconds
            between two attempts to acquire the lock.
        :param float sleeptime_max: When blocking, wait at most **sleeptime_max** seconds
            between two attempts to acquire the lock.
        :raises OSError: when the lock cannot be acquired and **sloppy** is False
        """
        # Randomise the polling period so that concurrent processes do not
        # retry in lock-step.
        sleeptime = (
            sleeptime_min + (sleeptime_max - sleeptime_min) * random.random()
        )
        # NOTE(review): filecocoon presumably prepares the parent directory
        # of *ldir* — confirm against its definition in this class.
        self.filecocoon(ldir)
        rc = self._lockdir_create(
            ldir, blocking=True, timeout=timeout, sleeptime=sleeptime
        )
        try:
            if not rc:
                msg = "Could not acquire lockdir < {:s} >. Already exists.".format(
                    ldir
                )
                if sloppy:
                    logger.warning(msg + ".. but going on.")
                else:
                    raise OSError(msg)
            yield
        finally:
            # Only release the lock when we actually own it, or when running
            # in sloppy mode (where we went on without owning it).
            if rc or sloppy:
                self._lockdir_destroy(ldir)
3641
+
3642
+ @property
3643
+ def _appwide_lockbase(self):
3644
+ """Compute the path to the application wide locks base directory."""
3645
+ if self.glove is not None:
3646
+ myglove = self.glove
3647
+ rcdir = myglove.configrc
3648
+ lockdir = self.path.join(
3649
+ rcdir,
3650
+ "appwide_locks",
3651
+ "{0.vapp:s}-{0.vconf:s}".format(myglove),
3652
+ )
3653
+ self.mkdir(lockdir)
3654
+ return lockdir
3655
+ else:
3656
+ raise RuntimeError("A glove must be defined")
3657
+
3658
+ def _appwide_lockdir_path(self, label):
3659
+ """Compute the path to the lock directory."""
3660
+ return self.path.join(self._appwide_lockbase, label)
3661
+
3662
+ def appwide_lock(self, label, blocking=False, timeout=300, sleeptime=2):
3663
+ """Pseudo-lock mechanism based on atomic directory creation: acquire lock.
3664
+
3665
+ The lock is located in a directory that depends on the vapp and vconf
3666
+ attributes of the current glove. The user must provide a **label** that
3667
+ helps to identify the lock purpose (it may include the xpid, ...).
3668
+
3669
+ :param str label: The name of the desired lock
3670
+ :param bool blocking: Block (at most **timeout** seconds) until the
3671
+ lock can be acquired
3672
+ :param float timeout: Block at most timeout seconds (if **blocking** is True)
3673
+ :param float sleeptime: When blocking, wait **sleeptime** seconds between to
3674
+ attempts to acquire the lock.
3675
+ """
3676
+ ldir = self._appwide_lockdir_path(label)
3677
+ return self._lockdir_create(
3678
+ ldir, blocking=blocking, timeout=timeout, sleeptime=sleeptime
3679
+ )
3680
+
3681
+ def appwide_unlock(self, label):
3682
+ """Pseudo-lock mechanism based on atomic directory creation: release lock.
3683
+
3684
+ :param str label: The name of the desired lock
3685
+ """
3686
+ ldir = self._appwide_lockdir_path(label)
3687
+ self._lockdir_destroy(ldir)
3688
+
3689
+
3690
class Python34:
    """Python features starting from version 3.4.

    Mixin class: combined (via multiple inheritance) with the system classes
    below for hosts running a recent enough Python (see ``_python34_fp``).
    """

    def netcdf_diff(self, netcdf1, netcdf2, **kw):
        """Difference between two NetCDF files.

        Use the netCDF4 package to do so...

        :param netcdf1: first file to compare
        :param netcdf2: second file to compare
        :param kw: apparently unused — presumably kept for interface
            compatibility with other ``*_diff`` methods (TODO confirm)
        :return: the boolean outcome of
            :func:`bronx.datagrip.netcdf.netcdf_file_diff`; ``False`` when
            the netCDF support is unavailable
        """

        # Optional, netcdf comparison tool
        # NOTE(review): the checker label "netdcf" looks like a typo for
        # "netcdf" — confirm against the ExternalCodeImportChecker registry
        # before changing it, since the label drives the availability lookup.
        b_netcdf_checker = ExternalCodeImportChecker("netdcf")
        # Import performed at call time (not at module level), so that hosts
        # without the netCDF4 stack can still load this module.
        from bronx.datagrip import netcdf as b_netcdf

        if b_netcdf_checker.is_available():
            # Unfortunately, the netCDF4 package seems to leak memory,
            # using multiprocessing to mitigate this mess :-(

            def _compare_function(nc1, nc2, outcome):
                """Function started by the subprocess."""
                outcome.value = int(b_netcdf.netcdf_file_diff(nc1, nc2))

            # Shared integer used to retrieve the comparison result from
            # the child process.
            rc = multiprocessing.Value("i", 0)
            p = multiprocessing.Process(
                target=_compare_function, args=(netcdf1, netcdf2, rc)
            )
            p.start()
            p.join()
            return bool(rc.value)
        else:
            logger.error(
                "Unable to load the 'bronx.datagrip.netcdf' package. "
                + "The netcdf library and/or 'netCDF4' python package are probably missing."
            )
            return False

    # Let's make this method compatible with fmtshcmd...
    netcdf_diff.func_extern = True
3730
+
3731
+
3732
# Footprint fragment shared by the "*34p" classes below: it restricts the
# candidate class to hosts whose Python interpreter is at least 3.4
# (combined with the Python34 mixin in each class's footprint list).
_python34_fp = footprints.Footprint(
    info="An abstract footprint to be used with the Python34 Mixin",
    only=dict(after_python=PythonSimplifiedVersion("3.4.0")),
)
3736
+
3737
+
3738
class Garbage(OSExtended):
    """
    Default system class for weird systems.

    Hopefully an extended system will be loaded later on...
    """

    _abstract = True
    _footprint = {
        "info": "Garbage base system",
        "attr": {
            "sysname": {
                "outcast": ["Linux", "Darwin", "UnitTestLinux", "UnitTestable"],
            },
        },
        # Lowest priority: picked only when no specialised system matches.
        "priority": {"level": footprints.priorities.top.DEFAULT},
    }

    def __init__(self, *args, **kw):
        """Emit a debug trace, then defer to the parent initialisation."""
        logger.debug("Garbage system init %s", self.__class__)
        super().__init__(*args, **kw)
3760
+
3761
+
3762
class Garbage34p(Garbage, Python34):
    """Default system class for weird systems with python version >= 3.4"""

    # Combine the base Garbage footprint with the Python >= 3.4 restriction.
    _footprint = [
        _python34_fp,
        # Fixed typo in the info string ("withh" -> "with")
        dict(info="Garbage base system with a blazing Python version"),
    ]
3769
+
3770
+
3771
class Linux(OSExtended):
    """Abstract default system class for most Linux based systems."""

    _abstract = True
    _footprint = dict(
        info="Abstract Linux base system",
        attr=dict(sysname=dict(values=["Linux"])),
    )

    def __init__(self, *args, **kw):
        """
        Before going through parent initialisation (see :class:`OSExtended`),
        store these attributes:

        * **psopts** - as default option for the ps command (default: ``-w -f -a``).
        """
        logger.debug("Linux system init %s", self.__class__)
        self._psopts = kw.pop("psopts", ["-w", "-f", "-a"])
        super().__init__(*args, **kw)
        self.__dict__["_cpusinfo"] = LinuxCpusInfo()
        try:
            self.__dict__["_numainfo"] = LibNumaNodesInfo()
        except (OSError, NotImplementedError):
            # On very few Linux systems, libnuma is not available...
            pass
        self.__dict__["_memoryinfo"] = LinuxMemInfo()
        self.__dict__["_netstatsinfo"] = LinuxNetstats()

    @property
    def realkind(self):
        return "linux"

    def cpus_ids_per_blocks(self, blocksize=1, topology="raw", hexmask=False):
        """Get the list of CPUs IDs for nicely ordered for subsequent binding.

        :param int blocksize: the number of thread consumed by one task
        :param str topology: The task distribution scheme
        :param bool hexmask: Return a list of CPU masks in hexadecimal
        :raises ValueError: if **topology** matches no known CPU-list provider
        """
        if topology.startswith("numa"):
            if topology.endswith("_discardsmt"):
                # Strip the "_discardsmt" suffix and ignore the SMT layout
                topology = topology[:-11]
                smtlayout = None
            else:
                smtlayout = self.cpus_info.physical_cores_smtthreads
            try:
                cpulist = getattr(self.numa_info, topology + "_cpulist")(
                    blocksize, smtlayout=smtlayout
                )
            except AttributeError:
                raise ValueError("Unknown topology ({:s}).".format(topology))
        else:
            try:
                cpulist = getattr(self.cpus_info, topology + "_cpulist")(
                    blocksize
                )
            except AttributeError:
                raise ValueError("Unknown topology ({:s}).".format(topology))
        cpulist = list(cpulist)
        # Group the flat CPU list into blocks of *blocksize* (one per task)
        cpulist = [
            [cpulist[(taskid * blocksize + i)] for i in range(blocksize)]
            for taskid in range(len(cpulist) // blocksize)
        ]
        if hexmask:
            # Turn each block into a hexadecimal CPU-mask string
            cpulist = [hex(sum([1 << i for i in item])) for item in cpulist]
        return cpulist

    def cpus_ids_dispenser(self, topology="raw"):
        """Get a dispenser of CPUs IDs for nicely ordered for subsequent binding.

        :param str topology: The task distribution scheme
        :raises ValueError: if **topology** matches no known CPU dispenser
        """
        if topology.startswith("numa"):
            if topology.endswith("_discardsmt"):
                # Strip the "_discardsmt" suffix and ignore the SMT layout
                topology = topology[:-11]
                smtlayout = None
            else:
                smtlayout = self.cpus_info.physical_cores_smtthreads
            try:
                cpudisp = getattr(self.numa_info, topology + "_cpu_dispenser")(
                    smtlayout=smtlayout
                )
            except AttributeError:
                raise ValueError("Unknown topology ({:s}).".format(topology))
        else:
            try:
                cpudisp = getattr(
                    self.cpus_info, topology + "_cpu_dispenser"
                )()
            except AttributeError:
                raise ValueError("Unknown topology ({:s}).".format(topology))
        return cpudisp

    def cpus_affinity_get(
        self, taskid, blocksize=1, topology="socketpacked", method="taskset"
    ):
        """Get the necessary command/environment to set the CPUs affinity.

        :param int taskid: the task number
        :param int blocksize: the number of thread consumed by one task
        :param str method: The binding method
        :param str topology: The task distribution scheme
        :return: A 3-elements tuple. (bool: BindingPossible,
            list: Starting command prefix, dict: Environment update)
        :raises ValueError: on an unknown binding **method**
        """
        if method not in ("taskset", "gomp", "omp", "ompverbose"):
            raise ValueError("Unknown binding method ({:s}).".format(method))
        if method == "taskset":
            if not self.which("taskset"):
                # Fixed garbled log message ("The taskset is program is...")
                logger.warning(
                    "The taskset program is missing. Going on without binding."
                )
                return (False, list(), dict())
        cpulist = self.cpus_ids_per_blocks(
            blocksize=blocksize, topology=topology
        )
        # Wrap around when there are more tasks than available CPU blocks
        cpus = cpulist[taskid % len(cpulist)]
        cmdl = list()
        env = dict()
        if method == "taskset":
            cmdl += ["taskset", "--cpu-list", ",".join([str(c) for c in cpus])]
        elif method == "gomp":
            env["GOMP_CPU_AFFINITY"] = " ".join([str(c) for c in cpus])
        elif method.startswith("omp"):
            env["OMP_PLACES"] = ",".join(["{{{:d}}}".format(c) for c in cpus])
            if method.endswith("verbose"):
                env["OMP_DISPLAY_ENV"] = "TRUE"
                env["OMP_DISPLAY_AFFINITY"] = "TRUE"
        return (True, cmdl, env)
3900
+
3901
+
3902
class Linux34p(Linux, Python34):
    """Linux system with python version >= 3.4"""

    # Base Linux footprint narrowed down to recent Python interpreters.
    _footprint = [
        _python34_fp,
        {"info": "Linux based system with a blazing Python version"},
    ]
3909
+
3910
+
3911
class LinuxDebug(Linux34p):
    """Special system class for crude debugging on Linux based systems."""

    _footprint = {
        "info": "Linux debug system",
        "attr": {
            "version": {
                # Mandatory attribute: only selected when explicitly requested
                "optional": False,
                "values": ["dbug", "debug"],
                "remap": {"dbug": "debug"},
            },
        },
    }

    def __init__(self, *args, **kw):
        """Emit a debug trace, then defer to the parent initialisation."""
        logger.debug("LinuxDebug system init %s", self.__class__)
        super().__init__(*args, **kw)

    @property
    def realkind(self):
        return "linuxdebug"
3933
+
3934
+
3935
class Macosx(OSExtended):
    """Mac under MacOSX."""

    _abstract = True
    _footprint = {
        "info": "Apple Mac computer under Macosx",
        "attr": {
            "sysname": {"values": ["Darwin"]},
        },
        "priority": {"level": footprints.priorities.top.TOOLBOX},
    }

    def __init__(self, *args, **kw):
        """
        Before going through parent initialisation (see :class:`OSExtended`),
        store these attributes:

        * **psopts** - as default option for the ps command (default: ``-w -f -a``).
        """
        logger.debug("Darwin system init %s", self.__class__)
        self._psopts = kw.pop("psopts", ["-w", "-f", "-a"])
        super().__init__(*args, **kw)

    @property
    def realkind(self):
        return "darwin"

    @property
    def default_syslog(self):
        """Address to use in logging.handler.SysLogHandler()."""
        return "/var/run/syslog"
3966
+
3967
+
3968
+ class Macosx34p(Macosx, Python34):
3969
+ """Mac under MacOSX with python version >= 3.4"""
3970
+
3971
+ _footprint = [
3972
+ _python34_fp,
3973
+ dict(
3974
+ info="Apple Mac computer under Macosx with a blazing Python version"
3975
+ ),
3976
+ ]