atex 0.13__py3-none-any.whl → 0.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
atex/__init__.py CHANGED
@@ -1,7 +1,7 @@
  """
  Ad-hoc Test EXecutor

- Some documentation here.
+ Some intro documentation here.
  """

  import importlib as _importlib
atex/aggregator/__init__.py CHANGED
@@ -1,3 +1,4 @@
+ import abc as _abc
  import importlib as _importlib
  import pkgutil as _pkgutil

@@ -7,29 +8,29 @@ class Aggregator:
  TODO: generic description, not JSON-specific
  """

+ @_abc.abstractmethod
  def ingest(self, platform, test_name, test_results, test_files):
  """
- Process 'test_results' (string/Path) for as results reported by a test
- ran by Executor, along with 'test_files' as files uploaded by that test,
- aggregating them under 'platform' (string) as 'test_name' (string).
+ Process `test_results` (string/Path) as results reported by a test
+ run by Executor, along with `test_files` as files uploaded by that test,
+ aggregating them under `platform` (string) as `test_name` (string).

- This is DESTRUCTIVE, the input results/files are consumed in the
+ This is **destructive**: the input results/files are consumed in the
  process.
  """
- raise NotImplementedError(f"'ingest' not implemented for {self.__class__.__name__}")

+ @_abc.abstractmethod
  def start(self):
  """
  Start the Aggregator instance, opening any files / allocating resources
  as necessary.
  """
- raise NotImplementedError(f"'start' not implemented for {self.__class__.__name__}")

+ @_abc.abstractmethod
  def stop(self):
  """
  Stop the Aggregator instance, freeing all allocated resources.
  """
- raise NotImplementedError(f"'stop' not implemented for {self.__class__.__name__}")

  def __enter__(self):
  try:
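The hunk above replaces the `NotImplementedError` stubs with `abc` abstract methods. A minimal sketch of what a concrete subclass now has to provide, assuming the base class lives in `atex/aggregator/__init__.py` as this diff suggests; the `NullAggregator` name, its no-op bodies, and the call arguments are illustrative only:

```python
from atex.aggregator import Aggregator

class NullAggregator(Aggregator):
    """Illustrative subclass that discards everything it ingests."""

    def ingest(self, platform, test_name, test_results, test_files):
        # a real implementation would (destructively) consume the passed-in
        # results/files, aggregating them under platform/test_name
        pass

    def start(self):
        pass  # open files / allocate resources here

    def stop(self):
        pass  # free everything allocated in start()

# the base class provides __enter__/__exit__ (visible in the hunk above),
# so instances can be used as context managers
with NullAggregator() as agg:
    agg.ingest("fedora-41-x86_64", "some/test", "results.json", "files_dir/")
```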
atex/aggregator/json.py CHANGED
@@ -20,30 +20,31 @@ class JSONAggregator(Aggregator):
  Collects reported results in a line-JSON output file and uploaded files
  (logs) from multiple test runs under a shared directory.

- Note that the aggregated JSON file *does not* use the test-based JSON format
- described by executor/RESULTS.md - both use JSON, but are very different.
+ Note that the aggregated JSON file **does not** use the test-based JSON
+ format described by `executor/RESULTS.md` - both use JSON, but are very
+ different.

  This aggregated format uses a top-level array (on each line) with a fixed
  field order:

  platform, status, test name, subtest name, files, note

- All these are strings except 'files', which is another (nested) array
+ All these are strings except `files`, which is another (nested) array
  of strings.

- If 'testout' is present in an input test result, it is prepended to
- the list of 'files'.
- If a field is missing in the source result, it is translated to a null
+ If `testout` is present in an input test result, it is prepended to
+ the list of `files`.
+ If a field is missing in the source result, it is translated to a `null`
  value.
  """

  def __init__(self, target, files):
  """
- 'target' is a string/Path to a .json file for all ingested
- results to be aggregated (written) to.
+ - `target` is a string/Path to a `.json` file for all ingested
+ results to be aggregated (written) to.

- 'files' is a string/Path of the top-level parent for all
- per-platform / per-test files uploaded by tests.
+ - `files` is a string/Path of the top-level parent for all per-platform
+ / per-test files uploaded by tests.
  """
  self.lock = threading.RLock()
  self.target = Path(target)
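For orientation, one line of the aggregated file follows the fixed field order documented above; a sketch of producing such a line (the concrete values are invented):

```python
import json

# one aggregated result, in the documented fixed field order:
# platform, status, test name, subtest name, files, note
line = json.dumps(
    ["fedora-41-x86_64", "pass", "some/test", None, ["testout.log"], None],
)
# -> ["fedora-41-x86_64", "pass", "some/test", null, ["testout.log"], null]
# a missing field (here: subtest name, note) becomes null, and "testout"
# is prepended to the files array, per the docstring above
```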
@@ -67,7 +68,7 @@ class JSONAggregator(Aggregator):
  def _get_test_files_path(self, platform, test_name):
  """
  Return a directory path to where uploaded files should be stored
- for a particular 'platform' and 'test_name'.
+ for a particular `platform` and `test_name`.
  """
  platform_files = self.files / platform
  platform_files.mkdir(exist_ok=True)
@@ -81,8 +82,8 @@ class JSONAggregator(Aggregator):
  @staticmethod
  def _move_test_files(test_files, target_dir):
  """
- Move (or otherwise process) 'test_files' as directory of files uploaded
- by the test, into the pre-computed 'target_dir' location (inside
+ Move (or otherwise process) `test_files` as a directory of files uploaded
+ by the test, into the pre-computed `target_dir` location (inside
  a hierarchy of all files from all tests).
  """
  _verbatim_move(test_files, target_dir)
@@ -210,25 +211,26 @@ class GzipJSONAggregator(CompressedJSONAggregator):
  compress_files=True, compress_files_suffix=".gz", compress_files_exclude=None,
  ):
  """
- 'target' is a string/Path to a .json.gz file for all ingested
- results to be aggregated (written) to.
+ - `target` is a string/Path to a `.json.gz` file for all ingested
+ results to be aggregated (written) to.

- 'files' is a string/Path of the top-level parent for all
- per-platform / per-test files uploaded by tests.
+ - `files` is a string/Path of the top-level parent for all per-platform
+ / per-test files uploaded by tests.

- 'compress_level' specifies how much effort should be spent compressing,
- (1 = fast, 9 = slow).
+ - `compress_level` specifies how much effort should be spent compressing
+ (1 = fast, 9 = slow).

- If 'compress_files' is True, compress also any files uploaded by tests.
+ - If `compress_files` is `True`, also compress any files uploaded by
+ tests.

- The 'compress_files_suffix' is appended to any processed test-uploaded
- files, and the respective 'files' results array is modified with the
- new file names (as if the test uploaded compressed files already).
- Set to "" (empty string) to use original file names and just compress
- them transparently in-place.
+ - The `compress_files_suffix` is appended to any processed test-uploaded
+ files, and the respective `files` results array is modified with the
+ new file names (as if the test uploaded compressed files already).
+ Set to `""` (empty string) to use original file names and just
+ compress them transparently in-place.

- 'compress_files_exclude' is a tuple/list of strings (input 'files'
- names) to skip when compressing. Their names also won't be modified.
+ - `compress_files_exclude` is a tuple/list of strings (input `files`
+ names) to skip when compressing. Their names also won't be modified.
  """
  super().__init__(target, files)
  self.level = compress_level
@@ -251,26 +253,27 @@ class LZMAJSONAggregator(CompressedJSONAggregator):
  compress_files=True, compress_files_suffix=".xz", compress_files_exclude=None,
  ):
  """
- 'target' is a string/Path to a .json.xz file for all ingested
- results to be aggregated (written) to.
+ - `target` is a string/Path to a `.json.xz` file for all ingested
+ results to be aggregated (written) to.

- 'files' is a string/Path of the top-level parent for all
- per-platform / per-test files uploaded by tests.
+ - `files` is a string/Path of the top-level parent for all per-platform
+ / per-test files uploaded by tests.

- 'compress_preset' specifies how much effort should be spent compressing,
- (1 = fast, 9 = slow). Optionally ORed with lzma.PRESET_EXTREME to spend
- even more CPU time compressing.
+ - `compress_preset` specifies how much effort should be spent
+ compressing (1 = fast, 9 = slow). Optionally ORed with
+ `lzma.PRESET_EXTREME` to spend even more CPU time compressing.

- If 'compress_files' is True, compress also any files uploaded by tests.
+ - If `compress_files` is `True`, also compress any files uploaded by
+ tests.

- The 'compress_files_suffix' is appended to any processed test-uploaded
- files, and the respective 'files' results array is modified with the
- new file names (as if the test uploaded compressed files already).
- Set to "" (empty string) to use original file names and just compress
- them transparently in-place.
+ - The `compress_files_suffix` is appended to any processed test-uploaded
+ files, and the respective `files` results array is modified with the
+ new file names (as if the test uploaded compressed files already).
+ Set to `""` (empty string) to use original file names and just
+ compress them transparently in-place.

- 'compress_files_exclude' is a tuple/list of strings (input 'files'
- names) to skip when compressing. Their names also won't be modified.
+ - `compress_files_exclude` is a tuple/list of strings (input `files`
+ names) to skip when compressing. Their names also won't be modified.
  """
  super().__init__(target, files)
  self.preset = compress_preset
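A usage sketch tying the two compressed variants together, assuming the constructor signatures shown in the two hunks above (the paths and the excluded file name are invented):

```python
import lzma

from atex.aggregator.json import GzipJSONAggregator, LZMAJSONAggregator

gz = GzipJSONAggregator(
    "results.json.gz", "files/",
    compress_level=9,             # slowest, smallest output
    compress_files_suffix="",     # keep original names, compress in-place
)
xz = LZMAJSONAggregator(
    "results.json.xz", "files/",
    compress_preset=6 | lzma.PRESET_EXTREME,  # extra CPU for a better ratio
    compress_files_exclude=("testout.log",),  # leave this file uncompressed
)
```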
atex/cli/__init__.py CHANGED
@@ -18,7 +18,7 @@ these keys:
  - function (or other callable) that will be called when invoked by the user,
  gets passed one non-kw argument: argparse-style Namespace

- This module-level dict must be named 'CLI_SPEC'.
+ This module-level dict must be named `CLI_SPEC`.
  """

  import sys
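As a rough illustration of the convention described above: only the `CLI_SPEC` name and the `function` key are taken from this docstring, everything else in this sketch (the `parse_args` hook, the comment about further keys) is assumed:

```python
def parse_args(parser):
    parser.add_argument("--compose")  # hypothetical subcommand argument

def main(args):
    ...  # gets passed one argparse-style Namespace

CLI_SPEC = {
    "function": main,
    # further keys are described in the part of the module docstring
    # not shown in this hunk
}
```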
@@ -27,21 +27,16 @@ import pkgutil
  import argparse
  import logging

- from .. import util
-

  def setup_logging(level):
- if level <= util.EXTRADEBUG:
- fmt = "%(asctime)s %(name)s: %(filename)s:%(lineno)s: %(funcName)s(): %(message)s"
- # also print urllib3 headers
+ # also print urllib3 headers
+ if level <= logging.DEBUG:
  import http.client # noqa: PLC0415
  http.client.HTTPConnection.debuglevel = 5
- else:
- fmt = "%(asctime)s %(name)s: %(message)s"
  logging.basicConfig(
  level=level,
  stream=sys.stderr,
- format=fmt,
+ format="%(asctime)s %(name)s: %(message)s",
  datefmt="%Y-%m-%d %H:%M:%S",
  )

@@ -59,16 +54,16 @@ def main():

  log_grp = parser.add_mutually_exclusive_group()
  log_grp.add_argument(
- "--debug", "-d", action="store_const", dest="loglevel", const=logging.DEBUG,
- help="enable extra debugging (logging.DEBUG)",
+ "--debug", "-d", action="append", dest="debug_loggers", metavar="LOGGER", default=[],
+ help="set logging.DEBUG for a given logger name",
  )
  log_grp.add_argument(
- "--extra-debug", "-D", action="store_const", dest="loglevel", const=util.EXTRADEBUG,
- help="enable extra debugging (atex.util.EXTRADEBUG)",
+ "--debug-all", "-D", action="store_const", dest="loglevel", const=logging.DEBUG,
+ help="set logging.DEBUG globally",
  )
  log_grp.add_argument(
  "--quiet", "-q", action="store_const", dest="loglevel", const=logging.WARNING,
- help="be quiet during normal operation (logging.WARNING)",
+ help="set logging.WARNING globally (suppress INFO)",
  )
  parser.set_defaults(loglevel=logging.INFO)

@@ -89,6 +84,9 @@ def main():
  args = parser.parse_args()

  setup_logging(args.loglevel)
+ # per-logger overrides
+ for logger in args.debug_loggers:
+ logging.getLogger(logger).setLevel(logging.DEBUG)

  try:
  mains[args._module](args)
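The net effect of the new `--debug` handling is the standard per-logger override pattern; a minimal sketch of what e.g. `-d atex.connection.ssh` now does (the logger name is the one registered in the ssh module later in this diff):

```python
import logging

# global level stays at INFO unless -D/--debug-all or -q/--quiet is given
logging.basicConfig(level=logging.INFO)

# each -d/--debug LOGGER raises verbosity for that one subsystem only
logging.getLogger("atex.connection.ssh").setLevel(logging.DEBUG)
```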
atex/cli/testingfarm.py CHANGED
@@ -169,7 +169,7 @@ def stats(args):


  def reserve(args):
- util.info(f"Reserving {args.compose} on {args.arch} for {args.timeout} minutes")
+ print(f"Reserving {args.compose} on {args.arch} for {args.timeout} minutes")

  if args.hvm:
  hardware = {"virtualization": {"is-supported": True}}
@@ -192,7 +192,7 @@
  api=api,
  )
  with res as m:
- util.info(f"Got machine: {m}")
+ print(f"Got machine: {m}")
  while True:
  try:
  res.request.assert_alive()
@@ -223,20 +223,20 @@
  api = _get_api(args)
  request = tf.Request(id=args.request_id, api=api)

- util.info(f"Waiting for {args.request_id} to be 'running'")
+ print(f"Waiting for {args.request_id} to be 'running'")
  try:
  request.wait_for_state("running")
  except tf.GoneAwayError:
- util.info(f"Request {args.request_id} already finished")
+ print(f"Request {args.request_id} already finished")
  return

- util.info("Querying pipeline.log")
+ print("Querying pipeline.log")
  try:
  for line in tf.PipelineLogStreamer(request):
  sys.stdout.write(line)
  sys.stdout.write("\n")
  except tf.GoneAwayError:
- util.info(f"Request {args.request_id} finished, exiting")
+ print(f"Request {args.request_id} finished, exiting")


  def parse_args(parser):
atex/connection/__init__.py CHANGED
@@ -1,3 +1,4 @@
+ import abc as _abc
  import importlib as _importlib
  import pkgutil as _pkgutil

@@ -30,9 +31,9 @@ class Connection:
  ...

  Note that internal connection handling must be implemented as thread-aware,
- ie. disconnect() might be called from a different thread while connect()
- or cmd() are still running.
- Similarly, multiple threads may run cmd() or rsync() independently.
+ ie. `disconnect()` might be called from a different thread while `connect()`
+ or `cmd()` are still running.
+ Similarly, multiple threads may run `cmd()` or `rsync()` independently.

  TODO: document that any exceptions raised by a Connection should be children
  of ConnectionError
@@ -52,52 +53,53 @@ class Connection:
  def __exit__(self, exc_type, exc_value, traceback):
  self.disconnect()

+ @_abc.abstractmethod
  def connect(self, block=True):
  """
  Establish a persistent connection to the remote.

- If 'block' is True, wait for the connection to be up,
+ If `block` is True, wait for the connection to be up,
  otherwise raise BlockingIOError if the connection is still down.
  """
- raise NotImplementedError(f"'connect' not implemented for {self.__class__.__name__}")

+ @_abc.abstractmethod
  def disconnect(self):
  """
  Destroy the persistent connection to the remote.
  """
- raise NotImplementedError(f"'disconnect' not implemented for {self.__class__.__name__}")

+ @_abc.abstractmethod
  def cmd(self, command, *, func=_util.subprocess_run, **func_args):
  """
  Execute a single command on the remote, using subprocess-like semantics.

- 'command' is the command with arguments, as a tuple/list.
+ - `command` is the command with arguments, as a tuple/list.

- 'func' is the subprocess function to use (.run(), .Popen, etc.).
+ - `func` is the subprocess function to use (`.run()`, `.Popen()`, etc.).

- 'func_args' are further keyword arguments to pass to 'func'.
+ - `func_args` are further keyword arguments to pass to `func`.
  """
- raise NotImplementedError(f"'cmd' not implemented for {self.__class__.__name__}")

+ @_abc.abstractmethod
  def rsync(self, *args, func=_util.subprocess_run, **func_args):
  """
- Synchronize local/remote files/directories via 'rsync'.
+ Synchronize local/remote files/directories via `rsync`.
+
+ Pass `*args` like `rsync(1)` CLI arguments, incl. option arguments, ie.

- Pass *args like rsync(1) CLI arguments, incl. option arguments, ie.
  .rsync("-vr", "local_path/", "remote:remote_path")
  .rsync("-z", "remote:remote_file", ".")

  To indicate remote path, use any string followed by a colon, the remote
- name does not matter as an internally-handled '-e' option dictates all
+ name does not matter as an internally-handled `-e` option dictates all
  the connection details.

- 'func' is a subprocess function to use (.run(), .Popen, etc.).
+ - `func` is a subprocess function to use (`.run()`, `.Popen()`, etc.).

- 'func_args' are further keyword arguments to pass to 'func'.
+ - `func_args` are further keyword arguments to pass to `func`.

- The remote must have rsync(1) already installed.
+ The remote must have the `rsync` command already installed.
  """
- raise NotImplementedError(f"'rsync' not implemented for {self.__class__.__name__}")


  _submodules = [
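Putting the `Connection` docstrings above together, a typical caller looks something like this sketch; the concrete subclass, host values, and paths are placeholders:

```python
from atex.connection.ssh import StatelessSSHConnection

conn = StatelessSSHConnection({"Hostname": "192.0.2.10", "User": "root"})
with conn:  # context-manager support comes from the Connection base class
    # 'command' is a tuple/list, one element per remote argument
    result = conn.cmd(["uname", "-r"])  # returns whatever 'func' returns
    # rsync-style args; any "something:" prefix marks the remote side,
    # the internally-handled -e option supplies the real connection details
    conn.rsync("-vr", "local_path/", "remote:remote_path")
```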
atex/connection/podman.py CHANGED
@@ -1,5 +1,5 @@
  """
- Connection API implementation using the 'podman' CLI client.
+ Connection API implementation using the `podman` CLI client.
  """

  import subprocess
@@ -14,21 +14,10 @@ class PodmanConnectionError(ConnectionError):

  class PodmanConnection(Connection):
  """
- Implements the Connection API via 'podman container exec' on an
+ Implements the Connection API via `podman container exec` on an
  already-running container, it does not handle any image pulling,
  container creation, starting or stopping.
  """
-
- # def __init__(self, container, *, user=None, workdir=None):
- # """
- # 'container' is a string with either the full or partial podman
- # container ID, or a container name, as recognized by podman CLI.
- #
- # 'user' is a string with a username or UID, possibly including a GID,
- # passed to the podman CLI as --user.
- #
- # 'workdir' is a string specifying the CWD inside the container.
- # """
  def __init__(self, container):
  self.container = container

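A short usage sketch for the class above; it assumes the container was already created and started out of band, exactly as the docstring requires (the container and image names are invented):

```python
from atex.connection.podman import PodmanConnection

# the container must already be running, e.g. via:
#   podman run -d --name atex-target some-image sleep infinity
conn = PodmanConnection("atex-target")
with conn:
    # runs: podman container exec ... inside the named container
    conn.cmd(["cat", "/etc/os-release"])
```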
atex/connection/ssh.py CHANGED
@@ -1,15 +1,15 @@
  """
- Connection API implementation using the OpenSSH ssh(1) client.
+ Connection API implementation using the OpenSSH `ssh(1)` client.

  Any SSH options are passed via dictionaries of options, and later translated
- to '-o' client CLI options, incl. Hostname, User, Port, IdentityFile, etc.
+ to `-o` client CLI options, incl. Hostname, User, Port, IdentityFile, etc.
  No "typical" ssh CLI switches are used.

  This allows for a nice flexibility from Python code - this module provides
  some sensible option defaults (for scripted use), but you are free to
  overwrite any options via class or function arguments (where appropriate).

- Note that .cmd() quotes arguments to really execute individual arguments
+ Note that `.cmd()` quotes arguments so that they are really executed
  as individual arguments in the remote shell, so you need to give it a proper
  iterable (like for other Connections), not a single string with spaces.
  """
@@ -17,6 +17,7 @@ iterable (like for other Connections), not a single string with spaces.
  import os
  import time
  import shlex
+ import logging
  import tempfile
  import threading
  import subprocess
@@ -25,6 +26,8 @@ from pathlib import Path
  from .. import util
  from . import Connection

+ logger = logging.getLogger("atex.connection.ssh")
+

  DEFAULT_OPTIONS = {
  "LogLevel": "ERROR",
@@ -48,7 +51,6 @@ class DisconnectedError(SSHError):
  """
  Raised when an already-connected ssh session goes away (breaks connection).
  """
- pass


  class NotConnectedError(SSHError):
@@ -56,19 +58,17 @@ class NotConnectedError(SSHError):
  Raised when an operation on ssh connection is requested, but the connection
  is not yet open (or has been closed/disconnected).
  """
- pass


  class ConnectError(SSHError):
  """
  Raised when a to-be-opened ssh connection fails to open.
  """
- pass


  def _shell_cmd(command, sudo=None):
  """
- Make a command line for running 'command' on the target system.
+ Make a command line for running `command` on the target system.
  """
  quoted_args = (shlex.quote(str(arg)) for arg in command)
  if sudo:
@@ -81,7 +81,7 @@ def _shell_cmd(command, sudo=None):

  def _options_to_cli(options):
  """
- Assemble an ssh(1) or sshpass(1) command line with -o options.
+ Assemble an `ssh(1)` or `sshpass(1)` command line with `-o` options.
  """
  list_opts = []
  for key, value in options.items():
@@ -94,7 +94,7 @@ def _options_to_cli(options):

  def _options_to_ssh(options, password=None, extra_cli_flags=()):
  """
- Assemble an ssh(1) or sshpass(1) command line with -o options.
+ Prefix `sshpass(1)` if password was specified.
  """
  cli_opts = _options_to_cli(options)
  if password:
@@ -111,7 +111,7 @@ def _options_to_ssh(options, password=None, extra_cli_flags=()):
  # return a string usable for rsync -e
  def _options_to_rsync_e(options, password=None):
  """
- Return a string usable for the rsync -e argument.
+ Return a string usable for the rsync `-e` argument.
  """
  cli_opts = _options_to_cli(options)
  batch_mode = "-oBatchMode=no" if password else "-oBatchMode=yes"
@@ -121,9 +121,10 @@ def _options_to_rsync_e(options, password=None):
  def _rsync_host_cmd(*args, options, password=None, sudo=None):
  """
  Assemble a rsync command line, noting that
- - 'sshpass' must be before 'rsync', not inside the '-e' argument
- - 'ignored_arg' must be passed by user as destination, not inside '-e'
- - 'sudo' is part of '--rsync-path', yet another argument
+
+ - `sshpass` must be before `rsync`, not inside the `-e` argument
+ - `ignored_arg` must be passed by user as destination, not inside `-e`
+ - `sudo` is part of `--rsync-path`, yet another argument
  """
  return (
  *(("sshpass", "-p", password) if password else ()),
@@ -136,25 +137,25 @@ def _rsync_host_cmd(*args, options, password=None, sudo=None):

  class StatelessSSHConnection(Connection):
  """
- Implements the Connection API using a ssh(1) client using "standalone"
- (stateless) logic - connect() and disconnect() are no-op, .cmd() simply
- executes the ssh client and .rsync() executes 'rsync -e ssh'.
+ Implements the Connection API using an `ssh(1)` client with "standalone"
+ (stateless) logic - `connect()` and `disconnect()` are no-op, `.cmd()`
+ simply executes the ssh client and `.rsync()` executes `rsync -e ssh`.

- Compared to ManagedSSHConnection, this may be slow for many .cmd() calls,
+ Compared to ManagedSSHConnection, this may be slow for many `.cmd()` calls,
  but every call is stateless, there is no persistent connection.

- If you need only one .cmd(), this will be faster than ManagedSSHConnection.
+ For only one `.cmd()`, this class is faster than ManagedSSHConnection.
  """

  def __init__(self, options, *, password=None, sudo=None):
  """
- Prepare to connect to an SSH server specified in 'options'.
+ Prepare to connect to an SSH server specified in `options`.

- If 'password' is given, spawn the ssh(1) command via 'sshpass' and
- pass the password to it.
+ - If `password` is given, spawn the `ssh` command via `sshpass` and
+ pass the password to it.

- If 'sudo' specifies a username, call sudo(8) on the remote shell
- to run under a different user on the remote host.
+ - If `sudo` specifies a username, call `sudo(8)` on the remote shell
+ to run under a different user on the remote host.
  """
  self.options = DEFAULT_OPTIONS.copy()
  self.options.update(options)
@@ -164,11 +165,6 @@ class StatelessSSHConnection(Connection):
  self._master_proc = None

  def connect(self, block=True):
- """
- Optional, .cmd() and .rsync() work without it, but it is provided here
- for compatibility with the Connection API.
- """
- # TODO: just wait until .cmd(['true']) starts responding ?
  pass

  def disconnect(self):
@@ -217,9 +213,9 @@ class StatelessSSHConnection(Connection):

  class ManagedSSHConnection(Connection):
  """
- Implements the Connection API using one persistently-running ssh(1) client
- started in a 'ControlMaster' mode, with additional ssh clients using that
- session to execute remote commands. Similarly, .rsync() uses it too.
+ Implements the Connection API using one persistently-running `ssh(1)` client
+ started in a ControlMaster mode, with additional ssh clients using that
+ session to execute remote commands. Similarly, `.rsync()` uses it too.

  This is much faster than StatelessSSHConnection when executing multiple
  commands, but contains a complex internal state (what if ControlMaster
@@ -230,13 +226,13 @@ class ManagedSSHConnection(Connection):

  def __init__(self, options, *, password=None, sudo=None):
  """
- Prepare to connect to an SSH server specified in 'options'.
+ Prepare to connect to an SSH server specified in `options`.

- If 'password' is given, spawn the ssh(1) command via 'sshpass' and
- pass the password to it.
+ - If `password` is given, spawn the `ssh` command via `sshpass`
+ and pass the password to it.

- If 'sudo' specifies a username, call sudo(8) on the remote shell
- to run under a different user on the remote host.
+ - If `sudo` specifies a username, call `sudo(8)` on the remote shell
+ to run under a different user on the remote host.
  """
  self.lock = threading.RLock()
  self.options = DEFAULT_OPTIONS.copy()
@@ -265,7 +261,7 @@ class ManagedSSHConnection(Connection):
  proc = self._master_proc
  if not proc:
  return
- util.debug(f"disconnecting: {self.options}")
+ logger.info(f"disconnecting: {self.options}")
  proc.kill()
  # don't zombie forever, return EPIPE on any attempts to write to us
  proc.stdout.close()
@@ -285,7 +281,7 @@ class ManagedSSHConnection(Connection):
  sock = self._tmpdir / "control.sock"

  if not self._master_proc:
- util.debug(f"connecting: {self.options}")
+ logger.info(f"connecting: {self.options}")
  options = self.options.copy()
  options["SessionType"] = "none"
  options["ControlMaster"] = "yes"
@@ -331,14 +327,14 @@ class ManagedSSHConnection(Connection):

  def forward(self, forward_type, *spec, cancel=False):
  """
- Add (one or more) ssh forwarding specifications as 'spec' to an
+ Add (one or more) ssh forwarding specifications as `spec` to an
  already-connected instance. Each specification has to follow the
- format of LocalForward or RemoteForward (see ssh_config(5)).
- Ie. "1234 1.2.3.4:22" or "0.0.0.0:1234 1.2.3.4:22".
+ format of LocalForward or RemoteForward (see `ssh_config(5)`).
+ Ie. `1234 1.2.3.4:22` or `0.0.0.0:1234 1.2.3.4:22`.

- 'forward_type' must be either LocalForward or RemoteForward.
+ - `forward_type` must be either LocalForward or RemoteForward.

- If 'cancel' is True, cancel the forwarding instead of adding it.
+ - If `cancel` is `True`, cancel the forwarding instead of adding it.
  """
  assert forward_type in ("LocalForward", "RemoteForward")
  self.assert_master()
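A closing sketch of the `forward()` API documented above, against a connected `ManagedSSHConnection` (it requires a live ControlMaster session; all addresses are placeholders):

```python
from atex.connection.ssh import ManagedSSHConnection

conn = ManagedSSHConnection({"Hostname": "192.0.2.10", "User": "root"})
conn.connect()

# expose remote 192.0.2.99:22 on local port 1234; the spec follows
# the ssh_config(5) LocalForward syntax: "<listen> <destination>"
conn.forward("LocalForward", "1234 192.0.2.99:22")

# tear the same forwarding down again without disconnecting
conn.forward("LocalForward", "1234 192.0.2.99:22", cancel=True)
```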