atex-0.9-py3-none-any.whl → atex-0.11-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. atex/aggregator/__init__.py +62 -0
  2. atex/aggregator/json.py +279 -0
  3. atex/cli/__init__.py +14 -1
  4. atex/cli/fmf.py +7 -7
  5. atex/cli/libvirt.py +3 -2
  6. atex/cli/testingfarm.py +74 -3
  7. atex/connection/podman.py +2 -4
  8. atex/connection/ssh.py +7 -14
  9. atex/executor/executor.py +21 -20
  10. atex/executor/scripts.py +5 -3
  11. atex/executor/testcontrol.py +1 -1
  12. atex/orchestrator/__init__.py +76 -3
  13. atex/orchestrator/{orchestrator.py → adhoc.py} +246 -108
  14. atex/orchestrator/contest.py +94 -0
  15. atex/{provision → provisioner}/__init__.py +48 -52
  16. atex/{provision → provisioner}/libvirt/libvirt.py +34 -15
  17. atex/{provision → provisioner}/libvirt/locking.py +3 -1
  18. atex/provisioner/podman/__init__.py +2 -0
  19. atex/provisioner/podman/podman.py +169 -0
  20. atex/{provision → provisioner}/testingfarm/api.py +56 -48
  21. atex/{provision → provisioner}/testingfarm/testingfarm.py +43 -45
  22. atex/util/log.py +62 -67
  23. atex/util/subprocess.py +46 -12
  24. atex/util/threads.py +7 -0
  25. atex-0.11.dist-info/METADATA +86 -0
  26. atex-0.11.dist-info/RECORD +45 -0
  27. {atex-0.9.dist-info → atex-0.11.dist-info}/WHEEL +1 -1
  28. atex/orchestrator/aggregator.py +0 -111
  29. atex/provision/podman/__init__.py +0 -1
  30. atex/provision/podman/podman.py +0 -274
  31. atex-0.9.dist-info/METADATA +0 -178
  32. atex-0.9.dist-info/RECORD +0 -43
  33. /atex/{provision → provisioner}/libvirt/VM_PROVISION +0 -0
  34. /atex/{provision → provisioner}/libvirt/__init__.py +0 -0
  35. /atex/{provision → provisioner}/libvirt/setup-libvirt.sh +0 -0
  36. /atex/{provision → provisioner}/testingfarm/__init__.py +0 -0
  37. {atex-0.9.dist-info → atex-0.11.dist-info}/entry_points.txt +0 -0
  38. {atex-0.9.dist-info → atex-0.11.dist-info}/licenses/COPYING.txt +0 -0
atex/aggregator/__init__.py ADDED
@@ -0,0 +1,62 @@
+ import importlib as _importlib
+ import pkgutil as _pkgutil
+
+
+ class Aggregator:
+     """
+     TODO: generic description, not JSON-specific
+     """
+
+     def ingest(self, platform, test_name, test_results, test_files):
+         """
+         Process 'test_results' (string/Path) as results reported by a test
+         run by the Executor, along with 'test_files' as files uploaded by that test,
+         aggregating them under 'platform' (string) as 'test_name' (string).
+
+         This is DESTRUCTIVE, the input results/files are consumed in the
+         process.
+         """
+         raise NotImplementedError(f"'ingest' not implemented for {self.__class__.__name__}")
+
+     def start(self):
+         """
+         Start the Aggregator instance, opening any files / allocating resources
+         as necessary.
+         """
+         raise NotImplementedError(f"'start' not implemented for {self.__class__.__name__}")
+
+     def stop(self):
+         """
+         Stop the Aggregator instance, freeing all allocated resources.
+         """
+         raise NotImplementedError(f"'stop' not implemented for {self.__class__.__name__}")
+
+     def __enter__(self):
+         try:
+             self.start()
+             return self
+         except Exception:
+             self.stop()
+             raise
+
+     def __exit__(self, exc_type, exc_value, traceback):
+         self.stop()
+
+
+ _submodules = [
+     info.name for info in _pkgutil.iter_modules(__spec__.submodule_search_locations)
+ ]
+
+ __all__ = [*_submodules, Aggregator.__name__]  # noqa: PLE0604
+
+
+ def __dir__():
+     return __all__
+
+
+ # lazily import submodules
+ def __getattr__(attr):
+     if attr in _submodules:
+         return _importlib.import_module(f".{attr}", __name__)
+     else:
+         raise AttributeError(f"module '{__name__}' has no attribute '{attr}'")
atex/aggregator/json.py ADDED
@@ -0,0 +1,279 @@
+ import abc
+ import gzip
+ import lzma
+ import json
+ import shutil
+ import threading
+ from pathlib import Path
+
+ from . import Aggregator
+
+
+ def _verbatim_move(src, dst):
+     def copy_without_symlinks(src, dst):
+         return shutil.copy2(src, dst, follow_symlinks=False)
+     shutil.move(src, dst, copy_function=copy_without_symlinks)
+
+
+ class JSONAggregator(Aggregator):
+     """
+     Collects reported results in a line-JSON output file and uploaded files
+     (logs) from multiple test runs under a shared directory.
+
+     Note that the aggregated JSON file *does not* use the test-based JSON format
+     described by executor/RESULTS.md - both use JSON, but are very different.
+
+     This aggregated format uses a top-level array (on each line) with a fixed
+     field order:
+
+         platform, status, test name, subtest name, files, note
+
+     All these are strings except 'files', which is another (nested) array
+     of strings.
+
+     If 'testout' is present in an input test result, it is prepended to
+     the list of 'files'.
+     If a field is missing in the source result, it is translated to a null
+     value.
+     """
+
+     def __init__(self, target, files):
+         """
+         'target' is a string/Path to a .json file for all ingested
+         results to be aggregated (written) to.
+
+         'files' is a string/Path of the top-level parent for all
+         per-platform / per-test files uploaded by tests.
+         """
+         self.lock = threading.RLock()
+         self.target = Path(target)
+         self.files = Path(files)
+         self.target_fobj = None
+
+     def start(self):
+         if self.target.exists():
+             raise FileExistsError(f"{self.target} already exists")
+         self.target_fobj = open(self.target, "w")
+
+         if self.files.exists():
+             raise FileExistsError(f"{self.files} already exists")
+         self.files.mkdir()
+
+     def stop(self):
+         if self.target_fobj:
+             self.target_fobj.close()
+             self.target_fobj = None
+
+     def _get_test_files_path(self, platform, test_name):
+         """
+         Return a directory path to where uploaded files should be stored
+         for a particular 'platform' and 'test_name'.
+         """
+         platform_files = self.files / platform
+         platform_files.mkdir(exist_ok=True)
+         test_files = platform_files / test_name.lstrip("/")
+         return test_files
+
+     @staticmethod
+     def _modify_file_list(test_files):
+         return test_files
+
+     @staticmethod
+     def _move_test_files(test_files, target_dir):
+         """
+         Move (or otherwise process) 'test_files' as a directory of files uploaded
+         by the test, into the pre-computed 'target_dir' location (inside
+         a hierarchy of all files from all tests).
+         """
+         _verbatim_move(test_files, target_dir)
+
+     def _gen_test_results(self, input_fobj, platform, test_name):
+         """
+         Yield complete output JSON objects, one for each input result.
+         """
+         # 'testout', 'files' and others are standard fields in the
+         # test control interface, see RESULTS.md for the Executor
+         for raw_line in input_fobj:
+             result_line = json.loads(raw_line)
+
+             file_names = []
+             # process the file specified by the 'testout' key
+             if "testout" in result_line:
+                 file_names.append(result_line["testout"])
+             # process any additional files in the 'files' key
+             if "files" in result_line:
+                 file_names += (f["name"] for f in result_line["files"])
+
+             file_names = self._modify_file_list(file_names)
+
+             output_line = (
+                 platform,
+                 result_line["status"],
+                 test_name,
+                 result_line.get("name"),  # subtest
+                 file_names,
+                 result_line.get("note"),
+             )
+             yield json.dumps(output_line, indent=None)
+
+     def ingest(self, platform, test_name, test_results, test_files):
+         target_test_files = self._get_test_files_path(platform, test_name)
+         if target_test_files.exists():
+             raise FileExistsError(f"{target_test_files} already exists for {test_name}")
+
+         # parse the results separately, before writing any aggregated output,
+         # to ensure that either ALL results from the test are ingested, or none
+         # at all (ie. if one of the result lines contains JSON errors)
+         with open(test_results) as test_results_fobj:
+             output_results = self._gen_test_results(test_results_fobj, platform, test_name)
+             output_json = "\n".join(output_results) + "\n"
+
+         with self.lock:
+             self.target_fobj.write(output_json)
+             self.target_fobj.flush()
+
+         # clean up the source test_results (Aggregator should 'mv', not 'cp')
+         Path(test_results).unlink()
+
+         # if the test_files dir is not empty
+         if any(test_files.iterdir()):
+             self._move_test_files(test_files, target_test_files)
+
+
+ class CompressedJSONAggregator(JSONAggregator, abc.ABC):
+     compress_files = False
+     suffix = ""
+     exclude = ()
+
+     @abc.abstractmethod
+     def compressed_open(self, *args, **kwargs):
+         pass
+
+     def start(self):
+         if self.target.exists():
+             raise FileExistsError(f"{self.target} already exists")
+         self.target_fobj = self.compressed_open(self.target, "wt", newline="\n")
+
+         if self.files.exists():
+             raise FileExistsError(f"{self.files} already exists")
+         self.files.mkdir()
+
+     def _modify_file_list(self, test_files):
+         if self.compress_files and self.suffix:
+             return [
+                 (name if name in self.exclude else f"{name}{self.suffix}")
+                 for name in test_files
+             ]
+         else:
+             return super()._modify_file_list(test_files)
+
+     def _move_test_files(self, test_files, target_dir):
+         if not self.compress_files:
+             super()._move_test_files(test_files, target_dir)
+             return
+
+         for root, _, files in test_files.walk(top_down=False):
+             for file_name in files:
+                 src_path = root / file_name
+                 dst_path = target_dir / src_path.relative_to(test_files)
+
+                 dst_path.parent.mkdir(parents=True, exist_ok=True)
+
+                 # skip dirs, symlinks, device files, etc.
+                 if not src_path.is_file(follow_symlinks=False) or file_name in self.exclude:
+                     _verbatim_move(src_path, dst_path)
+                     continue
+
+                 if self.suffix:
+                     dst_path = dst_path.with_name(f"{dst_path.name}{self.suffix}")
+
+                 with open(src_path, "rb") as plain_fobj:
+                     with self.compressed_open(dst_path, "wb") as compress_fobj:
+                         shutil.copyfileobj(plain_fobj, compress_fobj, 1048576)
+
+                 src_path.unlink()
+
+             # we're walking bottom-up, so the local root should be empty now
+             root.rmdir()
+
+
+ class GzipJSONAggregator(CompressedJSONAggregator):
+     """
+     Identical to JSONAggregator, but transparently Gzips either or both of
+     the output line-JSON file with results and the uploaded files.
+     """
+     def compressed_open(self, *args, **kwargs):
+         return gzip.open(*args, compresslevel=self.level, **kwargs)
+
+     def __init__(
+         self, target, files, *, compress_level=9,
+         compress_files=True, compress_files_suffix=".gz", compress_files_exclude=None,
+     ):
+         """
+         'target' is a string/Path to a .json.gz file for all ingested
+         results to be aggregated (written) to.
+
+         'files' is a string/Path of the top-level parent for all
+         per-platform / per-test files uploaded by tests.
+
+         'compress_level' specifies how much effort should be spent compressing
+         (1 = fast, 9 = slow).
+
+         If 'compress_files' is True, compress also any files uploaded by tests.
+
+         The 'compress_files_suffix' is appended to any processed test-uploaded
+         files, and the respective 'files' results array is modified with the
+         new file names (as if the test uploaded compressed files already).
+         Set to "" (empty string) to use original file names and just compress
+         them transparently in-place.
+
+         'compress_files_exclude' is a tuple/list of strings (input 'files'
+         names) to skip when compressing. Their names also won't be modified.
+         """
+         super().__init__(target, files)
+         self.level = compress_level
+         self.compress_files = compress_files
+         self.suffix = compress_files_suffix
+         self.exclude = compress_files_exclude or ()
+
+
+ class LZMAJSONAggregator(CompressedJSONAggregator):
+     """
+     Identical to JSONAggregator, but transparently compresses (via LZMA/XZ)
+     either or both of the output line-JSON file with results and the uploaded
+     files.
+     """
+     def compressed_open(self, *args, **kwargs):
+         return lzma.open(*args, preset=self.preset, **kwargs)
+
+     def __init__(
+         self, target, files, *, compress_preset=9,
+         compress_files=True, compress_files_suffix=".xz", compress_files_exclude=None,
+     ):
+         """
+         'target' is a string/Path to a .json.xz file for all ingested
+         results to be aggregated (written) to.
+
+         'files' is a string/Path of the top-level parent for all
+         per-platform / per-test files uploaded by tests.
+
+         'compress_preset' specifies how much effort should be spent compressing
+         (1 = fast, 9 = slow). Optionally ORed with lzma.PRESET_EXTREME to spend
+         even more CPU time compressing.
+
+         If 'compress_files' is True, compress also any files uploaded by tests.
+
+         The 'compress_files_suffix' is appended to any processed test-uploaded
+         files, and the respective 'files' results array is modified with the
+         new file names (as if the test uploaded compressed files already).
+         Set to "" (empty string) to use original file names and just compress
+         them transparently in-place.
+
+         'compress_files_exclude' is a tuple/list of strings (input 'files'
+         names) to skip when compressing. Their names also won't be modified.
+         """
+         super().__init__(target, files)
+         self.preset = compress_preset
+         self.compress_files = compress_files
+         self.suffix = compress_files_suffix
+         self.exclude = compress_files_exclude or ()
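
To make the ingest flow concrete, here is a minimal, hypothetical driver (the platform name, test name, and paths are made up; in practice the Executor produces the inputs and an orchestrator calls ingest()):

    from pathlib import Path
    from atex.aggregator.json import GzipJSONAggregator

    # start() creates aggregated.json.gz and the uploaded-files/ directory,
    # stop() closes the output on context exit
    with GzipJSONAggregator("aggregated.json.gz", "uploaded-files") as agg:
        # 'tmp/results' is a line-JSON file written by one test run and
        # 'tmp/files' is the directory of files it uploaded; both are consumed
        agg.ingest("fedora-41", "/example/test", "tmp/results", Path("tmp/files"))

Each ingested result then becomes one line of the aggregated file in the fixed field order documented above, e.g. ["fedora-41", "pass", "/example/test", null, ["out.log.gz"], null], with the .gz suffix reflecting compress_files_suffix.
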
atex/cli/__init__.py CHANGED
@@ -27,12 +27,21 @@ import pkgutil
  import argparse
  import logging
 
+ from .. import util
+
 
  def setup_logging(level):
+     if level <= util.EXTRADEBUG:
+         fmt = "%(asctime)s %(name)s: %(filename)s:%(lineno)s: %(funcName)s(): %(message)s"
+         # also print urllib3 headers
+         import http.client  # noqa: PLC0415
+         http.client.HTTPConnection.debuglevel = 5
+     else:
+         fmt = "%(asctime)s %(name)s: %(message)s"
      logging.basicConfig(
          level=level,
          stream=sys.stderr,
-         format="%(asctime)s %(name)s: %(message)s",
+         format=fmt,
          datefmt="%Y-%m-%d %H:%M:%S",
      )
 
@@ -53,6 +62,10 @@ def main():
          "--debug", "-d", action="store_const", dest="loglevel", const=logging.DEBUG,
          help="enable extra debugging (logging.DEBUG)",
      )
+     log_grp.add_argument(
+         "--extra-debug", "-D", action="store_const", dest="loglevel", const=util.EXTRADEBUG,
+         help="enable extra debugging (atex.util.EXTRADEBUG)",
+     )
      log_grp.add_argument(
          "--quiet", "-q", action="store_const", dest="loglevel", const=logging.WARNING,
          help="be quiet during normal operation (logging.WARNING)",
atex/cli/fmf.py CHANGED
@@ -56,17 +56,17 @@ def prepare(args):
      result = make_fmftests(args)
      print("--- fmf root ---")
      print(str(result.root))
-     print("--- prepare packages ---")
+     print("\n--- prepare packages ---")
      print("\n".join(result.prepare_pkgs))
-     print("--- plan environment ---")
-     print("\n".join("{k}={v}" for k,v in result.plan_env))
+     print("\n--- plan environment ---")
+     print("\n".join(f"{k}={v}" for k,v in result.plan_env.items()))
      for script in result.prepare_scripts:
-         print("--- prepare script ---")
-         print(script)
+         print("\n--- prepare script ---")
+         print(script.rstrip("\n"))
          print("----------------------")
      for script in result.finish_scripts:
-         print("--- finish script ---")
-         print(script)
+         print("\n--- finish script ---")
+         print(script.rstrip("\n"))
          print("----------------------")
 
 
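
The plan-environment line above fixes two bugs at once: the old generator iterated the dict itself (keys only, so the 'k, v' unpacking fails for most key names) and the format string was missing its f-prefix, so it would never interpolate. A small illustration with a hypothetical plan_env:

    plan_env = {"FOO": "1", "BAR": "2"}

    # old form: 'for k,v in plan_env' unpacks each key string and raises
    # ValueError; even when it unpacked, "{k}={v}" would print literally
    # print("\n".join("{k}={v}" for k, v in plan_env))

    # new form prints one KEY=value pair per line: FOO=1 and BAR=2
    print("\n".join(f"{k}={v}" for k, v in plan_env.items()))
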
atex/cli/libvirt.py CHANGED
@@ -1,9 +1,10 @@
  import sys
  import re
 
- import libvirt
+ from .. import util
+ from ..provisioner.libvirt import locking
 
- from ..provision.libvirt import locking
+ libvirt = util.import_libvirt()
 
 
  def _libvirt_open(url=None):
atex/cli/testingfarm.py CHANGED
@@ -1,9 +1,11 @@
  import sys
  import json
  import pprint
+ import collections
+ from datetime import datetime, timedelta, UTC
 
  from .. import util
- from ..provision.testingfarm import api as tf
+ from ..provisioner.testingfarm import api as tf
 
 
  def _get_api(args):
@@ -36,7 +38,6 @@ def composes(args):
  def get_request(args):
      api = _get_api(args)
      request = tf.Request(args.request_id, api=api)
-     request.update()
      print(str(request))
 
 
@@ -79,6 +80,67 @@ def search_requests(args):
          print(f"{created} {req_id} : {envs_str}")
 
 
+ def stats(args):
+     api = _get_api(args)
+
+     def top_users_repos(requests):
+         tokens = collections.defaultdict(int)
+         repos = collections.defaultdict(int)
+         for req in requests:
+             tokens[req["token_id"]] += 1
+             if "fmf" in req["test"] and req["test"]["fmf"]:
+                 repos[req["test"]["fmf"]["url"]] += 1
+             elif "tmt" in req["test"] and req["test"]["tmt"]:
+                 repos[req["test"]["tmt"]["url"]] += 1
+
+         top_tokens = sorted(tokens, key=lambda x: tokens[x], reverse=True)[:10]
+         top_repos = sorted(repos, key=lambda x: repos[x], reverse=True)[:10]
+         if not top_tokens or not top_repos:
+             return
+         digits = max(len(str(tokens[top_tokens[0]])), len(str(repos[top_repos[0]])))
+
+         print("Top 10 token IDs:")
+         for token_id in top_tokens:
+             count = tokens[token_id]
+             print(f"{count:>{digits}} {token_id}")
+
+         print("Top 10 repo URLs:")
+         for repo_url in top_repos:
+             count = repos[repo_url]
+             print(f"{count:>{digits}} {repo_url}")
+
+     def request_search_results():
+         for state in args.states.split(","):
+             result = api.search_requests(
+                 state=state,
+                 ranch=args.ranch,
+                 mine=False,
+             )
+             if result:
+                 yield from result
+
+     def multiday_request_search_results():
+         now = datetime.now(UTC)
+         for day in range(0,args.days):
+             before = now - timedelta(days=day)
+             after = now - timedelta(days=day+1)
+             for state in args.states.split(","):
+                 result = api.search_requests(
+                     state=state,
+                     created_before=before.replace(microsecond=0).isoformat(),
+                     created_after=after.replace(microsecond=0).isoformat(),
+                     ranch=args.ranch,
+                     mine=False,
+                 )
+                 if result:
+                     yield from result
+
+     if args.days is not None:
+         top_users_repos(multiday_request_search_results())
+     else:
+         top_users_repos(request_search_results())
+
+
  def reserve(args):
      util.info(f"Reserving {args.compose} on {args.arch} for {args.timeout} minutes")
 
@@ -106,7 +168,6 @@ def reserve(args):
          util.info(f"Got machine: {m}")
          while True:
              try:
-                 res.request.update()
                  res.request.assert_alive()
              except tf.GoneAwayError as e:
                  print(e)
@@ -198,6 +259,14 @@ def parse_args(parser):
      cmd.add_argument("--after", help="only requests created after ISO8601")
      cmd.add_argument("--json", help="full details, one request per line", action="store_true")
 
+     cmd = cmds.add_parser(
+         "stats",
+         help="print out TF usage statistics",
+     )
+     cmd.add_argument("--days", type=int, help="query last N days instead of all TF requests")
+     cmd.add_argument("ranch", help="Testing Farm ranch name")
+     cmd.add_argument("states", help="comma-separated TF request states")
+
      cmd = cmds.add_parser(
          "reserve",
          help="reserve a system and ssh into it",
@@ -233,6 +302,8 @@ def main(args):
          cancel(args)
      elif args._cmd in ("search-requests", "sr"):
          search_requests(args)
+     elif args._cmd == "stats":
+         stats(args)
      elif args._cmd == "reserve":
          reserve(args)
      elif args._cmd in ("watch-pipeline", "wp"):
atex/connection/podman.py CHANGED
@@ -8,11 +8,11 @@ from .. import util
  from . import Connection
 
 
- class PodmanConnError(ConnectionError):
+ class PodmanConnectionError(ConnectionError):
      pass
 
 
- class PodmanConn(Connection):
+ class PodmanConnection(Connection):
      """
      Implements the Connection API via 'podman container exec' on an
      already-running container, it does not handle any image pulling,
@@ -42,7 +42,6 @@ class PodmanConn(Connection):
      def cmd(self, command, *, func=util.subprocess_run, **func_args):
          return func(
              ("podman", "container", "exec", "-i", self.container, *command),
-             skip_frames=1,
              **func_args,
          )
 
@@ -56,7 +55,6 @@ class PodmanConn(Connection):
                  "-e", f"/bin/bash -c 'exec podman container exec -i {self.container} \"$@\"'",
                  *args,
              ),
-             skip_frames=1,
              check=True,
              stdin=subprocess.DEVNULL,
              **func_args,
atex/connection/ssh.py CHANGED
@@ -133,16 +133,16 @@ def _rsync_host_cmd(*args, options, password=None, sudo=None):
      )
 
 
- class StatelessSSHConn(Connection):
+ class StatelessSSHConnection(Connection):
      """
      Implements the Connection API using a ssh(1) client using "standalone"
      (stateless) logic - connect() and disconnect() are no-op, .cmd() simply
      executes the ssh client and .rsync() executes 'rsync -e ssh'.
 
-     Compared to ManagedSSHConn, this may be slow for many .cmd() calls,
+     Compared to ManagedSSHConnection, this may be slow for many .cmd() calls,
      but every call is stateless, there is no persistent connection.
 
-     If you need only one .cmd(), this will be faster than ManagedSSHConn.
+     If you need only one .cmd(), this will be faster than ManagedSSHConnection.
      """
 
      def __init__(self, options, *, password=None, sudo=None):
@@ -182,7 +182,6 @@ class StatelessSSHConn(Connection):
          unified_options["RemoteCommand"] = _shell_cmd(command, sudo=self.sudo)
          return func(
              _options_to_ssh(unified_options, password=self.password),
-             skip_frames=1,
              **func_args,
          )
 
@@ -197,7 +196,6 @@ class StatelessSSHConn(Connection):
                  password=self.password,
                  sudo=self.sudo,
              ),
-             skip_frames=1,
              check=True,
              stdin=subprocess.DEVNULL,
              **func_args,
@@ -216,17 +214,15 @@ class StatelessSSHConn(Connection):
  # checks .assert_master() and manually signals the running clients
  # when it gets DisconnectedError from it.
 
- class ManagedSSHConn(Connection):
+ class ManagedSSHConnection(Connection):
      """
      Implements the Connection API using one persistently-running ssh(1) client
      started in a 'ControlMaster' mode, with additional ssh clients using that
      session to execute remote commands. Similarly, .rsync() uses it too.
 
-     This is much faster than StatelessSSHConn when executing multiple commands,
-     but contains a complex internal state (what if ControlMaster disconnects?).
-
-     Hence why this implementation provides extra non-standard-Connection methods
-     to manage this complexity.
+     This is much faster than StatelessSSHConnection when executing multiple
+     commands, but contains a complex internal state (what if ControlMaster
+     disconnects?).
      """
 
      # TODO: thread safety and locking via self.lock ?
@@ -351,7 +347,6 @@ class ManagedSSHConn(Connection):
          action = "forward" if not cancel else "cancel"
          util.subprocess_run(
              _options_to_ssh(options, extra_cli_flags=("-O", action)),
-             skip_frames=1,
              check=True,
          )
 
@@ -365,7 +360,6 @@ class ManagedSSHConn(Connection):
          unified_options["ControlPath"] = self._tmpdir / "control.sock"
          return func(
              _options_to_ssh(unified_options),
-             skip_frames=1,
              **func_args,
          )
 
@@ -381,7 +375,6 @@ class ManagedSSHConn(Connection):
                  options=unified_options,
                  sudo=self.sudo,
              ),
-             skip_frames=1,
              check=True,
              stdin=subprocess.DEVNULL,
              **func_args,
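
The renamed classes keep the constructor signature shown above, (options, *, password=None, sudo=None), where 'options' is passed through _options_to_ssh(), so ssh_config-style option names are assumed below. A minimal, hedged sketch of picking between the two (host details hypothetical; connect()/disconnect() are part of the Connection API per the docstrings):

    from atex.connection.ssh import StatelessSSHConnection, ManagedSSHConnection

    options = {"Hostname": "203.0.113.10", "User": "root", "Port": "22"}

    # one-off command: stateless, no persistent session to manage
    StatelessSSHConnection(options).cmd(("uname", "-r"))

    # many commands: a ControlMaster-backed session amortizes the ssh handshake
    conn = ManagedSSHConnection(options)
    conn.connect()
    try:
        conn.cmd(("true",))
        conn.cmd(("cat", "/etc/os-release"))
    finally:
        conn.disconnect()
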