atex 0.8-py3-none-any.whl → 0.9-py3-none-any.whl

atex/cli/fmf.py CHANGED
@@ -18,24 +18,42 @@ def _get_context(args):
     return context or None
 
 
+def make_fmftests(args):
+    return fmf.FMFTests(
+        args.root,
+        args.plan,
+        names=args.test or None,
+        filters=args.filter or None,
+        conditions=args.condition or None,
+        excludes=args.exclude or None,
+        context=_get_context(args),
+    )
+
+
+def requires(args):
+    result = make_fmftests(args)
+    all_pkgs = set()
+    all_pkgs.update(fmf.all_pkg_requires(result, key="require"))
+    all_pkgs.update(fmf.all_pkg_requires(result, key="recommend"))
+    for pkg in sorted(all_pkgs):
+        print(pkg)
+
+
 def discover(args):
-    result = fmf.FMFTests(args.root, args.plan, context=_get_context(args))
+    result = make_fmftests(args)
     for name in result.tests:
         print(name)
 
 
 def show(args):
-    result = fmf.FMFTests(args.root, args.plan, context=_get_context(args))
-    if tests := list(result.match(args.test)):
-        for test in tests:
-            print(f"\n--- {test.name} ---")
-            pprint.pprint(test.data)
-    else:
-        _fatal(f"Not reachable via {args.plan} discovery: {args.test}")
+    result = make_fmftests(args)
+    for name, data in result.tests.items():
+        print(f"\n--- {name} ---")
+        pprint.pprint(data)
 
 
 def prepare(args):
-    result = fmf.FMFTests(args.root, args.plan, context=_get_context(args))
+    result = make_fmftests(args)
     print("--- fmf root ---")
     print(str(result.root))
     print("--- prepare packages ---")
@@ -46,37 +64,69 @@ def prepare(args):
     print("--- prepare script ---")
     print(script)
     print("----------------------")
+    for script in result.finish_scripts:
+        print("--- finish script ---")
+        print(script)
+        print("----------------------")
 
 
-def parse_args(parser):
+def add_fmf_options(parser):
     parser.add_argument("--root", help="path to directory with fmf tests", default=".")
-    parser.add_argument("--context", "-c", help="tmt style key=value context", action="append")
+    parser.add_argument("--plan", help="plan name (defaults to dummy plan)")
+    parser.add_argument(
+        "--test", "-t", help="test name regex (replacing 'test' from plan)",
+        action="append",
+    )
+    parser.add_argument(
+        "--exclude", help="test name regex (replacing 'exclude' from plan)",
+        action="append",
+    )
+    parser.add_argument(
+        "--condition", help="fmf-style python condition",
+        action="append",
+    )
+    parser.add_argument(
+        "--filter", help="fmf-style expression filter (replacing 'filter' from plan)",
+        action="append",
+    )
+    parser.add_argument(
+        "--context", "-c", help="tmt style key=value context",
+        action="append",
+    )
+
+
+def parse_args(parser):
+    add_fmf_options(parser)
+
     cmds = parser.add_subparsers(
-        dest="_cmd", help="executor feature", metavar="<cmd>", required=True,
+        dest="_cmd", help="fmf feature", metavar="<cmd>", required=True,
+    )
+
+    cmds.add_parser(
+        "requires", aliases=("req",),
+        help="list requires/recommends of the plan and its tests",
     )
 
-    cmd = cmds.add_parser(
+    cmds.add_parser(
         "discover", aliases=("di",),
-        help="list tests, post-processed by tmt plans",
+        help="list tests, possibly post-processed by a tmt plan",
     )
-    cmd.add_argument("plan", help="tmt plan to use for discovery")
 
-    cmd = cmds.add_parser(
+    cmds.add_parser(
         "show",
-        help="show fmf data of a test",
+        help="show fmf metadata of test(s)",
    )
-    cmd.add_argument("plan", help="tmt plan to use for discovery")
-    cmd.add_argument("test", help="fmf style test regex")
 
-    cmd = cmds.add_parser(
+    cmds.add_parser(
         "prepare",
-        help="show prepare-related FMFTests details",
+        help="show prepare-related details from a plan",
     )
-    cmd.add_argument("plan", help="tmt plan to parse")
 
 
 def main(args):
-    if args._cmd in ("discover", "di"):
+    if args._cmd in ("requires", "req"):
+        requires(args)
+    elif args._cmd in ("discover", "di"):
         discover(args)
     elif args._cmd == "show":
         show(args)
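
The fmf subcommands now share one set of discovery options (--plan, --test, --filter, --condition, --exclude, --context) and build their FMFTests instance through make_fmftests(). A minimal sketch of the equivalent direct call, assuming fmf here is atex's own fmf helper module (imported relatively by this CLI); the root, plan name, regexes, filter and context values are hypothetical:

    # Hypothetical values throughout; mirrors make_fmftests() above.
    from atex import fmf  # assumed absolute import path

    tests = fmf.FMFTests(
        ".",                           # --root
        "/plans/default",              # --plan (hypothetical plan name)
        names=["/tests/.*smoke.*"],    # --test regexes
        filters=["tier: 1"],           # hypothetical fmf-style filter
        conditions=None,               # --condition
        excludes=None,                 # --exclude
        context={"distro": "fedora"},  # --context key=value pairs
    )
    for pkg in sorted(fmf.all_pkg_requires(tests, key="require")):
        print(pkg)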
atex/cli/libvirt.py ADDED
@@ -0,0 +1,127 @@
+import sys
+import re
+
+import libvirt
+
+from ..provision.libvirt import locking
+
+
+def _libvirt_open(url=None):
+    # pass no arguments if url is None
+    conn = libvirt.open(*((url,) if url else ()))
+    print(f"Connected to {conn.getHostname()} via {conn.getURI()}\n")
+    return conn
+
+
+def get_locks(args):
+    conn = _libvirt_open(args.connect)
+    domains = conn.listAllDomains(libvirt.VIR_CONNECT_LIST_DOMAINS_PERSISTENT)
+    for domain in sorted(domains, key=lambda d: d.name()):
+        print(f"{domain.name()}:")
+        for sig, stamp in locking.get_locks(domain, expired=args.expired):
+            print(f" {sig} {stamp}")
+        print()
+
+
+def unlock(args):
+    conn = _libvirt_open(args.connect)
+    dom = conn.lookupByName(args.domain)
+    locking.unlock(dom, args.signature)
+
+
+def lock(args):
+    conn = _libvirt_open(args.connect)
+    dom = conn.lookupByName(args.domain)
+    if locking.lock(dom, args.signature, args.timestamp):
+        print("Succeeded.")
+        sys.exit(0)
+    else:
+        print("Failed (already locked).")
+        sys.exit(2)
+
+
+def unlock_all(args):
+    conn = _libvirt_open(args.connect)
+    if args.domains:
+        def domains(dom):
+            return bool(re.fullmatch(args.domains, dom.name()))
+    else:
+        def domains(_):
+            return True
+    locking.unlock_all(conn, args.signature, args.shutdown, domains)
+
+
+def cleanup_expired(args):
+    conn = _libvirt_open(args.connect)
+    if args.domains:
+        def domains(dom):
+            return bool(re.fullmatch(args.domains, dom.name()))
+    else:
+        def domains(_):
+            return True
+    locking.cleanup_expired(conn, args.timestamp, domains)
+
+
+def parse_args(parser):
+    parser.add_argument("--connect", "-c", help="Libvirt URL to connect to", metavar="URL")
+    cmds = parser.add_subparsers(
+        dest="_cmd", help="libvirt helper to run", metavar="<cmd>", required=True,
+    )
+
+    cmd = cmds.add_parser(
+        "get-locks",
+        help="List all locks (signatures)",
+    )
+    cmd.add_argument("--expired", help="List also expired locks", action="store_true")
+
+    cmd = cmds.add_parser(
+        "unlock",
+        help="Remove a lock signature from a domain",
+    )
+    cmd.add_argument("domain", help="Domain name")
+    cmd.add_argument("signature", help="Lock signature UUID")
+
+    cmd = cmds.add_parser(
+        "lock",
+        help="Lock a domain (exit 0:success, 2:fail)",
+    )
+    cmd.add_argument("domain", help="Domain name")
+    cmd.add_argument("signature", help="Lock signature UUID")
+    cmd.add_argument("timestamp", help="Expiration time for the lock")
+
+    cmd = cmds.add_parser(
+        "unlock-all",
+        help="Remove all lock signatures from all domains",
+    )
+    cmd.add_argument("--signature", help="Only remove this UUID")
+    cmd.add_argument("--shutdown", help="Also destroy the domains", action="store_true")
+    cmd.add_argument("--domains", help="Which domain names to impact", metavar="REGEX")
+
+    cmd = cmds.add_parser(
+        "cleanup-expired",
+        help="Remove expired lock signatures from all domains",
+    )
+    cmd.add_argument("--timestamp", help="Check against this instead of UTC now()")
+    cmd.add_argument("--domains", help="Which domain names to impact", metavar="REGEX")
+
+
+def main(args):
+    if args._cmd == "get-locks":
+        get_locks(args)
+    elif args._cmd == "unlock":
+        unlock(args)
+    elif args._cmd == "lock":
+        lock(args)
+    elif args._cmd == "unlock-all":
+        unlock_all(args)
+    elif args._cmd == "cleanup-expired":
+        cleanup_expired(args)
+    else:
+        raise RuntimeError(f"unknown args: {args}")
+
+
+CLI_SPEC = {
+    "help": "various utils for the Libvirt provisioner",
+    "args": parse_args,
+    "main": main,
+}
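
The subcommands above are thin wrappers around the provisioner's locking helpers. A minimal sketch of the same flow done programmatically, using only calls visible in this file; the domain name, signature UUID and timestamp format are hypothetical:

    # Hypothetical domain name, UUID and timestamp; the helper signatures
    # are taken from the CLI wrappers above.
    import libvirt
    from atex.provision.libvirt import locking

    conn = libvirt.open()  # default hypervisor URI, as in _libvirt_open(None)
    dom = conn.lookupByName("my-vm-1")

    signature = "5f0c6a3e-1111-2222-3333-444444444444"
    if locking.lock(dom, signature, "2030-01-01T00:00:00"):  # assumed timestamp format
        try:
            pass  # provision and use the domain here
        finally:
            locking.unlock(dom, signature)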
atex/cli/testingfarm.py CHANGED
@@ -87,12 +87,19 @@ def reserve(args):
     else:
         hardware = None
 
+    if args.native_test:
+        test = tf.DEFAULT_RESERVE_TEST.copy()
+        test["name"] = "/plans/testing-farm-native"
+    else:
+        test = None
+
     api = _get_api(args)
     res = tf.Reserve(
         compose=args.compose,
         arch=args.arch,
         timeout=args.timeout,
         hardware=hardware,
+        reserve_test=test,
         api=api,
     )
     with res as m:
@@ -200,6 +207,11 @@ def parse_args(parser):
     cmd.add_argument("--timeout", "-t", help="pipeline timeout (in minutes)", type=int, default=60)
     cmd.add_argument("--ssh-key", help="path to a ssh private key file like 'id_rsa'")
     cmd.add_argument("--hvm", help="request a HVM virtualization capable HW", action="store_true")
+    cmd.add_argument(
+        "--native-test",
+        help="use the default testing farm reserve test",
+        action="store_true",
+    )
 
     cmd = cmds.add_parser(
         "watch-pipeline", aliases=("wp",),
@@ -1,6 +1,5 @@
 import importlib as _importlib
 import pkgutil as _pkgutil
-import threading as _threading
 
 from .. import util as _util
 
@@ -34,18 +33,21 @@ class Connection:
     ie. disconnect() might be called from a different thread while connect()
     or cmd() are still running.
     Similarly, multiple threads may run cmd() or rsync() independently.
-    """
 
-    def __init__(self):
-        """
-        Initialize the connection instance.
-        If extending __init__, always call 'super().__init__()' at the top.
-        """
-        self.lock = _threading.RLock()
+    TODO: document that any exceptions raised by a Connection should be children
+    of ConnectionError
+
+    If any connection-related error happens, a ConnectionError (or an exception
+    derived from it) must be raised.
+    """
 
     def __enter__(self):
-        self.connect()
-        return self
+        try:
+            self.connect()
+            return self
+        except Exception:
+            self.disconnect()
+            raise
 
     def __exit__(self, exc_type, exc_value, traceback):
         self.disconnect()
@@ -65,7 +67,7 @@ class Connection:
         """
         raise NotImplementedError(f"'disconnect' not implemented for {self.__class__.__name__}")
 
-    def cmd(self, command, func=_util.subprocess_run, **func_args):
+    def cmd(self, command, *, func=_util.subprocess_run, **func_args):
         """
         Execute a single command on the remote, using subprocess-like semantics.
 
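
With this change the base Connection class no longer defines __init__ (or the shared RLock), __enter__ disconnects if connect() fails, and connection failures are expected to surface as ConnectionError subclasses. A minimal illustrative subclass, not part of atex; the class name, error name and import paths are assumptions:

    # Hypothetical subclass showing the updated contract: no super().__init__()
    # to call, errors derived from ConnectionError, keyword-only 'func' in cmd().
    from atex.connection import Connection  # assumed import path
    from atex import util                   # assumed import path


    class LoopbackConnError(ConnectionError):
        pass


    class LoopbackConn(Connection):
        def __init__(self, ready=True):
            self.ready = ready

        def connect(self, block=True):
            if not self.ready:
                raise LoopbackConnError("endpoint not ready")

        def disconnect(self):
            pass

        def cmd(self, command, *, func=util.subprocess_run, **func_args):
            # run the command locally, mirroring the subprocess-like semantics
            return func(tuple(command), skip_frames=1, **func_args)


    # the context manager calls disconnect() even when connect() raises
    with LoopbackConn() as conn:
        conn.cmd(("true",))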
 
@@ -0,0 +1,63 @@
+"""
+Connection API implementation using the 'podman' CLI client.
+"""
+
+import subprocess
+
+from .. import util
+from . import Connection
+
+
+class PodmanConnError(ConnectionError):
+    pass
+
+
+class PodmanConn(Connection):
+    """
+    Implements the Connection API via 'podman container exec' on an
+    already-running container, it does not handle any image pulling,
+    container creation, starting or stopping.
+    """
+
+    # def __init__(self, container, *, user=None, workdir=None):
+    #     """
+    #     'container' is a string with either the full or partial podman
+    #     container ID, or a container name, as recognized by podman CLI.
+    #
+    #     'user' is a string with a username or UID, possibly including a GID,
+    #     passed to the podman CLI as --user.
+    #
+    #     'workdir' is a string specifying the CWD inside the container.
+    #     """
+    def __init__(self, container):
+        self.container = container
+
+    def connect(self, block=True):
+        pass
+
+    def disconnect(self):
+        pass
+
+    # have options as kwarg to be compatible with other functions here
+    def cmd(self, command, *, func=util.subprocess_run, **func_args):
+        return func(
+            ("podman", "container", "exec", "-i", self.container, *command),
+            skip_frames=1,
+            **func_args,
+        )
+
+    def rsync(self, *args, func=util.subprocess_run, **func_args):
+        return func(
+            (
+                "rsync",
+                # use shell to strip off the destination argument rsync passes
+                # cmd[0]=/bin/bash cmd[1]=-c cmd[2]=exec podman ... cmd[3]=destination
+                # cmd[4]=rsync cmd[5]=--server cmd[6]=-vve.LsfxCIvu cmd[7]=. cmd[8]=.
+                "-e", f"/bin/bash -c 'exec podman container exec -i {self.container} \"$@\"'",
+                *args,
+            ),
+            skip_frames=1,
+            check=True,
+            stdin=subprocess.DEVNULL,
+            **func_args,
+        )
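
A usage sketch for the new podman-backed Connection; the file path is not shown in this diff, so atex.connection.podman is an assumed module path, and the container name and rsync paths are hypothetical placeholders:

    from atex.connection.podman import PodmanConn  # assumed module path

    with PodmanConn("my-container") as conn:
        # runs: podman container exec -i my-container uname -a
        conn.cmd(("uname", "-a"))

        # rsync() tunnels the transfer through 'podman container exec'; its -e
        # wrapper discards the fake remote-host part, so any placeholder before
        # the colon works
        conn.rsync("-r", "./payload/", "container:/root/payload/")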
atex/connection/ssh.py CHANGED
@@ -18,6 +18,7 @@ import os
 import time
 import shlex
 import tempfile
+import threading
 import subprocess
 from pathlib import Path
 
@@ -38,7 +39,7 @@ DEFAULT_OPTIONS = {
 }
 
 
-class SSHError(Exception):
+class SSHError(ConnectionError):
     pass
 
 
@@ -154,7 +155,6 @@ class StatelessSSHConn(Connection):
         If 'sudo' specifies a username, call sudo(8) on the remote shell
         to run under a different user on the remote host.
         """
-        super().__init__()
         self.options = DEFAULT_OPTIONS.copy()
         self.options.update(options)
         self.password = password
@@ -162,7 +162,7 @@
         self._tmpdir = None
         self._master_proc = None
 
-    def connect(self):
+    def connect(self, block=True):
         """
         Optional, .cmd() and .rsync() work without it, but it is provided here
         for compatibility with the Connection API.
@@ -178,7 +178,8 @@
         unified_options = self.options.copy()
         if options:
             unified_options.update(options)
-        unified_options["RemoteCommand"] = _shell_cmd(command, sudo=self.sudo)
+        if command:
+            unified_options["RemoteCommand"] = _shell_cmd(command, sudo=self.sudo)
         return func(
             _options_to_ssh(unified_options, password=self.password),
             skip_frames=1,
@@ -240,7 +241,7 @@ class ManagedSSHConn(Connection):
         If 'sudo' specifies a username, call sudo(8) on the remote shell
         to run under a different user on the remote host.
         """
-        super().__init__()
+        self.lock = threading.RLock()
         self.options = DEFAULT_OPTIONS.copy()
         self.options.update(options)
         self.password = password
@@ -267,8 +268,9 @@
         proc = self._master_proc
         if not proc:
             return
+        util.debug(f"disconnecting: {self.options}")
         proc.kill()
-        # don"t zombie forever, return EPIPE on any attempts to write to us
+        # don't zombie forever, return EPIPE on any attempts to write to us
         proc.stdout.close()
         proc.wait()
         (self._tmpdir / "control.sock").unlink(missing_ok=True)
@@ -286,19 +288,26 @@
         sock = self._tmpdir / "control.sock"
 
         if not self._master_proc:
+            util.debug(f"connecting: {self.options}")
             options = self.options.copy()
             options["SessionType"] = "none"
             options["ControlMaster"] = "yes"
             options["ControlPath"] = sock
             self._master_proc = util.subprocess_Popen(
-                _options_to_ssh(options),
+                _options_to_ssh(options, password=self.password),
                 stdin=subprocess.DEVNULL,
                 stdout=subprocess.PIPE,
                 stderr=subprocess.STDOUT,
                 cwd=str(self._tmpdir),
+                start_new_session=True,  # resist Ctrl-C
             )
             os.set_blocking(self._master_proc.stdout.fileno(), False)
 
+        # NOTE: ideally, we would .read() before checking .poll() because
+        #       if the process writes a lot, it gets stuck in the pipe
+        #       (in kernel) and the process never ends; but output-appending
+        #       code would be obscure, and ssh(1) never outputs that much ..
+
         proc = self._master_proc
         if block:
             while proc.poll() is None:
@@ -309,7 +318,6 @@
             code = proc.poll()
             out = proc.stdout.read()
             self._master_proc = None
-            # TODO: ConnectError should probably be generalized for Connection
             raise ConnectError(
                 f"SSH ControlMaster failed to start on {self._tmpdir} with {code}:\n{out}",
            )
@@ -318,54 +326,45 @@
            if code is not None:
                out = proc.stdout.read()
                self._master_proc = None
-                # TODO: ConnectError should probably be generalized for Connection
                raise ConnectError(
                    f"SSH ControlMaster failed to start on {self._tmpdir} with {code}:\n{out}",
                )
            elif not sock.exists():
                raise BlockingIOError("SSH ControlMaster not yet ready")
 
-    def add_local_forward(self, *spec):
+    def forward(self, forward_type, *spec, cancel=False):
         """
         Add (one or more) ssh forwarding specifications as 'spec' to an
         already-connected instance. Each specification has to follow the
-        format of ssh client's LocalForward option (see ssh_config(5)).
-        """
-        self.assert_master()
-        options = self.options.copy()
-        options["LocalForward"] = spec
-        options["ControlPath"] = self._tmpdir / "control.sock"
-        util.subprocess_run(
-            _options_to_ssh(options, extra_cli_flags=("-O", "forward")),
-            skip_frames=1,
-            check=True,
-        )
+        format of LocalForward or RemoteForward (see ssh_config(5)).
+        Ie. "1234 1.2.3.4:22" or "0.0.0.0:1234 1.2.3.4:22".
 
-    def add_remote_forward(self, *spec):
-        """
-        Add (one or more) ssh forwarding specifications as 'spec' to an
-        already-connected instance. Each specification has to follow the
-        format of ssh client's RemoteForward option (see ssh_config(5)).
+        'forward_type' must be either LocalForward or RemoteForward.
+
+        If 'cancel' is True, cancel the forwarding instead of adding it.
         """
+        assert forward_type in ("LocalForward", "RemoteForward")
         self.assert_master()
-        options = self.options.copy()
-        options["RemoteForward"] = spec
+        options = DEFAULT_OPTIONS.copy()
+        options[forward_type] = spec
         options["ControlPath"] = self._tmpdir / "control.sock"
+        action = "forward" if not cancel else "cancel"
         util.subprocess_run(
-            _options_to_ssh(options, extra_cli_flags=("-O", "forward")),
+            _options_to_ssh(options, extra_cli_flags=("-O", action)),
             skip_frames=1,
            check=True,
        )
 
-    def cmd(self, command, options=None, func=util.subprocess_run, **func_args):
+    def cmd(self, command, *, options=None, func=util.subprocess_run, **func_args):
         self.assert_master()
         unified_options = self.options.copy()
         if options:
             unified_options.update(options)
-        unified_options["RemoteCommand"] = _shell_cmd(command, sudo=self.sudo)
+        if command:
+            unified_options["RemoteCommand"] = _shell_cmd(command, sudo=self.sudo)
         unified_options["ControlPath"] = self._tmpdir / "control.sock"
         return func(
-            _options_to_ssh(unified_options, password=self.password),
+            _options_to_ssh(unified_options),
             skip_frames=1,
             **func_args,
         )
@@ -380,7 +379,6 @@
             _rsync_host_cmd(
                 *args,
                 options=unified_options,
-                password=self.password,
                 sudo=self.sudo,
             ),
             skip_frames=1,
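
add_local_forward() and add_remote_forward() are folded into a single forward() method that can also cancel an existing forwarding. A short usage sketch based only on the signature and docstring above; the port and address specs are hypothetical, and conn stands for an already-connected ManagedSSHConn instance:

    conn.forward("LocalForward", "8080 127.0.0.1:80")               # add a local forwarding
    conn.forward("RemoteForward", "0.0.0.0:2222 127.0.0.1:22")      # add a remote forwarding
    conn.forward("LocalForward", "8080 127.0.0.1:80", cancel=True)  # cancel it again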