atex 0.8__py3-none-any.whl → 0.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- atex/aggregator/__init__.py +60 -0
- atex/aggregator/json.py +96 -0
- atex/cli/__init__.py +11 -1
- atex/cli/fmf.py +73 -23
- atex/cli/libvirt.py +128 -0
- atex/cli/testingfarm.py +60 -3
- atex/connection/__init__.py +13 -11
- atex/connection/podman.py +61 -0
- atex/connection/ssh.py +38 -47
- atex/executor/executor.py +144 -119
- atex/executor/reporter.py +66 -71
- atex/executor/scripts.py +13 -5
- atex/executor/testcontrol.py +43 -30
- atex/fmf.py +94 -74
- atex/orchestrator/__init__.py +76 -2
- atex/orchestrator/adhoc.py +465 -0
- atex/{provision → provisioner}/__init__.py +54 -42
- atex/provisioner/libvirt/__init__.py +2 -0
- atex/provisioner/libvirt/libvirt.py +472 -0
- atex/provisioner/libvirt/locking.py +170 -0
- atex/{provision → provisioner}/libvirt/setup-libvirt.sh +21 -1
- atex/provisioner/podman/__init__.py +2 -0
- atex/provisioner/podman/podman.py +169 -0
- atex/{provision → provisioner}/testingfarm/api.py +121 -69
- atex/{provision → provisioner}/testingfarm/testingfarm.py +44 -52
- atex/util/libvirt.py +18 -0
- atex/util/log.py +53 -43
- atex/util/named_mapping.py +158 -0
- atex/util/subprocess.py +46 -12
- atex/util/threads.py +71 -20
- atex-0.10.dist-info/METADATA +86 -0
- atex-0.10.dist-info/RECORD +44 -0
- atex/orchestrator/aggregator.py +0 -106
- atex/orchestrator/orchestrator.py +0 -324
- atex/provision/libvirt/__init__.py +0 -24
- atex/provision/podman/README +0 -59
- atex/provision/podman/host_container.sh +0 -74
- atex-0.8.dist-info/METADATA +0 -197
- atex-0.8.dist-info/RECORD +0 -37
- /atex/{provision → provisioner}/libvirt/VM_PROVISION +0 -0
- /atex/{provision → provisioner}/testingfarm/__init__.py +0 -0
- {atex-0.8.dist-info → atex-0.10.dist-info}/WHEEL +0 -0
- {atex-0.8.dist-info → atex-0.10.dist-info}/entry_points.txt +0 -0
- {atex-0.8.dist-info → atex-0.10.dist-info}/licenses/COPYING.txt +0 -0
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
import importlib as _importlib
|
|
2
|
+
import pkgutil as _pkgutil
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class Aggregator:
    """
    Abstract base class for result aggregators.

    Subclasses collect per-test results and files into one shared store;
    they must implement ingest(), start() and stop().  Instances can be
    used as context managers (start on enter, stop on exit).
    """

    def ingest(self, platform, test_name, results_file, files_dir):
        """
        Process 'results_file' (string/Path) for reported results and append
        them to the overall aggregated line-JSON file, recursively copying over
        the dir structure under 'files_dir' (string/Path) under the respective
        platform and test name in the aggregated storage dir.
        """
        raise NotImplementedError(f"'ingest' not implemented for {self.__class__.__name__}")

    def start(self):
        """
        Start the Aggregator instance, opening any files / allocating resources
        as necessary.
        """
        raise NotImplementedError(f"'start' not implemented for {self.__class__.__name__}")

    def stop(self):
        """
        Stop the Aggregator instance, freeing all allocated resources.
        """
        raise NotImplementedError(f"'stop' not implemented for {self.__class__.__name__}")

    def __enter__(self):
        try:
            self.start()
            return self
        except Exception:
            # BUGFIX: was self.close(), which does not exist on this class -
            # release whatever start() managed to allocate via stop()
            self.stop()
            raise

    def __exit__(self, exc_type, exc_value, traceback):
        self.stop()
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
# Names of all importable submodules living in this package directory.
_submodules = [
    module_info.name
    for module_info in _pkgutil.iter_modules(__spec__.submodule_search_locations)
]

__all__ = [*_submodules, Aggregator.__name__]  # noqa: PLE0604


def __dir__():
    """Advertise the lazily-importable submodules alongside the base class."""
    return __all__


# PEP 562 module-level __getattr__: import a submodule only on first access
def __getattr__(attr):
    if attr not in _submodules:
        raise AttributeError(f"module '{__name__}' has no attribute '{attr}'")
    return _importlib.import_module(f".{attr}", __name__)
|
atex/aggregator/json.py
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
1
|
+
import gzip
|
|
2
|
+
import json
|
|
3
|
+
import shutil
|
|
4
|
+
import threading
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
|
|
7
|
+
from . import Aggregator
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class JSONAggregator(Aggregator):
    """
    Collects reported results as a GZIP-ed line-JSON and files (logs) from
    multiple test runs under a shared directory.

    Note that the aggregated JSON file *does not* use the test-based JSON format
    described by executor/RESULTS.md - both use JSON, but are very different.

    This aggregated format uses a top-level array (on each line) with a fixed
    field order:

        platform, status, test name, subtest name, files, note

    All these are strings except 'files', which is another (nested) array
    of strings.

    If a field is missing in the source result, it is translated to a null
    value.
    """

    def __init__(self, json_file, storage_dir):
        """
        'json_file' is a string/Path to a .json.gz file with aggregated results.

        'storage_dir' is a string/Path of the top-level parent for all
        per-platform / per-test files uploaded by tests.
        """
        # serializes concurrent ingest() writes to the shared gzip stream
        self.lock = threading.RLock()
        self.storage_dir = Path(storage_dir)
        self.json_file = Path(json_file)
        self.json_gzip_fobj = None

    def start(self):
        # validate both output locations *before* creating either, so a failed
        # start() never leaves a stray half-created json file behind
        if self.json_file.exists():
            raise FileExistsError(f"{self.json_file} already exists")
        if self.storage_dir.exists():
            raise FileExistsError(f"{self.storage_dir} already exists")
        self.json_gzip_fobj = gzip.open(self.json_file, "wt", newline="\n")
        self.storage_dir.mkdir()

    def stop(self):
        if self.json_gzip_fobj:
            self.json_gzip_fobj.close()
            self.json_gzip_fobj = None

    def ingest(self, platform, test_name, results_file, files_dir):
        """
        Parse 'results_file' (line-JSON) and append its results to the
        aggregated gzip file, then move 'files_dir' under the per-platform /
        per-test path in the storage dir.  The results file is deleted
        afterwards.
        """
        platform_dir = self.storage_dir / platform
        test_dir = platform_dir / test_name.lstrip("/")
        if test_dir.exists():
            raise FileExistsError(f"{test_dir} already exists for {test_name}")

        # parse the results separately, before writing any aggregated output,
        # to ensure that either all results from the test are ingested, or none
        # at all (ie. if one of the result lines contains JSON errors)
        output_lines = []
        with open(results_file) as results_fobj:
            for raw_line in results_fobj:
                result_line = json.loads(raw_line)

                file_names = []
                if "testout" in result_line:
                    file_names.append(result_line["testout"])
                if "files" in result_line:
                    file_names += (f["name"] for f in result_line["files"])

                output_line = (
                    platform,
                    result_line["status"],
                    test_name,
                    result_line.get("name"),  # subtest
                    file_names,
                    result_line.get("note"),
                )
                encoded = json.dumps(output_line, indent=None)
                output_lines.append(encoded)

        output_str = "\n".join(output_lines) + "\n"

        with self.lock:
            self.json_gzip_fobj.write(output_str)
            self.json_gzip_fobj.flush()

        Path(results_file).unlink()

        # BUGFIX: test names may contain multiple path components
        # (eg. '/a/b/test') - create all intermediate dirs, not just
        # the platform dir, or shutil.move() below fails
        test_dir.parent.mkdir(parents=True, exist_ok=True)
        shutil.move(files_dir, test_dir)
|
atex/cli/__init__.py
CHANGED
|
@@ -27,12 +27,18 @@ import pkgutil
|
|
|
27
27
|
import argparse
|
|
28
28
|
import logging
|
|
29
29
|
|
|
30
|
+
from .. import util
|
|
31
|
+
|
|
30
32
|
|
|
31
33
|
def setup_logging(level):
    """Configure root logging to stderr; use a verbose format at EXTRADEBUG."""
    fmt = (
        "%(asctime)s %(name)s: %(filename)s:%(lineno)s: %(funcName)s(): %(message)s"
        if level <= util.EXTRADEBUG
        else "%(asctime)s %(name)s: %(message)s"
    )
    logging.basicConfig(
        level=level,
        stream=sys.stderr,
        format=fmt,
        datefmt="%Y-%m-%d %H:%M:%S",
    )
|
|
38
44
|
|
|
@@ -53,6 +59,10 @@ def main():
|
|
|
53
59
|
"--debug", "-d", action="store_const", dest="loglevel", const=logging.DEBUG,
|
|
54
60
|
help="enable extra debugging (logging.DEBUG)",
|
|
55
61
|
)
|
|
62
|
+
log_grp.add_argument(
|
|
63
|
+
"--extra-debug", "-D", action="store_const", dest="loglevel", const=util.EXTRADEBUG,
|
|
64
|
+
help="enable extra debugging (atex.util.EXTRADEBUG)",
|
|
65
|
+
)
|
|
56
66
|
log_grp.add_argument(
|
|
57
67
|
"--quiet", "-q", action="store_const", dest="loglevel", const=logging.WARNING,
|
|
58
68
|
help="be quiet during normal operation (logging.WARNING)",
|
atex/cli/fmf.py
CHANGED
|
@@ -18,24 +18,42 @@ def _get_context(args):
|
|
|
18
18
|
return context or None
|
|
19
19
|
|
|
20
20
|
|
|
21
|
+
def make_fmftests(args):
    """Build an fmf.FMFTests instance from parsed CLI arguments."""
    # empty CLI lists fall back to the plan's own settings (None)
    selection = {
        "names": args.test or None,
        "filters": args.filter or None,
        "conditions": args.condition or None,
        "excludes": args.exclude or None,
    }
    return fmf.FMFTests(
        args.root,
        args.plan,
        context=_get_context(args),
        **selection,
    )
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def requires(args):
    """Print the sorted union of 'require' and 'recommend' packages."""
    fmftests = make_fmftests(args)
    packages = {
        pkg
        for key in ("require", "recommend")
        for pkg in fmf.all_pkg_requires(fmftests, key=key)
    }
    for pkg in sorted(packages):
        print(pkg)
|
|
40
|
+
|
|
41
|
+
|
|
21
42
|
def discover(args):
    """List the names of all discovered tests, one per line."""
    fmftests = make_fmftests(args)
    for test_name in fmftests.tests:
        print(test_name)
|
|
25
46
|
|
|
26
47
|
|
|
27
48
|
def show(args):
    """Pretty-print the fmf metadata of every selected test."""
    fmftests = make_fmftests(args)
    for name, data in fmftests.tests.items():
        header = f"\n--- {name} ---"
        print(header)
        pprint.pprint(data)
|
|
35
53
|
|
|
36
54
|
|
|
37
55
|
def prepare(args):
|
|
38
|
-
result =
|
|
56
|
+
result = make_fmftests(args)
|
|
39
57
|
print("--- fmf root ---")
|
|
40
58
|
print(str(result.root))
|
|
41
59
|
print("--- prepare packages ---")
|
|
@@ -46,37 +64,69 @@ def prepare(args):
|
|
|
46
64
|
print("--- prepare script ---")
|
|
47
65
|
print(script)
|
|
48
66
|
print("----------------------")
|
|
67
|
+
for script in result.finish_scripts:
|
|
68
|
+
print("--- finish script ---")
|
|
69
|
+
print(script)
|
|
70
|
+
print("----------------------")
|
|
49
71
|
|
|
50
72
|
|
|
51
|
-
def
|
|
73
|
+
def add_fmf_options(parser):
    """Register the fmf selection options shared by all subcommands."""
    parser.add_argument("--root", help="path to directory with fmf tests", default=".")
    parser.add_argument("--plan", help="plan name (defaults to dummy plan)")
    # repeatable (action="append") selection options
    repeatable = (
        (("--test", "-t"), "test name regex (replacing 'test' from plan)"),
        (("--exclude",), "test name regex (replacing 'exclude' from plan)"),
        (("--condition",), "fmf-style python condition"),
        (("--filter",), "fmf-style expression filter (replacing 'filter' from plan)"),
        (("--context", "-c"), "tmt style key=value context"),
    )
    for flags, help_text in repeatable:
        parser.add_argument(*flags, help=help_text, action="append")
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
def parse_args(parser):
    """Define the fmf CLI: shared fmf options plus one subparser per feature."""
    add_fmf_options(parser)

    cmds = parser.add_subparsers(
        dest="_cmd", help="fmf feature", metavar="<cmd>", required=True,
    )

    cmds.add_parser(
        "requires", aliases=("req",),
        help="list requires/recommends of the plan and its tests",
    )
    cmds.add_parser(
        "discover", aliases=("di",),
        help="list tests, possibly post-processed by a tmt plan",
    )
    cmds.add_parser("show", help="show fmf metadata of test(s)")
    cmds.add_parser("prepare", help="show prepare-related details from a plan")
|
|
75
|
-
cmd.add_argument("plan", help="tmt plan to parse")
|
|
76
124
|
|
|
77
125
|
|
|
78
126
|
def main(args):
|
|
79
|
-
if args._cmd in ("
|
|
127
|
+
if args._cmd in ("requires", "req"):
|
|
128
|
+
requires(args)
|
|
129
|
+
elif args._cmd in ("discover", "di"):
|
|
80
130
|
discover(args)
|
|
81
131
|
elif args._cmd == "show":
|
|
82
132
|
show(args)
|
atex/cli/libvirt.py
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
1
|
+
import sys
|
|
2
|
+
import re
|
|
3
|
+
|
|
4
|
+
from .. import util
|
|
5
|
+
from ..provisioner.libvirt import locking
|
|
6
|
+
|
|
7
|
+
libvirt = util.import_libvirt()
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def _libvirt_open(url=None):
    """Open a libvirt connection (default URI when 'url' is None), announce it."""
    if url:
        conn = libvirt.open(url)
    else:
        # calling open() with no argument selects the default libvirt URI
        conn = libvirt.open()
    print(f"Connected to {conn.getHostname()} via {conn.getURI()}\n")
    return conn
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def get_locks(args):
    """Print every lock signature (and timestamp) for each persistent domain."""
    conn = _libvirt_open(args.connect)
    persistent = conn.listAllDomains(libvirt.VIR_CONNECT_LIST_DOMAINS_PERSISTENT)

    def by_name(dom):
        return dom.name()

    for dom in sorted(persistent, key=by_name):
        print(f"{dom.name()}:")
        for signature, timestamp in locking.get_locks(dom, expired=args.expired):
            print(f"  {signature} {timestamp}")
        print()
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def unlock(args):
    """Drop one lock signature from the given domain."""
    conn = _libvirt_open(args.connect)
    domain = conn.lookupByName(args.domain)
    locking.unlock(domain, args.signature)
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def lock(args):
    """Attempt to lock a domain; exit 0 on success, 2 when already locked."""
    conn = _libvirt_open(args.connect)
    domain = conn.lookupByName(args.domain)
    acquired = locking.lock(domain, args.signature, args.timestamp)
    if not acquired:
        print("Failed (already locked).")
        sys.exit(2)
    print("Succeeded.")
    sys.exit(0)
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def unlock_all(args):
    """Remove lock signatures (optionally one UUID) from matching domains."""
    conn = _libvirt_open(args.connect)
    pattern = args.domains

    def domains(dom):
        # no --domains regex given means "match every domain"
        return not pattern or bool(re.fullmatch(pattern, dom.name()))

    locking.unlock_all(conn, args.signature, args.shutdown, domains)
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def cleanup_expired(args):
    """Remove expired lock signatures from matching domains."""
    conn = _libvirt_open(args.connect)
    pattern = args.domains

    def domains(dom):
        # no --domains regex given means "match every domain"
        return not pattern or bool(re.fullmatch(pattern, dom.name()))

    locking.cleanup_expired(conn, args.timestamp, domains)
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
def parse_args(parser):
    """Define the libvirt helper CLI: connection option plus subcommands."""
    parser.add_argument("--connect", "-c", help="Libvirt URL to connect to", metavar="URL")
    cmds = parser.add_subparsers(
        dest="_cmd", help="libvirt helper to run", metavar="<cmd>", required=True,
    )

    sub = cmds.add_parser("get-locks", help="List all locks (signatures)")
    sub.add_argument("--expired", help="List also expired locks", action="store_true")

    sub = cmds.add_parser("unlock", help="Remove a lock signature from a domain")
    sub.add_argument("domain", help="Domain name")
    sub.add_argument("signature", help="Lock signature UUID")

    sub = cmds.add_parser("lock", help="Lock a domain (exit 0:success, 2:fail)")
    sub.add_argument("domain", help="Domain name")
    sub.add_argument("signature", help="Lock signature UUID")
    sub.add_argument("timestamp", help="Expiration time for the lock")

    sub = cmds.add_parser("unlock-all", help="Remove all lock signatures from all domains")
    sub.add_argument("--signature", help="Only remove this UUID")
    sub.add_argument("--shutdown", help="Also destroy the domains", action="store_true")
    sub.add_argument("--domains", help="Which domains names to impact", metavar="REGEX")

    sub = cmds.add_parser("cleanup-expired", help="Remove expired lock signatures from all domains")
    sub.add_argument("--timestamp", help="Check against this instead of UTC now()")
    sub.add_argument("--domains", help="Which domains names to impact", metavar="REGEX")
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
def main(args):
    """Dispatch to the handler for the subcommand selected by argparse."""
    handlers = {
        "get-locks": get_locks,
        "unlock": unlock,
        "lock": lock,
        "unlock-all": unlock_all,
        "cleanup-expired": cleanup_expired,
    }
    if args._cmd not in handlers:
        raise RuntimeError(f"unknown args: {args}")
    handlers[args._cmd](args)


CLI_SPEC = {
    "help": "various utils for the Libvirt provisioner",
    "args": parse_args,
    "main": main,
}
|
atex/cli/testingfarm.py
CHANGED
|
@@ -1,9 +1,10 @@
|
|
|
1
1
|
import sys
|
|
2
2
|
import json
|
|
3
3
|
import pprint
|
|
4
|
+
import collections
|
|
4
5
|
|
|
5
6
|
from .. import util
|
|
6
|
-
from ..
|
|
7
|
+
from ..provisioner.testingfarm import api as tf
|
|
7
8
|
|
|
8
9
|
|
|
9
10
|
def _get_api(args):
|
|
@@ -36,7 +37,6 @@ def composes(args):
|
|
|
36
37
|
def get_request(args):
    """Look up a Testing Farm request by its ID and print it."""
    api = _get_api(args)
    request = tf.Request(args.request_id, api=api)
    print(request)
|
|
41
41
|
|
|
42
42
|
|
|
@@ -79,6 +79,44 @@ def search_requests(args):
|
|
|
79
79
|
print(f"{created} {req_id} : {envs_str}")
|
|
80
80
|
|
|
81
81
|
|
|
82
|
+
def stats(args):
|
|
83
|
+
api = _get_api(args)
|
|
84
|
+
|
|
85
|
+
def top_users_repos(requests):
|
|
86
|
+
tokens = collections.defaultdict(int)
|
|
87
|
+
repos = collections.defaultdict(int)
|
|
88
|
+
for req in requests:
|
|
89
|
+
tokens[req["token_id"]] += 1
|
|
90
|
+
if "fmf" in req["test"] and req["test"]["fmf"]:
|
|
91
|
+
repos[req["test"]["fmf"]["url"]] += 1
|
|
92
|
+
elif "tmt" in req["test"] and req["test"]["tmt"]:
|
|
93
|
+
repos[req["test"]["tmt"]["url"]] += 1
|
|
94
|
+
|
|
95
|
+
print("Top 10 token IDs:")
|
|
96
|
+
for token_id in sorted(tokens, key=lambda x: tokens[x], reverse=True)[:10]:
|
|
97
|
+
count = tokens[token_id]
|
|
98
|
+
print(f"{count:>5} {token_id}")
|
|
99
|
+
|
|
100
|
+
print("Top 10 repo URLs:")
|
|
101
|
+
for repo_url in sorted(repos, key=lambda x: repos[x], reverse=True)[:10]:
|
|
102
|
+
count = repos[repo_url]
|
|
103
|
+
print(f"{count:>5} {repo_url}")
|
|
104
|
+
|
|
105
|
+
def chain_without_none(*iterables):
|
|
106
|
+
for itr in iterables:
|
|
107
|
+
if itr is None:
|
|
108
|
+
continue
|
|
109
|
+
for item in itr:
|
|
110
|
+
if item is not None:
|
|
111
|
+
yield item
|
|
112
|
+
|
|
113
|
+
queued_and_running = chain_without_none(
|
|
114
|
+
api.search_requests(state="queued", ranch=args.ranch, mine=False),
|
|
115
|
+
api.search_requests(state="running", ranch=args.ranch, mine=False),
|
|
116
|
+
)
|
|
117
|
+
top_users_repos(queued_and_running)
|
|
118
|
+
|
|
119
|
+
|
|
82
120
|
def reserve(args):
|
|
83
121
|
util.info(f"Reserving {args.compose} on {args.arch} for {args.timeout} minutes")
|
|
84
122
|
|
|
@@ -87,19 +125,25 @@ def reserve(args):
|
|
|
87
125
|
else:
|
|
88
126
|
hardware = None
|
|
89
127
|
|
|
128
|
+
if args.native_test:
|
|
129
|
+
test = tf.DEFAULT_RESERVE_TEST.copy()
|
|
130
|
+
test["name"] = "/plans/testing-farm-native"
|
|
131
|
+
else:
|
|
132
|
+
test = None
|
|
133
|
+
|
|
90
134
|
api = _get_api(args)
|
|
91
135
|
res = tf.Reserve(
|
|
92
136
|
compose=args.compose,
|
|
93
137
|
arch=args.arch,
|
|
94
138
|
timeout=args.timeout,
|
|
95
139
|
hardware=hardware,
|
|
140
|
+
reserve_test=test,
|
|
96
141
|
api=api,
|
|
97
142
|
)
|
|
98
143
|
with res as m:
|
|
99
144
|
util.info(f"Got machine: {m}")
|
|
100
145
|
while True:
|
|
101
146
|
try:
|
|
102
|
-
res.request.update()
|
|
103
147
|
res.request.assert_alive()
|
|
104
148
|
except tf.GoneAwayError as e:
|
|
105
149
|
print(e)
|
|
@@ -191,6 +235,12 @@ def parse_args(parser):
|
|
|
191
235
|
cmd.add_argument("--after", help="only requests created after ISO8601")
|
|
192
236
|
cmd.add_argument("--json", help="full details, one request per line", action="store_true")
|
|
193
237
|
|
|
238
|
+
cmd = cmds.add_parser(
|
|
239
|
+
"stats",
|
|
240
|
+
help="print out TF usage statistics",
|
|
241
|
+
)
|
|
242
|
+
cmd.add_argument("ranch", help="Testing Farm ranch name")
|
|
243
|
+
|
|
194
244
|
cmd = cmds.add_parser(
|
|
195
245
|
"reserve",
|
|
196
246
|
help="reserve a system and ssh into it",
|
|
@@ -200,6 +250,11 @@ def parse_args(parser):
|
|
|
200
250
|
cmd.add_argument("--timeout", "-t", help="pipeline timeout (in minutes)", type=int, default=60)
|
|
201
251
|
cmd.add_argument("--ssh-key", help="path to a ssh private key file like 'id_rsa'")
|
|
202
252
|
cmd.add_argument("--hvm", help="request a HVM virtualization capable HW", action="store_true")
|
|
253
|
+
cmd.add_argument(
|
|
254
|
+
"--native-test",
|
|
255
|
+
help="use the default testing farm reserve test",
|
|
256
|
+
action="store_true",
|
|
257
|
+
)
|
|
203
258
|
|
|
204
259
|
cmd = cmds.add_parser(
|
|
205
260
|
"watch-pipeline", aliases=("wp",),
|
|
@@ -221,6 +276,8 @@ def main(args):
|
|
|
221
276
|
cancel(args)
|
|
222
277
|
elif args._cmd in ("search-requests", "sr"):
|
|
223
278
|
search_requests(args)
|
|
279
|
+
elif args._cmd == "stats":
|
|
280
|
+
stats(args)
|
|
224
281
|
elif args._cmd == "reserve":
|
|
225
282
|
reserve(args)
|
|
226
283
|
elif args._cmd in ("watch-pipeline", "wp"):
|
atex/connection/__init__.py
CHANGED
|
@@ -1,6 +1,5 @@
|
|
|
1
1
|
import importlib as _importlib
|
|
2
2
|
import pkgutil as _pkgutil
|
|
3
|
-
import threading as _threading
|
|
4
3
|
|
|
5
4
|
from .. import util as _util
|
|
6
5
|
|
|
@@ -34,18 +33,21 @@ class Connection:
|
|
|
34
33
|
ie. disconnect() might be called from a different thread while connect()
|
|
35
34
|
or cmd() are still running.
|
|
36
35
|
Similarly, multiple threads may run cmd() or rsync() independently.
|
|
37
|
-
"""
|
|
38
36
|
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
37
|
+
TODO: document that any exceptions raised by a Connection should be children
|
|
38
|
+
of ConnectionError
|
|
39
|
+
|
|
40
|
+
If any connection-related error happens, a ConnectionError (or an exception
|
|
41
|
+
derived from it) must be raised.
|
|
42
|
+
"""
|
|
45
43
|
|
|
46
44
|
def __enter__(self):
|
|
47
|
-
|
|
48
|
-
|
|
45
|
+
try:
|
|
46
|
+
self.connect()
|
|
47
|
+
return self
|
|
48
|
+
except Exception:
|
|
49
|
+
self.disconnect()
|
|
50
|
+
raise
|
|
49
51
|
|
|
50
52
|
def __exit__(self, exc_type, exc_value, traceback):
|
|
51
53
|
self.disconnect()
|
|
@@ -65,7 +67,7 @@ class Connection:
|
|
|
65
67
|
"""
|
|
66
68
|
raise NotImplementedError(f"'disconnect' not implemented for {self.__class__.__name__}")
|
|
67
69
|
|
|
68
|
-
def cmd(self, command, func=_util.subprocess_run, **func_args):
|
|
70
|
+
def cmd(self, command, *, func=_util.subprocess_run, **func_args):
|
|
69
71
|
"""
|
|
70
72
|
Execute a single command on the remote, using subprocess-like semantics.
|
|
71
73
|
|
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Connection API implementation using the 'podman' CLI client.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import subprocess
|
|
6
|
+
|
|
7
|
+
from .. import util
|
|
8
|
+
from . import Connection
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class PodmanConnectionError(ConnectionError):
    # Podman-backend error type; derives from ConnectionError as the
    # Connection API expects for connection-related failures.
    pass
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class PodmanConnection(Connection):
    """
    Implements the Connection API via 'podman container exec' on an
    already-running container, it does not handle any image pulling,
    container creation, starting or stopping.
    """

    # TODO: support user= / workdir= kwargs (podman exec --user / --workdir)
    def __init__(self, container):
        """
        'container' is a string with either the full or partial podman
        container ID, or a container name, as recognized by podman CLI.
        """
        self.container = container

    def connect(self, block=True):
        # nothing to establish - the container must already be running
        pass

    def disconnect(self):
        # nothing to tear down - the container lifecycle is managed elsewhere
        pass

    # have options as kwarg to be compatible with other functions here
    def cmd(self, command, *, func=util.subprocess_run, **func_args):
        """Run 'command' (a sequence) inside the container via podman exec."""
        return func(
            ("podman", "container", "exec", "-i", self.container, *command),
            **func_args,
        )

    def rsync(self, *args, func=util.subprocess_run, **func_args):
        """Run rsync with its remote shell tunneled through podman exec."""
        # NOTE(review): self.container is interpolated into a shell command
        # string; a name containing quotes/spaces would break it - assumed
        # safe since podman names/IDs come from a restricted character set.
        return func(
            (
                "rsync",
                # use shell to strip off the destination argument rsync passes
                # cmd[0]=/bin/bash cmd[1]=-c cmd[2]=exec podman ... cmd[3]=destination
                # cmd[4]=rsync cmd[5]=--server cmd[6]=-vve.LsfxCIvu cmd[7]=. cmd[8]=.
                "-e", f"/bin/bash -c 'exec podman container exec -i {self.container} \"$@\"'",
                *args,
            ),
            check=True,
            stdin=subprocess.DEVNULL,
            **func_args,
        )
|