atex-0.8-py3-none-any.whl → atex-0.10-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- atex/aggregator/__init__.py +60 -0
- atex/aggregator/json.py +96 -0
- atex/cli/__init__.py +11 -1
- atex/cli/fmf.py +73 -23
- atex/cli/libvirt.py +128 -0
- atex/cli/testingfarm.py +60 -3
- atex/connection/__init__.py +13 -11
- atex/connection/podman.py +61 -0
- atex/connection/ssh.py +38 -47
- atex/executor/executor.py +144 -119
- atex/executor/reporter.py +66 -71
- atex/executor/scripts.py +13 -5
- atex/executor/testcontrol.py +43 -30
- atex/fmf.py +94 -74
- atex/orchestrator/__init__.py +76 -2
- atex/orchestrator/adhoc.py +465 -0
- atex/{provision → provisioner}/__init__.py +54 -42
- atex/provisioner/libvirt/__init__.py +2 -0
- atex/provisioner/libvirt/libvirt.py +472 -0
- atex/provisioner/libvirt/locking.py +170 -0
- atex/{provision → provisioner}/libvirt/setup-libvirt.sh +21 -1
- atex/provisioner/podman/__init__.py +2 -0
- atex/provisioner/podman/podman.py +169 -0
- atex/{provision → provisioner}/testingfarm/api.py +121 -69
- atex/{provision → provisioner}/testingfarm/testingfarm.py +44 -52
- atex/util/libvirt.py +18 -0
- atex/util/log.py +53 -43
- atex/util/named_mapping.py +158 -0
- atex/util/subprocess.py +46 -12
- atex/util/threads.py +71 -20
- atex-0.10.dist-info/METADATA +86 -0
- atex-0.10.dist-info/RECORD +44 -0
- atex/orchestrator/aggregator.py +0 -106
- atex/orchestrator/orchestrator.py +0 -324
- atex/provision/libvirt/__init__.py +0 -24
- atex/provision/podman/README +0 -59
- atex/provision/podman/host_container.sh +0 -74
- atex-0.8.dist-info/METADATA +0 -197
- atex-0.8.dist-info/RECORD +0 -37
- /atex/{provision → provisioner}/libvirt/VM_PROVISION +0 -0
- /atex/{provision → provisioner}/testingfarm/__init__.py +0 -0
- {atex-0.8.dist-info → atex-0.10.dist-info}/WHEEL +0 -0
- {atex-0.8.dist-info → atex-0.10.dist-info}/entry_points.txt +0 -0
- {atex-0.8.dist-info → atex-0.10.dist-info}/licenses/COPYING.txt +0 -0
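The most visible structural change is the rename of the provisioning package: everything under atex/provision/ now lives under atex/provisioner/, with new libvirt and podman backends added. A minimal, hypothetical compatibility shim for downstream code is sketched below; the submodule name "testingfarm" is taken from the file list above, and nothing else about the public API is assumed.

    # Hypothetical import shim for code that must run against both atex 0.8 and 0.10.
    try:
        from atex.provisioner import testingfarm   # 0.10 layout
    except ImportError:                             # running against atex 0.8
        from atex.provision import testingfarm     # 0.8 layout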
atex/executor/reporter.py
CHANGED
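The diff below replaces the old ctypes/linkat and O_TMPFILE machinery with plain files: results are streamed as JSON lines into 'results_file', uploaded files land under 'files_dir', and a temporary 'testout.temp' file is hardlinked to result-specified names. A hypothetical usage sketch based only on the signatures visible in the diff (the paths and result-dict keys are invented; the real result format is defined by RESULTS.md):

    # usage sketch, not taken from the package itself
    from atex.executor.reporter import Reporter

    with Reporter("/tmp/atex-out", "results.json", "files") as rep:
        # one JSON object per line is appended to /tmp/atex-out/results.json
        rep.report({"name": "/some/test", "status": "pass"})
        # stored under /tmp/atex-out/files/ relative to the result name
        with rep.open_file("dmesg.log", "/some/test") as f:
            f.write(b"kernel messages ...")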
@@ -1,70 +1,72 @@
 import os
 import json
-import ctypes
-import ctypes.util
-import contextlib
 from pathlib import Path

 from .. import util


-libc = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True)
-
-# int linkat(int olddirfd, const char *oldpath, int newdirfd, const char *newpath, int flags)
-libc.linkat.argtypes = (
-    ctypes.c_int,
-    ctypes.c_char_p,
-    ctypes.c_int,
-    ctypes.c_char_p,
-    ctypes.c_int,
-)
-libc.linkat.restype = ctypes.c_int
-
-# fcntl.h:#define AT_EMPTY_PATH 0x1000 /* Allow empty relative pathname */
-AT_EMPTY_PATH = 0x1000
-
-# fcntl.h:#define AT_FDCWD -100 /* Special value used to indicate
-AT_FDCWD = -100
-
-
-def linkat(*args):
-    if (ret := libc.linkat(*args)) == -1:
-        errno = ctypes.get_errno()
-        raise OSError(errno, os.strerror(errno))
-    return ret
-
-
 class Reporter:
     """
     Collects reported results (in a format specified by RESULTS.md) for
     a specific test, storing them persistently.
     """
-
-
-
+    # internal name, stored inside 'output_dir' and hardlinked to
+    # 'testout'-JSON-key-specified result entries; deleted on exit
+    TESTOUT = "testout.temp"

-
+    def __init__(self, output_dir, results_file, files_dir):
         """
-
-
-        self.json_fobj = None
+        'output_dir' is a destination dir (string or Path) for results reported
+        and files uploaded.

-
-
-
-
+        'results_file' is a file name inside 'output_dir' the results will be
+        reported into.
+
+        'files_dir' is a dir name inside 'output_dir' any files will be
+        uploaded to.
+        """
+        output_dir = Path(output_dir)
+        self.testout_file = output_dir / self.TESTOUT
+        self.results_file = output_dir / results_file
+        self.files_dir = output_dir / files_dir
+        self.output_dir = output_dir
+        self.results_fobj = None
+        self.testout_fobj = None
+
+    def start(self):
+        if self.testout_file.exists():
+            raise FileExistsError(f"{self.testout_file} already exists")
+        self.testout_fobj = open(self.testout_file, "wb")
+
+        if self.results_file.exists():
+            raise FileExistsError(f"{self.results_file} already exists")
+        self.results_fobj = open(self.results_file, "w", newline="\n")

         if self.files_dir.exists():
             raise FileExistsError(f"{self.files_dir} already exists")
         self.files_dir.mkdir()

-
+    def stop(self):
+        if self.results_fobj:
+            self.results_fobj.close()
+            self.results_fobj = None
+
+        if self.testout_fobj:
+            self.testout_fobj.close()
+            self.testout_fobj = None
+            Path(self.testout_file).unlink()
+
+    def __enter__(self):
+        try:
+            self.start()
+            return self
+        except Exception:
+            self.stop()
+            raise

     def __exit__(self, exc_type, exc_value, traceback):
-
-        self.json_fobj.close()
-        self.json_fobj = None
+        self.stop()

     def report(self, result_line):
         """
@@ -72,35 +74,28 @@ class Reporter:

         'result_line' is a dict in the format specified by RESULTS.md.
         """
-        json.dump(result_line, self.
-        self.
-        self.
+        json.dump(result_line, self.results_fobj, indent=None)
+        self.results_fobj.write("\n")
+        self.results_fobj.flush()

-
-
-
-
-
-
-        flags = open_mode | os.O_TMPFILE
-        fd = os.open(self.files_dir, flags, 0o644)
-        try:
-            yield fd
-        finally:
-            os.close(fd)
+    def _dest_path(self, file_name, result_name=None):
+        result_name = util.normalize_path(result_name) if result_name else "."
+        # /path/to/files_dir / path/to/subtest / path/to/file.log
+        file_path = self.files_dir / result_name / util.normalize_path(file_name)
+        file_path.parent.mkdir(parents=True, exist_ok=True)
+        return file_path

-    def
+    def open_file(self, file_name, result_name=None, mode="wb"):
         """
-
-
-
-        This function can be called multiple times with the same 'fd', and
-        does not close or otherwise alter the descriptor.
+        Open a file named 'file_name' in a directory relevant to 'result_name'.
+        Returns an opened file-like object that can be used in a context manager
+        just like with regular open().

-        If 'result_name' is not given,
+        If 'result_name' (typically a subtest) is not given, open the file
+        for the test (name) itself.
         """
-
-
-
-
-
+        return open(self._dest_path(file_name, result_name), mode)
+
+    def link_testout(self, file_name, result_name=None):
+        # TODO: docstring
+        os.link(self.testout_file, self._dest_path(file_name, result_name))
atex/executor/scripts.py
CHANGED
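Besides honoring an ATEX_DEBUG_TEST environment variable, the diff below makes test_setup() write the test's fmf metadata to a remote YAML file through a quoted heredoc. A standalone sketch of that heredoc-embedding pattern follows; the function and file names are illustrative, not the package's API:

    # illustrative sketch of the heredoc pattern used by the new test_setup()
    import yaml

    def embed_yaml(data, remote_path):
        script = f"cat > '{remote_path}' <<'ATEX_SETUP_EOF'\n"
        script += yaml.dump(data).rstrip("\n")   # don't rely on a trailing newline
        script += "\nATEX_SETUP_EOF\n"
        return script

    print(embed_yaml({"summary": "example test", "duration": "10m"}, "/var/tmp/test.yaml"))

The quoted 'ATEX_SETUP_EOF' delimiter keeps the shell from expanding anything inside the YAML body.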
@@ -1,6 +1,9 @@
+import os
 import collections
 from pathlib import Path

+import yaml
+
 from .. import util, fmf

 # name: fmf path to the test as string, ie. /some/test
@@ -49,7 +52,7 @@ def test_wrapper(*, test, tests_dir, test_exec):
     # doing it here avoids unnecessary traffic (reading stdin) via ssh,
     # even if it is fed from subprocess.DEVNULL on the runner

-    if
+    if os.environ.get("ATEX_DEBUG_TEST") == "1":
         out += "set -x\n"

     # use a subshell to limit the scope of the CWD change
@@ -102,7 +105,7 @@ def _install_packages(pkgs, extra_opts=None):
     """) # noqa: E501


-def test_setup(*, test, wrapper_exec, test_exec, **kwargs):
+def test_setup(*, test, wrapper_exec, test_exec, test_yaml, **kwargs):
     """
     Generate a bash script that should prepare the remote end for test
     execution.
@@ -111,17 +114,17 @@ def test_setup(*, test, wrapper_exec, test_exec, **kwargs):
     scripts: a test script (contents of 'test' from FMF) and a wrapper script
     to run the test script.

+    'test' is a class Test instance.
+
     'wrapper_exec' is the remote path where the wrapper script should be put.

     'test_exec' is the remote path where the test script should be put.

-    'test' is a class Test instance.
-
     Any 'kwargs' are passed to test_wrapper().
     """
     out = "#!/bin/bash\n"

-    if
+    if os.environ.get("ATEX_DEBUG_TEST") == "1":
         out += "set -xe\n"
     else:
         out += "exec 1>/dev/null\n"
@@ -134,6 +137,11 @@ def test_setup(*, test, wrapper_exec, test_exec, **kwargs):
     if recommend := list(fmf.test_pkg_requires(test.data, "recommend")):
         out += _install_packages(recommend, ("--skip-broken",)) + "\n"

+    # write out test data
+    out += f"cat > '{test_yaml}' <<'ATEX_SETUP_EOF'\n"
+    out += yaml.dump(test.data).rstrip("\n")  # don't rely on trailing \n
+    out += "\nATEX_SETUP_EOF\n"
+
     # make the wrapper script
     out += f"cat > '{wrapper_exec}' <<'ATEX_SETUP_EOF'\n"
     out += test_wrapper(
atex/executor/testcontrol.py
CHANGED
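Two notable changes in the diff below: TestControl can now be constructed without a control fd and later re-pointed at a new one via reassign() (useful on test reconnect), and uploaded file data is copied kernel-side with os.sendfile(), falling back to os.splice() when sendfile rejects the input with EINVAL (as it does for pipe or socket input). A self-contained sketch of that fallback loop, not the package's own code (Linux only, and os.splice needs Python 3.10+):

    import errno
    import os

    def copy_fd(src_fd, dst_fd, length):
        """Copy 'length' bytes from src_fd to dst_fd without a userspace buffer."""
        while length > 0:
            try:
                # sendfile() needs an mmap-able input; pipes/sockets raise EINVAL
                written = os.sendfile(dst_fd, src_fd, None, length)
            except OSError as e:
                if e.errno != errno.EINVAL:
                    raise
                # splice() handles pipe input instead (one side must be a pipe)
                written = os.splice(src_fd, dst_fd, length)
            if written == 0:
                raise EOFError("unexpected EOF while copying")
            length -= written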
@@ -95,7 +95,7 @@ class TestControl:
     processing test-issued commands, results and uploaded files.
     """

-    def __init__(self, *,
+    def __init__(self, *, reporter, duration, control_fd=None):
         """
         'control_fd' is a non-blocking file descriptor to be read.

@@ -103,16 +103,15 @@ class TestControl:
         and uploaded files will be written to.

         'duration' is a class Duration instance.
-
-        'testout_fd' is an optional file descriptor handle which the test uses
-        to write its output to - useful here for the 'result' control word and
-        its protocol, which allows "hardlinking" the fd to a real file name.
         """
-        self.control_fd = control_fd
-        self.stream = NonblockLineReader(control_fd)
         self.reporter = reporter
         self.duration = duration
-
+        if control_fd:
+            self.control_fd = control_fd
+            self.stream = NonblockLineReader(control_fd)
+        else:
+            self.control_fd = None
+            self.stream = None
         self.eof = False
         self.in_progress = None
         self.partial_results = collections.defaultdict(dict)
@@ -120,6 +119,20 @@ class TestControl:
         self.reconnect = None
         self.nameless_result_seen = False

+    def reassign(self, new_fd):
+        """
+        Assign a new control file descriptor to read test control from,
+        replacing a previous one. Useful on test reconnect.
+        """
+        err = "tried to assign new control fd while"
+        if self.in_progress:
+            raise BadControlError(f"{err} old one is reading non-control binary data")
+        elif self.stream and self.stream.bytes_read != 0:
+            raise BadControlError(f"{err} old one is in the middle of reading a control line")
+        self.eof = False
+        self.control_fd = new_fd
+        self.stream = NonblockLineReader(new_fd)
+
     def process(self):
         """
         Read from the control file descriptor and potentially perform any
@@ -143,7 +156,7 @@ class TestControl:
         except BufferFullError as e:
             raise BadControlError(str(e)) from None

-        util.
+        util.extradebug(f"control line: {line} // eof: {self.stream.eof}")

         if self.stream.eof:
             self.eof = True
@@ -254,28 +267,28 @@ class TestControl:
         except ValueError as e:
             raise BadReportJSONError(f"file entry {file_name} length: {str(e)}") from None

-
-
-
-
+        try:
+            with self.reporter.open_file(file_name, name) as f:
+                fd = f.fileno()
+                while file_length > 0:
                     try:
-
-
-
-
-
-
-
+                        # try a more universal sendfile first, fall back to splice
+                        try:
+                            written = os.sendfile(fd, self.control_fd, None, file_length)
+                        except OSError as e:
+                            if e.errno == 22:  # EINVAL
+                                written = os.splice(self.control_fd, fd, file_length)
+                            else:
+                                raise
+                    except BlockingIOError:
+                        yield
+                        continue
+                    if written == 0:
+                        raise BadControlError("EOF when reading data")
+                    file_length -= written
                     yield
-
-
-        raise BadControlError("EOF when reading data")
-        file_length -= written
-        yield
-        try:
-            self.reporter.link_tmpfile_to(fd, file_name, name)
-        except FileExistsError:
-            raise BadReportJSONError(f"file '{file_name}' already exists") from None
+        except FileExistsError:
+            raise BadReportJSONError(f"file '{file_name}' already exists") from None

         # either store partial result + return,
         # or load previous partial result and merge into it
@@ -304,7 +317,7 @@ class TestControl:
         if not testout:
             raise BadReportJSONError("'testout' specified, but empty")
         try:
-            self.reporter.
+            self.reporter.link_testout(testout, name)
         except FileExistsError:
             raise BadReportJSONError(f"file '{testout}' already exists") from None

atex/fmf.py
CHANGED
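The constructor rework in the diff below lets callers pass an existing fmf.Tree (avoiding a re-parse per instance), select tests with explicit name/filter/condition/exclude overrides, and fall back to a dummy plan when no plan name is given. A hypothetical construction example; the tree path, plan name and filter values are invented:

    import fmf
    from atex.fmf import FMFTests

    tree = fmf.Tree("/path/to/checkout")      # parse once, reuse for several instances
    selected = FMFTests(
        tree,
        "/plans/ci",
        names=["/some/test"],                 # regexes matched against test names
        excludes=["/some/test/slow"],
        context={"distro": "rhel-9.6", "arch": "x86_64"},
    )
    print(sorted(selected.tests))             # fmf metadata dicts, keyed by test name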
@@ -1,4 +1,5 @@
 import re
+import collections
 from pathlib import Path

 # from system-wide sys.path
@@ -32,21 +33,44 @@ class FMFTests:
     """
     # TODO: usage example ^^^^

-    def __init__(
+    def __init__(
+        self, fmf_tree, plan_name=None, *,
+        names=None, filters=None, conditions=None, excludes=None,
+        context=None,
+    ):
         """
         'fmf_tree' is filesystem path somewhere inside fmf metadata tree,
         or a root fmf.Tree instance.

         'plan_name' is fmf identifier (like /some/thing) of a tmt plan
-        to use for discovering tests.
+        to use for discovering tests. If None, a dummy (empty) plan is used.

-        '
-
+        'names', 'filters', 'conditions' and 'exclude' (all tuple/list)
+        are fmf tree filters (resolved by the fmf module), overriding any
+        existing tree filters in the plan's discover phase specifies, where:
+
+        'names' are test regexes like ["/some/test", "/another/test"]
+
+        'filters' are fmf-style filter expressions, as documented on
+        https://fmf.readthedocs.io/en/stable/modules.html#fmf.filter
+
+        'conditions' are python expressions whose namespace locals()
+        are set up to be a dictionary of the fmf tree. When any of the
+        expressions returns True, the tree is returned, ie.
+            ["environment['FOO'] == 'BAR'"]
+            ["'enabled' not in locals() or enabled"]
+        Note that KeyError is silently ignored and treated as False.
+
+        'excludes' are test regexes to exclude, format same as 'names'
+
+        'context' is a dict like {'distro': 'rhel-9.6'} used for additional
+        adjustment of the discovered fmf metadata.
         """
         # list of packages to install, as extracted from plan
         self.prepare_pkgs = []
         # list of scripts to run, as extracted from plan
         self.prepare_scripts = []
+        self.finish_scripts = []
         # dict of environment, as extracted from plan
         self.plan_env = {}
         # dict indexed by test name, value is dict with fmf-parsed metadata
@@ -54,21 +78,28 @@ class FMFTests:
         # dict indexed by test name, value is pathlib.Path of relative path
         # of the fmf metadata root towards the test metadata location
         self.test_dirs = {}
-        # fmf.Context instance, as used for test discovery
-        self.context = fmf.Context(**context) if context else fmf.Context()

+        # fmf.Context instance, as used for test discovery
+        context = fmf.Context(**context) if context else fmf.Context()
+        # allow the user to pass fmf.Tree directly, greatly speeding up the
+        # instantiation of multiple FMFTests instances
         tree = fmf_tree.copy() if isinstance(fmf_tree, fmf.Tree) else fmf.Tree(fmf_tree)
-        tree.adjust(context=
+        tree.adjust(context=context)

         # Path of the metadata root
         self.root = Path(tree.root)

         # lookup the plan first
-
-
-
-
-
+        if plan_name:
+            plan = tree.find(plan_name)
+            if not plan:
+                raise ValueError(f"plan {plan_name} not found in {tree.root}")
+            if "test" in plan.data:
+                raise ValueError(f"plan {plan_name} appears to be a test")
+        # fall back to a dummy plan
+        else:
+            class plan:  # noqa: N801
+                data = {}

         # gather and merge plan-defined environment variables
         #
@@ -88,13 +119,16 @@ class FMFTests:
         #   script:
         #     - some-command
         for entry in listlike(plan.data, "prepare"):
-            if "how"
-                continue
-            if entry["how"] == "install":
+            if entry.get("how") == "install":
                 self.prepare_pkgs += listlike(entry, "package")
-            elif entry
+            elif entry.get("how") == "shell":
                 self.prepare_scripts += listlike(entry, "script")

+        # gather all finish scripts, same as prepare scripts
+        for entry in listlike(plan.data, "finish"):
+            if entry.get("how") == "shell":
+                self.finish_scripts += listlike(entry, "script")
+
         # gather all tests selected by the plan
         #
         # discover:
@@ -105,49 +139,50 @@ class FMFTests:
         #     - some-test-regex
         #   exclude:
         #     - some-test-regex
-
-
-        if
- [40 more removed lines were elided by the diff viewer]
+        plan_filters = collections.defaultdict(list)
+        for entry in listlike(plan.data, "discover"):
+            if entry.get("how") != "fmf":
+                continue
+            for meta_name in ("filter", "test", "exclude"):
+                if value := listlike(entry, meta_name):
+                    plan_filters[meta_name] += value
+
+        prune_kwargs = {}
+        if names:
+            prune_kwargs["names"] = names
+        elif "test" in plan_filters:
+            prune_kwargs["names"] = plan_filters["test"]
+        if filters:
+            prune_kwargs["filters"] = filters
+        elif "filter" in plan_filters:
+            prune_kwargs["filters"] = plan_filters["filter"]
+        if conditions:
+            prune_kwargs["conditions"] = conditions
+        if not excludes:
+            excludes = plan_filters.get("exclude")
+
+        # actually discover the tests
+        for child in tree.prune(**prune_kwargs):
+            # excludes not supported by .prune(), we have to do it here
+            if excludes and any(re.match(x, child.name) for x in excludes):
+                continue
+            # only tests
+            if "test" not in child.data:
+                continue
+            # only enabled tests
+            if "enabled" in child.data and not child.data["enabled"]:
+                continue
+            # no manual tests and no stories
+            if child.data.get("manual") or child.data.get("story"):
+                continue
+            # after adjusting above, any adjusts are useless, free some space
+            if "adjust" in child.data:
+                del child.data["adjust"]
+
+            self.tests[child.name] = child.data
+            # child.sources ie. ['/abs/path/to/some.fmf', '/abs/path/to/some/node.fmf']
+            self.test_dirs[child.name] = \
+                Path(child.sources[-1]).parent.relative_to(self.root)


 def test_pkg_requires(data, key="require"):
@@ -200,18 +235,3 @@ def all_pkg_requires(fmf_tests, key="require"):
 # context. Any other filters are applied afterwards to allow modification
 # of tree metadata by the adjust expressions. Ie.
 #   {'distro': 'rhel-9.6.0', 'arch': 'x86_64'}
-
-#Platform = collections.namedtuple("Platform", ["distro", "arch"])
-#
-#
-#def combine_platforms(fmf_path, plan_name, platforms):
-#    # TODO: document
-#    fmf_tests = {}
-#    tree = fmf.Tree(fmf_path)
-#    for platform in platforms:
-#        context = {"distro": platform.distro, "arch": platform.arch}
-#        fmf_tests[platform] = FMFTests(tree, plan_name, context=context)
-#    return fmf_tests
-
-# TODO: in Orchestrator, when a Provisioner becomes free, have it pick a test
-# from the appropriate tests[platform] per the Provisioner's platform
atex/orchestrator/__init__.py
CHANGED
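The new package init below defines an abstract Orchestrator base class (start/stop/serve_once/serve_forever plus context-manager support) and lazily imports submodules through a module-level __getattr__ (PEP 562). A toy subclass sketch showing the intended driving loop; only the base-class API is taken from the diff, the job-list logic is invented:

    from atex.orchestrator import Orchestrator

    class OneShotOrchestrator(Orchestrator):
        def __init__(self, jobs):
            self.jobs = list(jobs)

        def start(self):
            pass                        # nothing to allocate in this toy example

        def stop(self):
            pass

        def serve_once(self):
            if self.jobs:
                print("running", self.jobs.pop(0))
            return bool(self.jobs)      # True = call me again, False = all done

    with OneShotOrchestrator(["job-1", "job-2"]) as orch:
        orch.serve_forever()            # polls serve_once() once per second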
@@ -1,2 +1,76 @@
-
-
+import importlib as _importlib
+import pkgutil as _pkgutil
+import time as _time
+
+
+class OrchestratorError(Exception):
+    pass
+
+
+class Orchestrator:
+    """
+    A scheduler for parallel execution on multiple resources (machines/systems).
+
+    TODO: more description
+    """
+
+    def serve_once(self):
+        """
+        Run the orchestration logic, processing any outstanding requests
+        (for provisioning, new test execution, etc.) and returning once these
+        are taken care of.
+
+        Returns True to indicate that it should be called again by the user
+        (more work to be done), False once all testing is concluded.
+        """
+        raise NotImplementedError(f"'serve_once' not implemented for {self.__class__.__name__}")
+
+    def serve_forever(self):
+        """
+        Run the orchestration logic, blocking until all testing is concluded.
+        """
+        while self.serve_once():
+            _time.sleep(1)
+
+    def start(self):
+        """
+        Start the Orchestrator instance, opening any files / allocating
+        resources as necessary.
+        """
+        raise NotImplementedError(f"'start' not implemented for {self.__class__.__name__}")
+
+    def stop(self):
+        """
+        Stop the Orchestrator instance, freeing all allocated resources.
+        """
+        raise NotImplementedError(f"'stop' not implemented for {self.__class__.__name__}")
+
+    def __enter__(self):
+        try:
+            self.start()
+            return self
+        except Exception:
+            self.stop()
+            raise
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self.stop()
+
+
+_submodules = [
+    info.name for info in _pkgutil.iter_modules(__spec__.submodule_search_locations)
+]
+
+__all__ = [*_submodules, Orchestrator.__name__]  # noqa: PLE0604
+
+
+def __dir__():
+    return __all__
+
+
+# lazily import submodules
+def __getattr__(attr):
+    if attr in _submodules:
+        return _importlib.import_module(f".{attr}", __name__)
+    else:
+        raise AttributeError(f"module '{__name__}' has no attribute '{attr}'")