atex-0.10-py3-none-any.whl → atex-0.12-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- atex/aggregator/__init__.py +8 -6
- atex/aggregator/json.py +234 -51
- atex/cli/__init__.py +3 -0
- atex/cli/fmf.py +7 -7
- atex/cli/testingfarm.py +95 -45
- atex/executor/__init__.py +23 -2
- atex/executor/executor.py +26 -21
- atex/executor/reporter.py +3 -4
- atex/executor/scripts.py +14 -14
- atex/executor/testcontrol.py +32 -27
- atex/orchestrator/adhoc.py +116 -83
- atex/orchestrator/contest.py +116 -0
- atex/provisioner/__init__.py +0 -16
- atex/provisioner/libvirt/libvirt.py +13 -1
- atex/provisioner/testingfarm/api.py +57 -10
- atex/provisioner/testingfarm/testingfarm.py +25 -21
- atex/util/log.py +1 -1
- atex/util/subprocess.py +6 -6
- {atex-0.10.dist-info → atex-0.12.dist-info}/METADATA +1 -1
- {atex-0.10.dist-info → atex-0.12.dist-info}/RECORD +23 -22
- {atex-0.10.dist-info → atex-0.12.dist-info}/WHEEL +1 -1
- {atex-0.10.dist-info → atex-0.12.dist-info}/entry_points.txt +0 -0
- {atex-0.10.dist-info → atex-0.12.dist-info}/licenses/COPYING.txt +0 -0
atex/aggregator/__init__.py
CHANGED
@@ -7,12 +7,14 @@ class Aggregator:
     TODO: generic description, not JSON-specific
     """

-    def ingest(self, platform, test_name,
+    def ingest(self, platform, test_name, test_results, test_files):
         """
-        Process '
-
-
-
+        Process 'test_results' (string/Path) for as results reported by a test
+        ran by Executor, along with 'test_files' as files uploaded by that test,
+        aggregating them under 'platform' (string) as 'test_name' (string).
+
+        This is DESTRUCTIVE, the input results/files are consumed in the
+        process.
         """
         raise NotImplementedError(f"'ingest' not implemented for {self.__class__.__name__}")

@@ -34,7 +36,7 @@ class Aggregator:
             self.start()
             return self
         except Exception:
-            self.
+            self.stop()
             raise

     def __exit__(self, exc_type, exc_value, traceback):
atex/aggregator/json.py
CHANGED
@@ -1,4 +1,6 @@
+import abc
 import gzip
+import lzma
 import json
 import shutil
 import threading
@@ -7,10 +9,16 @@ from pathlib import Path
 from . import Aggregator


+def _verbatim_move(src, dst):
+    def copy_without_symlinks(src, dst):
+        return shutil.copy2(src, dst, follow_symlinks=False)
+    shutil.move(src, dst, copy_function=copy_without_symlinks)
+
+
 class JSONAggregator(Aggregator):
     """
-    Collects reported results
-    multiple test runs under a shared directory.
+    Collects reported results in a line-JSON output file and uploaded files
+    (logs) from multiple test runs under a shared directory.

     Note that the aggregated JSON file *does not* use the test-based JSON format
     described by executor/RESULTS.md - both use JSON, but are very different.
@@ -23,74 +31,249 @@ class JSONAggregator(Aggregator):
     All these are strings except 'files', which is another (nested) array
     of strings.

+    If 'testout' is present in an input test result, it is prepended to
+    the list of 'files'.
     If a field is missing in the source result, it is translated to a null
     value.
     """

-    def __init__(self,
+    def __init__(self, target, files):
         """
-        '
+        'target' is a string/Path to a .json file for all ingested
+        results to be aggregated (written) to.

-        '
+        'files' is a string/Path of the top-level parent for all
         per-platform / per-test files uploaded by tests.
         """
         self.lock = threading.RLock()
-        self.
-        self.
-        self.
+        self.target = Path(target)
+        self.files = Path(files)
+        self.target_fobj = None

     def start(self):
-        if self.
-            raise FileExistsError(f"{self.
-        self.
+        if self.target.exists():
+            raise FileExistsError(f"{self.target} already exists")
+        self.target_fobj = open(self.target, "w")

-        if self.
-            raise FileExistsError(f"{self.
-        self.
+        if self.files.exists():
+            raise FileExistsError(f"{self.files} already exists")
+        self.files.mkdir()

     def stop(self):
-        if self.
-            self.
-            self.
+        if self.target_fobj:
+            self.target_fobj.close()
+            self.target_fobj = None
+
+    def _get_test_files_path(self, platform, test_name):
+        """
+        Return a directory path to where uploaded files should be stored
+        for a particular 'platform' and 'test_name'.
+        """
+        platform_files = self.files / platform
+        platform_files.mkdir(exist_ok=True)
+        test_files = platform_files / test_name.lstrip("/")
+        return test_files
+
+    @staticmethod
+    def _modify_file_list(test_files):
+        return test_files
+
+    @staticmethod
+    def _move_test_files(test_files, target_dir):
+        """
+        Move (or otherwise process) 'test_files' as directory of files uploaded
+        by the test, into the pre-computed 'target_dir' location (inside
+        a hierarchy of all files from all tests).
+        """
+        _verbatim_move(test_files, target_dir)
+
+    def _gen_test_results(self, input_fobj, platform, test_name):
+        """
+        Yield complete output JSON objects, one for each input result.
+        """
+        # 'testout' , 'files' and others are standard fields in the
+        # test control interface, see RESULTS.md for the Executor
+        for raw_line in input_fobj:
+            result_line = json.loads(raw_line)
+
+            file_names = []
+            # process the file specified by the 'testout' key
+            if "testout" in result_line:
+                file_names.append(result_line["testout"])
+            # process any additional files in the 'files' key
+            if "files" in result_line:
+                file_names += (f["name"] for f in result_line["files"])
+
+            file_names = self._modify_file_list(file_names)

-
-
-
-
-
+            output_line = (
+                platform,
+                result_line["status"],
+                test_name,
+                result_line.get("name"),  # subtest
+                file_names,
+                result_line.get("note"),
+            )
+            yield json.dumps(output_line, indent=None)
+
+    def ingest(self, platform, test_name, test_results, test_files):
+        target_test_files = self._get_test_files_path(platform, test_name)
+        if target_test_files.exists():
+            raise FileExistsError(f"{target_test_files} already exists for {test_name}")

         # parse the results separately, before writing any aggregated output,
-        # to ensure that either
+        # to ensure that either ALL results from the test are ingested, or none
         # at all (ie. if one of the result lines contains JSON errors)
-
-
-
-            result_line = json.loads(raw_line)
-
-            file_names = []
-            if "testout" in result_line:
-                file_names.append(result_line["testout"])
-            if "files" in result_line:
-                file_names += (f["name"] for f in result_line["files"])
-
-            output_line = (
-                platform,
-                result_line["status"],
-                test_name,
-                result_line.get("name"),  # subtest
-                file_names,
-                result_line.get("note"),
-            )
-            encoded = json.dumps(output_line, indent=None)
-            output_lines.append(encoded)
-
-        output_str = "\n".join(output_lines) + "\n"
+        with open(test_results) as test_results_fobj:
+            output_results = self._gen_test_results(test_results_fobj, platform, test_name)
+            output_json = "\n".join(output_results) + "\n"

         with self.lock:
-            self.
-            self.
+            self.target_fobj.write(output_json)
+            self.target_fobj.flush()
+
+        # clean up the source test_results (Aggregator should 'mv', not 'cp')
+        Path(test_results).unlink()
+
+        # if the test_files dir is not empty
+        if any(test_files.iterdir()):
+            self._move_test_files(test_files, target_test_files)
+
+
+class CompressedJSONAggregator(JSONAggregator, abc.ABC):
+    compress_files = False
+    suffix = ""
+    exclude = ()
+
+    @abc.abstractmethod
+    def compressed_open(self, *args, **kwargs):
+        pass
+
+    def start(self):
+        if self.target.exists():
+            raise FileExistsError(f"{self.target_file} already exists")
+        self.target_fobj = self.compressed_open(self.target, "wt", newline="\n")
+
+        if self.files.exists():
+            raise FileExistsError(f"{self.storage_dir} already exists")
+        self.files.mkdir()
+
+    def _modify_file_list(self, test_files):
+        if self.compress_files and self.suffix:
+            return [
+                (name if name in self.exclude else f"{name}{self.suffix}")
+                for name in test_files
+            ]
+        else:
+            return super()._modify_file_list(test_files)
+
+    def _move_test_files(self, test_files, target_dir):
+        if not self.compress_files:
+            super()._move_test_files(test_files, target_dir)
+            return
+
+        for root, _, files in test_files.walk(top_down=False):
+            for file_name in files:
+                src_path = root / file_name
+                dst_path = target_dir / src_path.relative_to(test_files)
+
+                dst_path.parent.mkdir(parents=True, exist_ok=True)

-
+                # skip dirs, symlinks, device files, etc.
+                if not src_path.is_file(follow_symlinks=False) or file_name in self.exclude:
+                    _verbatim_move(src_path, dst_path)
+                    continue

-
-
+                if self.suffix:
+                    dst_path = dst_path.with_name(f"{dst_path.name}{self.suffix}")
+
+                with open(src_path, "rb") as plain_fobj:
+                    with self.compressed_open(dst_path, "wb") as compress_fobj:
+                        shutil.copyfileobj(plain_fobj, compress_fobj, 1048576)
+
+                src_path.unlink()
+
+            # we're walking bottom-up, so the local root should be empty now
+            root.rmdir()
+
+
+class GzipJSONAggregator(CompressedJSONAggregator):
+    """
+    Identical to JSONAggregator, but transparently Gzips either or both of
+    the output line-JSON file with results and the uploaded files.
+    """
+    def compressed_open(self, *args, **kwargs):
+        return gzip.open(*args, compresslevel=self.level, **kwargs)
+
+    def __init__(
+        self, target, files, *, compress_level=9,
+        compress_files=True, compress_files_suffix=".gz", compress_files_exclude=None,
+    ):
+        """
+        'target' is a string/Path to a .json.gz file for all ingested
+        results to be aggregated (written) to.
+
+        'files' is a string/Path of the top-level parent for all
+        per-platform / per-test files uploaded by tests.
+
+        'compress_level' specifies how much effort should be spent compressing,
+        (1 = fast, 9 = slow).
+
+        If 'compress_files' is True, compress also any files uploaded by tests.
+
+        The 'compress_files_suffix' is appended to any processed test-uploaded
+        files, and the respective 'files' results array is modified with the
+        new file names (as if the test uploaded compressed files already).
+        Set to "" (empty string) to use original file names and just compress
+        them transparently in-place.
+
+        'compress_files_exclude' is a tuple/list of strings (input 'files'
+        names) to skip when compressing. Their names also won't be modified.
+        """
+        super().__init__(target, files)
+        self.level = compress_level
+        self.compress_files = compress_files
+        self.suffix = compress_files_suffix
+        self.exclude = compress_files_exclude or ()
+
+
+class LZMAJSONAggregator(CompressedJSONAggregator):
+    """
+    Identical to JSONAggregator, but transparently compresses (via LZMA/XZ)
+    either or both of the output line-JSON file with results and the uploaded
+    files.
+    """
+    def compressed_open(self, *args, **kwargs):
+        return lzma.open(*args, preset=self.preset, **kwargs)
+
+    def __init__(
+        self, target, files, *, compress_preset=9,
+        compress_files=True, compress_files_suffix=".xz", compress_files_exclude=None,
+    ):
+        """
+        'target' is a string/Path to a .json.xz file for all ingested
+        results to be aggregated (written) to.
+
+        'files' is a string/Path of the top-level parent for all
+        per-platform / per-test files uploaded by tests.
+
+        'compress_preset' specifies how much effort should be spent compressing,
+        (1 = fast, 9 = slow). Optionally ORed with lzma.PRESET_EXTREME to spend
+        even more CPU time compressing.
+
+        If 'compress_files' is True, compress also any files uploaded by tests.
+
+        The 'compress_files_suffix' is appended to any processed test-uploaded
+        files, and the respective 'files' results array is modified with the
+        new file names (as if the test uploaded compressed files already).
+        Set to "" (empty string) to use original file names and just compress
+        them transparently in-place.
+
+        'compress_files_exclude' is a tuple/list of strings (input 'files'
+        names) to skip when compressing. Their names also won't be modified.
+        """
+        super().__init__(target, files)
+        self.preset = compress_preset
+        self.compress_files = compress_files
+        self.suffix = compress_files_suffix
+        self.exclude = compress_files_exclude or ()
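
The new compressed variants keep the JSONAggregator interface and differ only in the keyword-only options documented in their docstrings above. A minimal construction sketch, assuming the import path shown in this diff; the output paths and the exclude entry are placeholders:

    from atex.aggregator.json import GzipJSONAggregator, LZMAJSONAggregator

    # placeholder output locations; keyword names follow the new __init__ signatures
    gz = GzipJSONAggregator(
        "results.json.gz", "files",
        compress_level=6,                       # 1 = fast, 9 = slow
        compress_files=True,                    # also gzip each uploaded file
        compress_files_suffix=".gz",            # reported file names gain this suffix
        compress_files_exclude=("dmesg.gz",),   # placeholder: already-compressed upload left alone
    )

    xz = LZMAJSONAggregator("results.json.xz", "files", compress_preset=9)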
atex/cli/__init__.py
CHANGED
@@ -33,6 +33,9 @@ from .. import util
 def setup_logging(level):
     if level <= util.EXTRADEBUG:
         fmt = "%(asctime)s %(name)s: %(filename)s:%(lineno)s: %(funcName)s(): %(message)s"
+        # also print urllib3 headers
+        import http.client  # noqa: PLC0415
+        http.client.HTTPConnection.debuglevel = 5
     else:
         fmt = "%(asctime)s %(name)s: %(message)s"
     logging.basicConfig(
atex/cli/fmf.py
CHANGED
@@ -56,17 +56,17 @@ def prepare(args):
     result = make_fmftests(args)
     print("--- fmf root ---")
     print(str(result.root))
-    print("--- prepare packages ---")
+    print("\n--- prepare packages ---")
     print("\n".join(result.prepare_pkgs))
-    print("--- plan environment ---")
-    print("\n".join("{k}={v}" for k,v in result.plan_env))
+    print("\n--- plan environment ---")
+    print("\n".join(f"{k}={v}" for k,v in result.plan_env.items()))
     for script in result.prepare_scripts:
-        print("--- prepare script ---")
-        print(script)
+        print("\n--- prepare script ---")
+        print(script.rstrip("\n"))
     print("----------------------")
     for script in result.finish_scripts:
-        print("--- finish script ---")
-        print(script)
+        print("\n--- finish script ---")
+        print(script.rstrip("\n"))
     print("----------------------")


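
The plan-environment line fixes two bugs at once: the generator string lacked the f-prefix (so the literal "{k}={v}" was printed for every entry), and the mapping was iterated without .items() (so each key string would be unpacked into k, v and raise ValueError). A standalone illustration, with a placeholder dict standing in for result.plan_env:

    plan_env = {"ARCH": "x86_64", "DEBUG": "1"}  # stand-in for result.plan_env

    # new form: one KEY=value line per entry
    print("\n".join(f"{k}={v}" for k, v in plan_env.items()))
    # ARCH=x86_64
    # DEBUG=1
    # old form: no f-prefix rendered the literal "{k}={v}", and iterating
    # plan_env instead of plan_env.items() unpacks key strings -> ValueError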
atex/cli/testingfarm.py
CHANGED
@@ -31,7 +31,8 @@ def composes(args):
     comps = api.composes(ranch=args.ranch)
     comps_list = comps["composes"]
     for comp in comps_list:
-
+        if comp["type"] == "compose":
+            print(comp["name"])


 def get_request(args):
@@ -47,36 +48,63 @@ def cancel(args):

 def search_requests(args):
     api = _get_api(args)
-    reply = api.search_requests(
-        state=args.state,
-        mine=not args.all,
-        user_id=args.user_id,
-        token_id=args.token_id,
-        ranch=args.ranch,
-        created_before=args.before,
-        created_after=args.after,
-    )
-    if not reply:
-        return

-
-
-
+    func_kwargs = {
+        "mine": not args.all,
+        "user_id": args.user_id,
+        "token_id": args.token_id,
+        "ranch": args.ranch,
+        "created_before": args.before,
+        "created_after": args.after,
+    }
+
+    if args.page is not None:
+        reply = api.search_requests_paged(
+            state=args.state,
+            page=args.page,
+            **func_kwargs,
+        )
+        if not reply:
+            return
     else:
-
-
-
+        reply = api.search_requests(
+            state=args.state,
+            **func_kwargs,
+        )
+        if not reply:
+            return
+        reply = sorted(reply, key=lambda x: x["created"])

-
-
-
-
-            arch = env["arch"]
-            if compose and arch:
-                envs.append(f"{compose}@{arch}")
-        envs_str = ", ".join(envs)
+    if args.json:
+        for req in reply:
+            print(json.dumps(req))
+        return

-
+    for req in reply:
+        req_id = req["id"]
+        created = req["created"].partition(".")[0]
+
+        if "fmf" in req["test"] and req["test"]["fmf"]:
+            test = req["test"]["fmf"]["url"]
+        elif "tmt" in req["test"] and req["test"]["tmt"]:
+            test = req["test"]["tmf"]["url"]
+        else:
+            test = ""
+
+        envs = []
+        for env in req["environments_requested"]:
+            if "os" in env and env["os"] and "compose" in env["os"]:
+                compose = env["os"]["compose"]
+                arch = env["arch"]
+                if compose and arch:
+                    envs.append(f"{compose}@{arch}")
+
+        print(f"{created} {req_id}", end="")
+        if test:
+            print(f" | test:{test}", end="")
+        if envs:
+            print(f" | envs:[{', '.join(envs)}]", end="")
+        print()


 def stats(args):
@@ -92,29 +120,46 @@ def stats(args):
            elif "tmt" in req["test"] and req["test"]["tmt"]:
                repos[req["test"]["tmt"]["url"]] += 1

+        top_tokens = sorted(tokens, key=lambda x: tokens[x], reverse=True)[:10]
+        top_repos = sorted(repos, key=lambda x: repos[x], reverse=True)[:10]
+        if not top_tokens or not top_repos:
+            return
+        digits = max(len(str(tokens[top_tokens[0]])), len(str(repos[top_repos[0]])))
+
        print("Top 10 token IDs:")
-        for token_id in
+        for token_id in top_tokens:
            count = tokens[token_id]
-            print(f"{count:>
+            print(f"{count:>{digits}} {token_id}")

        print("Top 10 repo URLs:")
-        for repo_url in
+        for repo_url in top_repos:
            count = repos[repo_url]
-            print(f"{count:>
-
-    def
-
-
-
-
-
-
-
-
-
-
-
+            print(f"{count:>{digits}} {repo_url}")
+
+    def request_search_results():
+        if args.before is not None or args.after is not None:
+            for state in args.states.split(","):
+                reply = api.search_requests_paged(
+                    state=state,
+                    page=args.page,
+                    mine=False,
+                    ranch=args.ranch,
+                    created_before=args.before,
+                    created_after=args.after,
+                )
+                if reply:
+                    yield from reply
+        else:
+            for state in args.states.split(","):
+                reply = api.search_requests(
+                    state=state,
+                    mine=False,
+                    ranch=args.ranch,
+                )
+                if reply:
+                    yield from reply
+
+    top_users_repos(request_search_results())


 def reserve(args):
@@ -234,12 +279,17 @@ def parse_args(parser):
     cmd.add_argument("--before", help="only requests created before ISO8601")
     cmd.add_argument("--after", help="only requests created after ISO8601")
     cmd.add_argument("--json", help="full details, one request per line", action="store_true")
+    cmd.add_argument("--page", help="do paged search, page interval in secs", type=int)

     cmd = cmds.add_parser(
         "stats",
         help="print out TF usage statistics",
     )
+    cmd.add_argument("--before", help="only requests created before ISO8601")
+    cmd.add_argument("--after", help="only requests created after ISO8601")
+    cmd.add_argument("--page", help="do paged search, page interval in secs", type=int)
     cmd.add_argument("ranch", help="Testing Farm ranch name")
+    cmd.add_argument("states", help="comma-separated TF request states")

     cmd = cmds.add_parser(
         "reserve",
atex/executor/__init__.py
CHANGED
@@ -1,2 +1,23 @@
-
-
+class ExecutorError(Exception):
+    """
+    Raised by class Executor.
+    """
+    pass
+
+
+class TestSetupError(ExecutorError):
+    """
+    Raised when the preparation for test execution (ie. pkg install) fails.
+    """
+    pass
+
+
+class TestAbortedError(ExecutorError):
+    """
+    Raised when an infrastructure-related issue happened while running a test.
+    """
+    pass
+
+
+from . import testcontrol  # noqa: F401, E402
+from .executor import Executor  # noqa: F401, E402