atex 0.5-py3-none-any.whl → 0.8-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. atex/__init__.py +2 -12
  2. atex/cli/__init__.py +13 -13
  3. atex/cli/fmf.py +93 -0
  4. atex/cli/testingfarm.py +71 -61
  5. atex/connection/__init__.py +117 -0
  6. atex/connection/ssh.py +390 -0
  7. atex/executor/__init__.py +2 -0
  8. atex/executor/duration.py +60 -0
  9. atex/executor/executor.py +378 -0
  10. atex/executor/reporter.py +106 -0
  11. atex/executor/scripts.py +155 -0
  12. atex/executor/testcontrol.py +353 -0
  13. atex/fmf.py +217 -0
  14. atex/orchestrator/__init__.py +2 -0
  15. atex/orchestrator/aggregator.py +106 -0
  16. atex/orchestrator/orchestrator.py +324 -0
  17. atex/provision/__init__.py +101 -90
  18. atex/provision/libvirt/VM_PROVISION +8 -0
  19. atex/provision/libvirt/__init__.py +4 -4
  20. atex/provision/podman/README +59 -0
  21. atex/provision/podman/host_container.sh +74 -0
  22. atex/provision/testingfarm/__init__.py +2 -0
  23. atex/{testingfarm.py → provision/testingfarm/api.py} +170 -132
  24. atex/provision/testingfarm/testingfarm.py +236 -0
  25. atex/util/__init__.py +5 -10
  26. atex/util/dedent.py +1 -1
  27. atex/util/log.py +20 -12
  28. atex/util/path.py +16 -0
  29. atex/util/ssh_keygen.py +14 -0
  30. atex/util/subprocess.py +14 -13
  31. atex/util/threads.py +55 -0
  32. {atex-0.5.dist-info → atex-0.8.dist-info}/METADATA +97 -2
  33. atex-0.8.dist-info/RECORD +37 -0
  34. atex/cli/minitmt.py +0 -82
  35. atex/minitmt/__init__.py +0 -115
  36. atex/minitmt/fmf.py +0 -168
  37. atex/minitmt/report.py +0 -174
  38. atex/minitmt/scripts.py +0 -51
  39. atex/minitmt/testme.py +0 -3
  40. atex/orchestrator.py +0 -38
  41. atex/ssh.py +0 -320
  42. atex/util/lockable_class.py +0 -38
  43. atex-0.5.dist-info/RECORD +0 -26
  44. {atex-0.5.dist-info → atex-0.8.dist-info}/WHEEL +0 -0
  45. {atex-0.5.dist-info → atex-0.8.dist-info}/entry_points.txt +0 -0
  46. {atex-0.5.dist-info → atex-0.8.dist-info}/licenses/COPYING.txt +0 -0
atex/executor/testcontrol.py ADDED
@@ -0,0 +1,353 @@
+ import os
+ import collections
+ import json
+
+ from .. import util
+
+
+ class BufferFullError(Exception):
+     pass
+
+
+ class NonblockLineReader:
+     """
+     Kind of like io.BufferedReader but capable of reading from non-blocking
+     sources (both O_NONBLOCK sockets and os.set_blocking(False) descriptors),
+     re-assembling full lines from (potentially) multiple read() calls.
+
+     It also takes a file descriptor (not a file-like object) and takes extra
+     care to read one-byte-at-a-time to not read (and buffer) more data from the
+     source descriptor, allowing it to be used for in-kernel move, such as via
+     os.sendfile() or os.splice().
+     """
+
+     def __init__(self, src, maxlen=4096):
+         """
+         'src' is an opened file descriptor (integer).
+
+         'maxlen' is a maximum potential line length, incl. the newline
+         character - if reached, a BufferFullError is raised.
+         """
+         self.src = src
+         self.eof = False
+         self.buffer = bytearray(maxlen)
+         self.bytes_read = 0
+
+     def readline(self):
+         r"""
+         Read a line and return it, without the '\n' terminating character,
+         clearing the internal buffer upon return.
+
+         Returns None if nothing could be read (BlockingIOError) or if EOF
+         was reached.
+         """
+         while self.bytes_read < len(self.buffer):
+             try:
+                 data = os.read(self.src, 1)
+             except BlockingIOError:
+                 return None
+
+             # stream EOF
+             if len(data) == 0:
+                 self.eof = True
+                 return None
+
+             char = data[0]
+
+             if char == 0x0a:  # \n
+                 line = self.buffer[0:self.bytes_read]
+                 self.bytes_read = 0
+                 return line
+             else:
+                 self.buffer[self.bytes_read] = char
+                 self.bytes_read += 1
+
+         raise BufferFullError(f"line buffer reached {len(self.buffer)} bytes")
+
+     def clear(self):
+         """
+         Clear the internal buffer, discarding any partially-read line data.
+         """
+         self.bytes_read = 0
+
+
+ class BadControlError(Exception):
+     """
+     Raised by TestControl when abnormalities are detected in the control stream,
+     such as invalid syntax, unknown control word, or bad or unexpected data for
+     any given control word.
+     """
+     pass
+
+
+ class BadReportJSONError(BadControlError):
+     """
+     Raised on a syntactical or semantical error caused by the test not following
+     the TEST_CONTROL.md specification when passing JSON data to the 'result'
+     control word.
+     """
+     pass
+
+
+ class TestControl:
+     """
+     An implementation of the protocol described by TEST_CONTROL.md,
+     processing test-issued commands, results and uploaded files.
+     """
+
+     def __init__(self, *, control_fd, reporter, duration, testout_fd):
+         """
+         'control_fd' is a non-blocking file descriptor to be read.
+
+         'reporter' is an instance of class Reporter all the results
+         and uploaded files will be written to.
+
+         'duration' is a class Duration instance.
+
+         'testout_fd' is an optional file descriptor handle which the test uses
+         to write its output to - useful here for the 'result' control word and
+         its protocol, which allows "hardlinking" the fd to a real file name.
+         """
+         self.control_fd = control_fd
+         self.stream = NonblockLineReader(control_fd)
+         self.reporter = reporter
+         self.duration = duration
+         self.testout_fd = testout_fd
+         self.eof = False
+         self.in_progress = None
+         self.partial_results = collections.defaultdict(dict)
+         self.exit_code = None
+         self.reconnect = None
+         self.nameless_result_seen = False
+
+     def process(self):
+         """
+         Read from the control file descriptor and potentially perform any
+         appropriate action based on commands read from the test.
+
+         Returns True if there is more data expected, False otherwise
+         (when the control file descriptor reached EOF).
+         """
+         # if a parser operation is in progress, continue calling it,
+         # avoid reading a control line
+         if self.in_progress:
+             try:
+                 next(self.in_progress)
+                 return
+             except StopIteration:
+                 # parser is done, continue on to a control line
+                 self.in_progress = None
+
+         try:
+             line = self.stream.readline()
+         except BufferFullError as e:
+             raise BadControlError(str(e)) from None
+
+         util.debug(f"got control line: {line}")
+
+         if self.stream.eof:
+             self.eof = True
+             return
+         # partial read or BlockingIOError, try next time
+         if line is None:
+             return
+         elif len(line) == 0:
+             raise BadControlError(r"empty control line (just '\n')")
+
+         line = line.decode()
+         word, _, arg = line.partition(" ")
+
+         if word == "result":
+             parser = self._parser_result(arg)
+         elif word == "duration":
+             parser = self._parser_duration(arg)
+         elif word == "exitcode":
+             parser = self._parser_exitcode(arg)
+         elif word == "reconnect":
+             parser = self._parser_reconnect(arg)
+         else:
+             raise BadControlError(f"unknown control word: {word}")
+
+         try:
+             next(parser)
+             # parser not done parsing, run it next time we're called
+             self.in_progress = parser
+         except StopIteration:
+             pass
+
+     @classmethod
+     def _merge(cls, dst, src):
+         """
+         Merge a 'src' dict into 'dst', using the rules described by
+         TEST_CONTROL.md for 'Partial results'.
+         """
+         for key, value in src.items():
+             # delete existing if new value is None (JSON null)
+             if value is None and key in dst:
+                 del dst[key]
+                 continue
+             # add new key
+             elif key not in dst:
+                 dst[key] = value
+                 continue
+
+             orig_value = dst[key]
+             # different type - replace
+             if type(value) is not type(orig_value):
+                 dst[key] = value
+                 continue
+
+             # nested dict, merge it recursively
+             if isinstance(value, dict):
+                 cls._merge(orig_value, value)
+             # extensible list-like iterable, extend it
+             elif isinstance(value, (tuple, list)):
+                 orig_value += value
+             # overridable types, doesn't make sense to extend them
+             elif isinstance(value, (str, int, float, bool, bytes, bytearray)):
+                 dst[key] = value
+             # set-like, needs unioning
+             elif isinstance(value, set):
+                 orig_value.update(value)
+             else:
+                 raise BadReportJSONError(f"cannot merge type {type(value)}")
+
+     def _parser_result(self, arg):
+         try:
+             json_length = int(arg)
+         except ValueError as e:
+             raise BadControlError(f"reading json length: {str(e)}") from None
+
+         # read the full JSON
+         json_data = bytearray()
+         while json_length > 0:
+             try:
+                 chunk = os.read(self.control_fd, json_length)
+             except BlockingIOError:
+                 yield
+                 continue
+             if chunk == b"":
+                 raise BadControlError("EOF when reading data")
+             json_data += chunk
+             json_length -= len(chunk)
+             yield
+
+         # convert to native python dict
+         try:
+             result = json.loads(json_data)
+         except json.decoder.JSONDecodeError as e:
+             raise BadReportJSONError(f"JSON decode: {str(e)} caused by: {json_data}") from None
+
+         # note that this may be None (result for the test itself)
+         name = result.get("name")
+         if not name:
+             self.nameless_result_seen = True
+
+         # upload files
+         for entry in result.get("files", ()):
+             file_name = entry.get("name")
+             file_length = entry.get("length")
+             if not file_name or file_length is None:
+                 raise BadReportJSONError(f"file entry missing 'name' or 'length': {entry}")
+             try:
+                 file_length = int(file_length)
+             except ValueError as e:
+                 raise BadReportJSONError(f"file entry {file_name} length: {str(e)}") from None
+
+             with self.reporter.open_tmpfile() as fd:
+                 while file_length > 0:
+                     try:
+                         # try a more universal sendfile first, fall back to splice
+                         try:
+                             written = os.sendfile(fd, self.control_fd, None, file_length)
+                         except OSError as e:
+                             if e.errno == 22:  # EINVAL
+                                 written = os.splice(self.control_fd, fd, file_length)
+                             else:
+                                 raise
+                     except BlockingIOError:
+                         yield
+                         continue
+                     if written == 0:
+                         raise BadControlError("EOF when reading data")
+                     file_length -= written
+                     yield
+                 try:
+                     self.reporter.link_tmpfile_to(fd, file_name, name)
+                 except FileExistsError:
+                     raise BadReportJSONError(f"file '{file_name}' already exists") from None
+
+         # either store partial result + return,
+         # or load previous partial result and merge into it
+         partial = result.get("partial", False)
+         if partial:
+             # do not store the 'partial' key in the result
+             del result["partial"]
+             # note that nameless result will get None as dict key,
+             # which is perfectly fine
+             self._merge(self.partial_results[name], result)
+             # partial = do nothing
+             return
+
+         # if a previously-stored partial result exists, merge the current one
+         # into it, but then use the merged result
+         # - avoid .get() or __getitem__() on defaultdict, it would create
+         #   a new key with an empty value if there was no partial result
+         if name in self.partial_results:
+             partial_result = self.partial_results[name]
+             del self.partial_results[name]
+             self._merge(partial_result, result)
+             result = partial_result
+
+         if "testout" in result:
+             testout = result.get("testout")
+             if not testout:
+                 raise BadReportJSONError("'testout' specified, but empty")
+             try:
+                 self.reporter.link_tmpfile_to(self.testout_fd, testout, name)
+             except FileExistsError:
+                 raise BadReportJSONError(f"file '{testout}' already exists") from None
+
+         self.reporter.report(result)
+
+     def _parser_duration(self, arg):
+         if not arg:
+             raise BadControlError("duration argument empty")
+         # increment/decrement
+         if arg[0] == "+":
+             self.duration.increment(arg[1:])
+         elif arg[0] == "-":
+             self.duration.decrement(arg[1:])
+         # save/restore
+         elif arg == "save":
+             self.duration.save()
+         elif arg == "restore":
+             self.duration.restore()
+         else:
+             self.duration.set(arg)
+         # pretend to be a generator
+         if False:
+             yield
+
+     def _parser_exitcode(self, arg):
+         if not arg:
+             raise BadControlError("exitcode argument empty")
+         try:
+             code = int(arg)
+         except ValueError:
+             raise BadControlError(f"'{arg}' is not an integer exit code") from None
+         self.exit_code = code
+         # pretend to be a generator
+         if False:
+             yield
+
+     def _parser_reconnect(self, arg):
+         if not arg:
+             self.reconnect = "once"
+         elif arg == "always":
+             self.reconnect = "always"
+         else:
+             raise BadControlError(f"unknown reconnect arg: {arg}")
+         # pretend to be a generator
+         if False:
+             yield
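For orientation, a minimal sketch of how the NonblockLineReader added above could be driven from a plain os.pipe(); the pipe setup and the control-line bytes are illustrative assumptions, not part of the package:

import os
from atex.executor.testcontrol import NonblockLineReader

r, w = os.pipe()
os.set_blocking(r, False)       # the reader expects a non-blocking source
reader = NonblockLineReader(r)

os.write(w, b"duration +600\npartial li")
print(reader.readline())        # bytearray(b'duration +600')
print(reader.readline())        # None - no complete line buffered yet
os.write(w, b"ne\n")
print(reader.readline())        # bytearray(b'partial line')
os.close(w)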
atex/fmf.py ADDED
@@ -0,0 +1,217 @@
+ import re
+ from pathlib import Path
+
+ # from system-wide sys.path
+ import fmf
+
+
+ def listlike(data, key):
+     """
+     Get a piece of fmf metadata as an iterable regardless of whether it was
+     defined as a dict or a list.
+
+     This is needed because many fmf metadata keys can be used either as
+         some_key: 123
+     or as lists via YAML syntax
+         some_key:
+           - 123
+           - 456
+     and, for simplicity, we want to always deal with lists (iterables).
+     """
+     if value := data.get(key):
+         return value if isinstance(value, list) else (value,)
+     else:
+         return ()
+
+
+ class FMFTests:
+     """
+     FMF test metadata parsed from on-disk metadata using a specific plan name,
+     with all metadata dictionaries for all nodes being adjusted by that plan
+     and (optionally) a specified context.
+     """
+     # TODO: usage example ^^^^
+
+     def __init__(self, fmf_tree, plan_name, context=None):
+         """
+         'fmf_tree' is a filesystem path somewhere inside an fmf metadata tree,
+         or a root fmf.Tree instance.
+
+         'plan_name' is an fmf identifier (like /some/thing) of a tmt plan
+         to use for discovering tests.
+
+         'context' is a dict like {'distro': 'rhel-9.6'} used for filtering
+         discovered tests.
+         """
+         # list of packages to install, as extracted from plan
+         self.prepare_pkgs = []
+         # list of scripts to run, as extracted from plan
+         self.prepare_scripts = []
+         # dict of environment, as extracted from plan
+         self.plan_env = {}
+         # dict indexed by test name, value is dict with fmf-parsed metadata
+         self.tests = {}
+         # dict indexed by test name, value is pathlib.Path of the relative path
+         # from the fmf metadata root to the test metadata location
+         self.test_dirs = {}
+         # fmf.Context instance, as used for test discovery
+         self.context = fmf.Context(**context) if context else fmf.Context()
+
+         tree = fmf_tree.copy() if isinstance(fmf_tree, fmf.Tree) else fmf.Tree(fmf_tree)
+         tree.adjust(context=self.context)
+
+         # Path of the metadata root
+         self.root = Path(tree.root)
+
+         # lookup the plan first
+         plan = tree.find(plan_name)
+         if not plan:
+             raise ValueError(f"plan {plan_name} not found in {tree.root}")
+         if "test" in plan.data:
+             raise ValueError(f"plan {plan_name} appears to be a test")
+
+         # gather and merge plan-defined environment variables
+         #
+         # environment:
+         #   - FOO: BAR
+         #     BAR: BAZ
+         for entry in listlike(plan.data, "environment"):
+             self.plan_env.update(entry)
+
+         # gather all prepare scripts / packages
+         #
+         # prepare:
+         #   - how: install
+         #     package:
+         #       - some-rpm-name
+         #   - how: shell
+         #     script:
+         #       - some-command
+         for entry in listlike(plan.data, "prepare"):
+             if "how" not in entry:
+                 continue
+             if entry["how"] == "install":
+                 self.prepare_pkgs += listlike(entry, "package")
+             elif entry["how"] == "shell":
+                 self.prepare_scripts += listlike(entry, "script")
+
+         # gather all tests selected by the plan
+         #
+         # discover:
+         #   - how: fmf
+         #     filter:
+         #       - tag:some_tag
+         #     test:
+         #       - some-test-regex
+         #     exclude:
+         #       - some-test-regex
+         if "discover" in plan.data:
+             discover = plan.data["discover"]
+             if not isinstance(discover, list):
+                 discover = (discover,)
+
+             for entry in discover:
+                 if entry.get("how") != "fmf":
+                     continue
+
+                 filtering = {}
+                 for meta_name in ("filter", "test", "exclude"):
+                     if value := listlike(entry, meta_name):
+                         filtering[meta_name] = value
+
+                 children = tree.prune(
+                     names=filtering.get("test"),
+                     filters=filtering.get("filter"),
+                 )
+                 for child in children:
+                     # excludes not supported by .prune(), we have to do it here
+                     excludes = filtering.get("exclude")
+                     if excludes and any(re.match(x, child.name) for x in excludes):
+                         continue
+                     # only enabled tests
+                     if "enabled" in child.data and not child.data["enabled"]:
+                         continue
+                     # no manual tests and no stories
+                     if child.data.get("manual") or child.data.get("story"):
+                         continue
+                     # after adjusting above, any adjusts are useless, free some space
+                     if "adjust" in child.data:
+                         del child.data["adjust"]
+
+                     self.tests[child.name] = child.data
+                     # child.sources ie. ['/abs/path/to/some.fmf', '/abs/path/to/some/node.fmf']
+                     self.test_dirs[child.name] = \
+                         Path(child.sources[-1]).parent.relative_to(self.root)
+
+     def match(self, regex):
+         """
+         Yield test names that match 'regex', simulating how tmt discovers tests.
+         """
+         yield from (name for name in self.tests if re.match(regex, name))
+
+
+ def test_pkg_requires(data, key="require"):
+     """
+     Yield RPM package names specified by test 'data' (fmf metadata dict)
+     in the metadata 'key' (require or recommend), ignoring any non-RPM-package
+     requires/recommends.
+     """
+     for entry in listlike(data, key):
+         # skip type:library and type:path
+         if not isinstance(entry, str):
+             continue
+         # skip "fake RPMs" that begin with 'library('
+         if entry.startswith("library("):
+             continue
+         yield entry
+
+
+ def all_pkg_requires(fmf_tests, key="require"):
+     """
+     Yield RPM package names from the plan and all tests discovered by
+     a class FMFTests instance 'fmf_tests', ignoring any non-RPM-package
+     requires/recommends.
+     """
+     # use a set to avoid duplicates
+     pkgs = set()
+     pkgs.update(fmf_tests.prepare_pkgs)
+     for data in fmf_tests.tests.values():
+         pkgs.update(test_pkg_requires(data, key))
+     yield from pkgs
+
+
+ # Some extra notes for fmf.prune() arguments:
+ #
+ # Set 'names' to filter by a list of fmf node names, ie.
+ #   ['/some/test', '/another/test']
+ #
+ # Set 'filters' to filter by a list of fmf-style filter expressions, see
+ # https://fmf.readthedocs.io/en/stable/modules.html#fmf.filter
+ #
+ # Set 'conditions' to filter by a list of python expressions whose namespace
+ # locals() are set up to be a dictionary of the tree. When any of the
+ # expressions returns True, the tree is returned, ie.
+ #   ['environment["FOO"] == "BAR"']
+ #   ['"enabled" not in locals() or enabled']
+ # Note that KeyError is silently ignored and treated as False.
+ #
+ # Set 'context' to a dictionary to post-process the tree metadata with
+ # adjust expressions (that may be present in a tree) using the specified
+ # context. Any other filters are applied afterwards to allow modification
+ # of tree metadata by the adjust expressions. Ie.
+ #   {'distro': 'rhel-9.6.0', 'arch': 'x86_64'}
+
+ #Platform = collections.namedtuple("Platform", ["distro", "arch"])
+ #
+ #
+ #def combine_platforms(fmf_path, plan_name, platforms):
+ #    # TODO: document
+ #    fmf_tests = {}
+ #    tree = fmf.Tree(fmf_path)
+ #    for platform in platforms:
+ #        context = {"distro": platform.distro, "arch": platform.arch}
+ #        fmf_tests[platform] = FMFTests(tree, plan_name, context=context)
+ #    return fmf_tests
+
+ # TODO: in Orchestrator, when a Provisioner becomes free, have it pick a test
+ # from the appropriate tests[platform] per the Provisioner's platform
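A minimal usage sketch for the new FMFTests helper; the repository path, plan name and context values below are assumptions for illustration only:

from atex.fmf import FMFTests, all_pkg_requires

tests = FMFTests(
    "/path/to/checked-out/tests",                   # anywhere inside an fmf metadata tree
    "/plans/ci",                                    # tmt plan used for test discovery
    context={"distro": "rhel-9.6", "arch": "x86_64"},
)
for name in tests.match("/some/component/.*"):      # regex match, like tmt discovery
    print(name, tests.test_dirs[name])
print(sorted(all_pkg_requires(tests)))              # RPMs required by the plan and all tests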
atex/orchestrator/__init__.py ADDED
@@ -0,0 +1,2 @@
+ from .aggregator import CSVAggregator  # noqa: F401
+ from .orchestrator import Orchestrator  # noqa: F401
atex/orchestrator/aggregator.py ADDED
@@ -0,0 +1,106 @@
+ import csv
+ import gzip
+ import json
+ import shutil
+ import threading
+ from pathlib import Path
+
+
+ class CSVAggregator:
+     """
+     Collects reported results as a GZIP-ed CSV and files (logs) from multiple
+     test runs under a shared directory.
+     """
+
+     class _ExcelWithUnixNewline(csv.excel):
+         lineterminator = "\n"
+
+     def __init__(self, csv_file, storage_dir):
+         """
+         'csv_file' is a string/Path to a .csv.gz file with aggregated results.
+
+         'storage_dir' is a string/Path of the top-level parent for all
+         per-platform / per-test files uploaded by tests.
+         """
+         self.lock = threading.RLock()
+         self.storage_dir = Path(storage_dir)
+         self.csv_file = Path(csv_file)
+         self.csv_writer = None
+         self.results_gzip_handle = None
+
+     def open(self):
+         if self.csv_file.exists():
+             raise FileExistsError(f"{self.csv_file} already exists")
+         f = gzip.open(self.csv_file, "wt", newline="")
+         try:
+             self.csv_writer = csv.writer(f, dialect=self._ExcelWithUnixNewline)
+         except:
+             f.close()
+             raise
+         self.results_gzip_handle = f
+
+         if self.storage_dir.exists():
+             raise FileExistsError(f"{self.storage_dir} already exists")
+         self.storage_dir.mkdir()
+
+     def close(self):
+         self.results_gzip_handle.close()
+         self.results_gzip_handle = None
+         self.csv_writer = None
+
+     def __enter__(self):
+         self.open()
+         return self
+
+     def __exit__(self, exc_type, exc_value, traceback):
+         self.close()
+
+     def ingest(self, platform, test_name, json_file, files_dir):
+         """
+         Process 'json_file' (string/Path) for reported results and append them
+         to the overall aggregated CSV file, recursively copying over the dir
+         structure under 'files_dir' (string/Path) under the respective platform
+         and test name in the aggregated files storage dir.
+         """
+         # parse the JSON separately, before writing any CSV lines, to ensure
+         # that either all results from the test are ingested, or none at all
+         # (if one of the lines contains JSON errors)
+         csv_lines = []
+         with open(json_file) as json_fobj:
+             for raw_line in json_fobj:
+                 result_line = json.loads(raw_line)
+
+                 result_name = result_line.get("name")
+                 if result_name:
+                     # sub-result; prefix test name
+                     result_name = f"{test_name}/{result_name}"
+                 else:
+                     # result for test itself; use test name
+                     result_name = test_name
+
+                 file_names = []
+                 if "testout" in result_line:
+                     file_names.append(result_line["testout"])
+                 if "files" in result_line:
+                     file_names += (f["name"] for f in result_line["files"])
+
+                 csv_lines.append((
+                     platform,
+                     result_line["status"],
+                     result_name,
+                     result_line.get("note", ""),
+                     *file_names,
+                 ))
+
+         with self.lock:
+             self.csv_writer.writerows(csv_lines)
+             self.results_gzip_handle.flush()
+
+         Path(json_file).unlink()
+
+         platform_dir = self.storage_dir / platform
+         platform_dir.mkdir(exist_ok=True)
+         test_dir = platform_dir / test_name.lstrip("/")
+         if test_dir.exists():
+             raise FileExistsError(f"{test_dir} already exists for {test_name}")
+         shutil.move(files_dir, test_dir, copy_function=shutil.copy)
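A minimal usage sketch for CSVAggregator (re-exported from atex.orchestrator); the file and directory names below are assumptions for illustration only:

from atex.orchestrator import CSVAggregator

with CSVAggregator("results.csv.gz", "uploaded_files") as agg:
    # 'result.json' is line-delimited JSON produced by one test run on one
    # platform; ingest() appends its results to the shared CSV, deletes the
    # JSON file and moves 'files_dir' under uploaded_files/<platform>/<test>
    agg.ingest("rhel-9.6-x86_64", "/some/test", "result.json", "files_dir")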