atex 0.8__py3-none-any.whl → 0.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -95,7 +95,7 @@ class TestControl:
     processing test-issued commands, results and uploaded files.
     """
 
-    def __init__(self, *, control_fd, reporter, duration, testout_fd):
+    def __init__(self, *, reporter, duration, control_fd=None):
         """
         'control_fd' is a non-blocking file descriptor to be read.
 
@@ -103,16 +103,15 @@ class TestControl:
         and uploaded files will be written to.
 
         'duration' is a class Duration instance.
-
-        'testout_fd' is an optional file descriptor handle which the test uses
-        to write its output to - useful here for the 'result' control word and
-        its protocol, which allows "hardlinking" the fd to a real file name.
         """
-        self.control_fd = control_fd
-        self.stream = NonblockLineReader(control_fd)
         self.reporter = reporter
         self.duration = duration
-        self.testout_fd = testout_fd
+        if control_fd:
+            self.control_fd = control_fd
+            self.stream = NonblockLineReader(control_fd)
+        else:
+            self.control_fd = None
+            self.stream = None
         self.eof = False
         self.in_progress = None
         self.partial_results = collections.defaultdict(dict)
@@ -120,6 +119,20 @@ class TestControl:
         self.reconnect = None
         self.nameless_result_seen = False
 
+    def reassign(self, new_fd):
+        """
+        Assign a new control file descriptor to read test control from,
+        replacing a previous one. Useful on test reconnect.
+        """
+        err = "tried to assign new control fd while"
+        if self.in_progress:
+            raise BadControlError(f"{err} old one is reading non-control binary data")
+        elif self.stream and self.stream.bytes_read != 0:
+            raise BadControlError(f"{err} old one is in the middle of reading a control line")
+        self.eof = False
+        self.control_fd = new_fd
+        self.stream = NonblockLineReader(new_fd)
+
     def process(self):
         """
         Read from the control file descriptor and potentially perform any
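
The now-optional 'control_fd' and the new reassign() together enable a reconnect flow: construct TestControl without a descriptor, then attach (or later replace) one once the test connects. A minimal sketch under those assumptions; accept_test_connection() is a hypothetical helper standing in for whatever event loop accepts the connection, not atex API:

    import os

    control = TestControl(reporter=reporter, duration=duration)  # no fd yet

    fd = accept_test_connection()  # hypothetical; yields a raw fd
    os.set_blocking(fd, False)     # 'control_fd' must be non-blocking
    control.reassign(fd)           # also safe later, on test reconnect; raises
                                   # BadControlError if the old fd was left
                                   # mid-line or mid-binary-transfer
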
@@ -143,7 +156,7 @@ class TestControl:
             except BufferFullError as e:
                 raise BadControlError(str(e)) from None
 
-        util.debug(f"got control line: {line}")
+        util.debug(f"got control line: {line} // eof: {self.stream.eof}")
 
         if self.stream.eof:
             self.eof = True
@@ -254,28 +267,28 @@ class TestControl:
         except ValueError as e:
             raise BadReportJSONError(f"file entry {file_name} length: {str(e)}") from None
 
-        with self.reporter.open_tmpfile() as fd:
-            while file_length > 0:
-                try:
-                    # try a more universal sendfile first, fall back to splice
+        try:
+            with self.reporter.open_file(file_name, name) as f:
+                fd = f.fileno()
+                while file_length > 0:
                     try:
-                        written = os.sendfile(fd, self.control_fd, None, file_length)
-                    except OSError as e:
-                        if e.errno == 22:  # EINVAL
-                            written = os.splice(self.control_fd, fd, file_length)
-                        else:
-                            raise
-                except BlockingIOError:
+                        # try a more universal sendfile first, fall back to splice
+                        try:
+                            written = os.sendfile(fd, self.control_fd, None, file_length)
+                        except OSError as e:
+                            if e.errno == 22:  # EINVAL
+                                written = os.splice(self.control_fd, fd, file_length)
+                            else:
+                                raise
+                    except BlockingIOError:
+                        yield
+                        continue
+                    if written == 0:
+                        raise BadControlError("EOF when reading data")
+                    file_length -= written
                     yield
-                    continue
-                if written == 0:
-                    raise BadControlError("EOF when reading data")
-                file_length -= written
-                yield
-        try:
-            self.reporter.link_tmpfile_to(fd, file_name, name)
-        except FileExistsError:
-            raise BadReportJSONError(f"file '{file_name}' already exists") from None
+        except FileExistsError:
+            raise BadReportJSONError(f"file '{file_name}' already exists") from None
 
         # either store partial result + return,
         # or load previous partial result and merge into it
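
The upload loop above leans on a sendfile(2)-then-splice(2) pattern: sendfile needs an mmap-capable source, so reading from a pipe or socket fails with EINVAL, where splice (Linux-only, Python 3.10+, at least one side a pipe) still works. A standalone sketch of the same pattern, without the generator-based yield cooperation the atex code uses for its non-blocking loop:

    import os

    def copy_fd(dst_fd, src_fd, length):
        # copy 'length' bytes between raw fds, zero-copy where the kernel allows
        while length > 0:
            try:
                written = os.sendfile(dst_fd, src_fd, None, length)
            except OSError as e:
                if e.errno == 22:  # EINVAL, e.g. a pipe source; try splice
                    written = os.splice(src_fd, dst_fd, length)
                else:
                    raise
            if written == 0:
                raise EOFError("unexpected EOF while copying")
            length -= written
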
@@ -304,7 +317,7 @@ class TestControl:
             if not testout:
                 raise BadReportJSONError("'testout' specified, but empty")
             try:
-                self.reporter.link_tmpfile_to(self.testout_fd, testout, name)
+                self.reporter.link_testout(testout, name)
             except FileExistsError:
                 raise BadReportJSONError(f"file '{testout}' already exists") from None
 
atex/fmf.py CHANGED
@@ -1,4 +1,5 @@
 import re
+import collections
 from pathlib import Path
 
 # from system-wide sys.path
@@ -32,21 +33,44 @@ class FMFTests:
     """
     # TODO: usage example ^^^^
 
-    def __init__(self, fmf_tree, plan_name, context=None):
+    def __init__(
+        self, fmf_tree, plan_name=None, *,
+        names=None, filters=None, conditions=None, excludes=None,
+        context=None,
+    ):
         """
         'fmf_tree' is filesystem path somewhere inside fmf metadata tree,
         or a root fmf.Tree instance.
 
         'plan_name' is fmf identifier (like /some/thing) of a tmt plan
-        to use for discovering tests.
+        to use for discovering tests. If None, a dummy (empty) plan is used.
 
-        'context' is a dict like {'distro': 'rhel-9.6'} used for filtering
-        discovered tests.
+        'names', 'filters', 'conditions' and 'excludes' (all tuple/list)
+        are fmf tree filters (resolved by the fmf module), overriding any
+        existing tree filters the plan's discover phase specifies, where:
+
+        'names' are test regexes like ["/some/test", "/another/test"]
+
+        'filters' are fmf-style filter expressions, as documented on
+        https://fmf.readthedocs.io/en/stable/modules.html#fmf.filter
+
+        'conditions' are python expressions whose namespace locals()
+        are set up to be a dictionary of the fmf tree. When any of the
+        expressions returns True, the tree is returned, ie.
+            ["environment['FOO'] == 'BAR'"]
+            ["'enabled' not in locals() or enabled"]
+        Note that KeyError is silently ignored and treated as False.
+
+        'excludes' are test regexes to exclude, format same as 'names'
+
+        'context' is a dict like {'distro': 'rhel-9.6'} used for additional
+        adjustment of the discovered fmf metadata.
         """
         # list of packages to install, as extracted from plan
         self.prepare_pkgs = []
         # list of scripts to run, as extracted from plan
         self.prepare_scripts = []
+        self.finish_scripts = []
         # dict of environment, as extracted from plan
         self.plan_env = {}
         # dict indexed by test name, value is dict with fmf-parsed metadata
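
An illustrative use of the new keyword filters (the tree path, plan name and regexes below are placeholders, not atex defaults):

    import fmf

    tree = fmf.Tree("/path/to/project")   # reusable across FMFTests instances
    tests = FMFTests(
        tree,                             # or just the filesystem path
        "/plans/ci",                      # now optional; None uses a dummy plan
        names=["/tests/network/.*"],      # overrides the plan's 'test' regexes
        conditions=["'enabled' not in locals() or enabled"],
        context={"distro": "rhel-9.6"},
    )
    for name in tests.tests:
        print(name, tests.test_dirs[name])
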
@@ -54,21 +78,28 @@ class FMFTests:
         # dict indexed by test name, value is pathlib.Path of relative path
         # of the fmf metadata root towards the test metadata location
         self.test_dirs = {}
-        # fmf.Context instance, as used for test discovery
-        self.context = fmf.Context(**context) if context else fmf.Context()
 
+        # fmf.Context instance, as used for test discovery
+        context = fmf.Context(**context) if context else fmf.Context()
+        # allow the user to pass fmf.Tree directly, greatly speeding up the
+        # instantiation of multiple FMFTests instances
         tree = fmf_tree.copy() if isinstance(fmf_tree, fmf.Tree) else fmf.Tree(fmf_tree)
-        tree.adjust(context=self.context)
+        tree.adjust(context=context)
 
         # Path of the metadata root
         self.root = Path(tree.root)
 
         # lookup the plan first
-        plan = tree.find(plan_name)
-        if not plan:
-            raise ValueError(f"plan {plan_name} not found in {tree.root}")
-        if "test" in plan.data:
-            raise ValueError(f"plan {plan_name} appears to be a test")
+        if plan_name:
+            plan = tree.find(plan_name)
+            if not plan:
+                raise ValueError(f"plan {plan_name} not found in {tree.root}")
+            if "test" in plan.data:
+                raise ValueError(f"plan {plan_name} appears to be a test")
+        # fall back to a dummy plan
+        else:
+            class plan:  # noqa: N801
+                data = {}
 
         # gather and merge plan-defined environment variables
         #
@@ -88,13 +119,16 @@ class FMFTests:
         # script:
         #   - some-command
         for entry in listlike(plan.data, "prepare"):
-            if "how" not in entry:
-                continue
-            if entry["how"] == "install":
+            if entry.get("how") == "install":
                 self.prepare_pkgs += listlike(entry, "package")
-            elif entry["how"] == "shell":
+            elif entry.get("how") == "shell":
                 self.prepare_scripts += listlike(entry, "script")
 
+        # gather all finish scripts, same as prepare scripts
+        for entry in listlike(plan.data, "finish"):
+            if entry.get("how") == "shell":
+                self.finish_scripts += listlike(entry, "script")
+
         # gather all tests selected by the plan
         #
         # discover:
@@ -105,49 +139,50 @@ class FMFTests:
         #     - some-test-regex
         #   exclude:
         #     - some-test-regex
-        if "discover" in plan.data:
-            discover = plan.data["discover"]
-            if not isinstance(discover, list):
-                discover = (discover,)
-
-            for entry in discover:
-                if entry.get("how") != "fmf":
-                    continue
-
-                filtering = {}
-                for meta_name in ("filter", "test", "exclude"):
-                    if value := listlike(entry, meta_name):
-                        filtering[meta_name] = value
-
-                children = tree.prune(
-                    names=filtering.get("test"),
-                    filters=filtering.get("filter"),
-                )
-                for child in children:
-                    # excludes not supported by .prune(), we have to do it here
-                    excludes = filtering.get("exclude")
-                    if excludes and any(re.match(x, child.name) for x in excludes):
-                        continue
-                    # only enabled tests
-                    if "enabled" in child.data and not child.data["enabled"]:
-                        continue
-                    # no manual tests and no stories
-                    if child.data.get("manual") or child.data.get("story"):
-                        continue
-                    # after adjusting above, any adjusts are useless, free some space
-                    if "adjust" in child.data:
-                        del child.data["adjust"]
-
-                    self.tests[child.name] = child.data
-                    # child.sources ie. ['/abs/path/to/some.fmf', '/abs/path/to/some/node.fmf']
-                    self.test_dirs[child.name] = \
-                        Path(child.sources[-1]).parent.relative_to(self.root)
-
-    def match(self, regex):
-        """
-        Yield test names that match 'regex', simulating how tmt discovers tests.
-        """
-        yield from (name for name in self.tests if re.match(regex, name))
+        plan_filters = collections.defaultdict(list)
+        for entry in listlike(plan.data, "discover"):
+            if entry.get("how") != "fmf":
+                continue
+            for meta_name in ("filter", "test", "exclude"):
+                if value := listlike(entry, meta_name):
+                    plan_filters[meta_name] += value
+
+        prune_kwargs = {}
+        if names:
+            prune_kwargs["names"] = names
+        elif "test" in plan_filters:
+            prune_kwargs["names"] = plan_filters["test"]
+        if filters:
+            prune_kwargs["filters"] = filters
+        elif "filter" in plan_filters:
+            prune_kwargs["filters"] = plan_filters["filter"]
+        if conditions:
+            prune_kwargs["conditions"] = conditions
+        if not excludes:
+            excludes = plan_filters.get("exclude")
+
+        # actually discover the tests
+        for child in tree.prune(**prune_kwargs):
+            # excludes not supported by .prune(), we have to do it here
+            if excludes and any(re.match(x, child.name) for x in excludes):
+                continue
+            # only tests
+            if "test" not in child.data:
+                continue
+            # only enabled tests
+            if "enabled" in child.data and not child.data["enabled"]:
+                continue
+            # no manual tests and no stories
+            if child.data.get("manual") or child.data.get("story"):
+                continue
+            # after adjusting above, any adjusts are useless, free some space
+            if "adjust" in child.data:
+                del child.data["adjust"]
+
+            self.tests[child.name] = child.data
+            # child.sources ie. ['/abs/path/to/some.fmf', '/abs/path/to/some/node.fmf']
+            self.test_dirs[child.name] = \
+                Path(child.sources[-1]).parent.relative_to(self.root)
 
 
 def test_pkg_requires(data, key="require"):
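
The rewritten discovery first flattens every fmf discover phase into one plan_filters mapping, then lets caller-supplied arguments take precedence. The accumulation step in isolation, with listlike() approximated inline (an assumption about its behavior):

    import collections

    plan_data = {
        "discover": [
            {"how": "fmf", "test": ["/tests/foo"], "exclude": ["/tests/foo/slow"]},
            {"how": "fmf", "filter": ["tier: 1"]},
            {"how": "shell"},  # skipped: not an fmf discover phase
        ],
    }

    plan_filters = collections.defaultdict(list)
    for entry in plan_data["discover"]:
        if entry.get("how") != "fmf":
            continue
        for meta_name in ("filter", "test", "exclude"):
            if value := entry.get(meta_name):
                plan_filters[meta_name] += value if isinstance(value, list) else [value]

    # plan_filters == {"test": ["/tests/foo"], "exclude": ["/tests/foo/slow"],
    #                  "filter": ["tier: 1"]}
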
@@ -200,18 +235,3 @@ def all_pkg_requires(fmf_tests, key="require"):
 # context. Any other filters are applied afterwards to allow modification
 # of tree metadata by the adjust expressions. Ie.
 #   {'distro': 'rhel-9.6.0', 'arch': 'x86_64'}
-
-#Platform = collections.namedtuple("Platform", ["distro", "arch"])
-#
-#
-#def combine_platforms(fmf_path, plan_name, platforms):
-#    # TODO: document
-#    fmf_tests = {}
-#    tree = fmf.Tree(fmf_path)
-#    for platform in platforms:
-#        context = {"distro": platform.distro, "arch": platform.arch}
-#        fmf_tests[platform] = FMFTests(tree, plan_name, context=context)
-#    return fmf_tests
-
-# TODO: in Orchestrator, when a Provisioner becomes free, have it pick a test
-# from the appropriate tests[platform] per the Provisioner's platform
@@ -1,2 +1,3 @@
-from .aggregator import CSVAggregator  # noqa: F401
-from .orchestrator import Orchestrator  # noqa: F401
+#from .aggregator import CSVAggregator, JSONAggregator  # noqa: F401
+from .aggregator import JSONAggregator  # noqa: F401
+from .orchestrator import Orchestrator, OrchestratorError, FailedSetupError  # noqa: F401
@@ -1,4 +1,3 @@
-import csv
 import gzip
 import json
 import shutil
@@ -6,77 +5,82 @@ import threading
 from pathlib import Path
 
 
-class CSVAggregator:
-    """
-    Collects reported results as a GZIP-ed CSV and files (logs) from multiple
-    test runs under a shared directory.
+class JSONAggregator:
     """
+    Collects reported results as a GZIP-ed line-JSON and files (logs) from
+    multiple test runs under a shared directory.
+
+    Note that the aggregated JSON file *does not* use the test-based JSON format
+    described by executor/RESULTS.md - both use JSON, but are very different.
+
+    This aggregated format uses a top-level array (on each line) with a fixed
+    field order:
 
-    class _ExcelWithUnixNewline(csv.excel):
-        lineterminator = "\n"
+        platform, status, test name, subtest name, files, note
+
+    All these are strings except 'files', which is another (nested) array
+    of strings.
+
+    If a field is missing in the source result, it is translated to a null
+    value.
+    """
 
-    def __init__(self, csv_file, storage_dir):
+    def __init__(self, json_file, storage_dir):
         """
-        'csv_file' is a string/Path to a .csv.gz file with aggregated results.
+        'json_file' is a string/Path to a .json.gz file with aggregated results.
 
         'storage_dir' is a string/Path of the top-level parent for all
         per-platform / per-test files uploaded by tests.
         """
         self.lock = threading.RLock()
         self.storage_dir = Path(storage_dir)
-        self.csv_file = Path(csv_file)
-        self.csv_writer = None
-        self.results_gzip_handle = None
+        self.json_file = Path(json_file)
+        self.json_gzip_fobj = None
 
     def open(self):
-        if self.csv_file.exists():
-            raise FileExistsError(f"{self.csv_file} already exists")
-        f = gzip.open(self.csv_file, "wt", newline="")
-        try:
-            self.csv_writer = csv.writer(f, dialect=self._ExcelWithUnixNewline)
-        except:
-            f.close()
-            raise
-        self.results_gzip_handle = f
+        if self.json_file.exists():
+            raise FileExistsError(f"{self.json_file} already exists")
+        self.json_gzip_fobj = gzip.open(self.json_file, "wt", newline="\n")
 
         if self.storage_dir.exists():
             raise FileExistsError(f"{self.storage_dir} already exists")
         self.storage_dir.mkdir()
 
     def close(self):
-        self.results_gzip_handle.close()
-        self.results_gzip_handle = None
-        self.csv_writer = None
+        if self.json_gzip_fobj:
+            self.json_gzip_fobj.close()
+            self.json_gzip_fobj = None
 
     def __enter__(self):
-        self.open()
-        return self
+        try:
+            self.open()
+            return self
+        except Exception:
+            self.close()
+            raise
 
     def __exit__(self, exc_type, exc_value, traceback):
         self.close()
 
-    def ingest(self, platform, test_name, json_file, files_dir):
+    def ingest(self, platform, test_name, results_file, files_dir):
         """
-        Process 'json_file' (string/Path) for reported results and append them
-        to the overall aggregated CSV file, recursively copying over the dir
-        structure under 'files_dir' (string/Path) under the respective platform
-        and test name in the aggregated files storage dir.
+        Process 'results_file' (string/Path) for reported results and append
+        them to the overall aggregated line-JSON file, recursively copying over
+        the dir structure under 'files_dir' (string/Path) under the respective
+        platform and test name in the aggregated storage dir.
         """
-        # parse the JSON separately, before writing any CSV lines, to ensure
-        # that either all results from the test are ingested, or none at all
-        # (if one of the lines contains JSON errors)
-        csv_lines = []
-        with open(json_file) as json_fobj:
-            for raw_line in json_fobj:
-                result_line = json.loads(raw_line)
+        platform_dir = self.storage_dir / platform
+        test_dir = platform_dir / test_name.lstrip("/")
+        if test_dir.exists():
+            raise FileExistsError(f"{test_dir} already exists for {test_name}")
 
-                result_name = result_line.get("name")
-                if result_name:
-                    # sub-result; prefix test name
-                    result_name = f"{test_name}/{result_name}"
-                else:
-                    # result for test itself; use test name
-                    result_name = test_name
+        # parse the results separately, before writing any aggregated output,
+        # to ensure that either all results from the test are ingested, or none
+        # at all (ie. if one of the result lines contains JSON errors)
+        output_lines = []
+        with open(results_file) as results_fobj:
+            for raw_line in results_fobj:
+                result_line = json.loads(raw_line)
 
                 file_names = []
                 if "testout" in result_line:
@@ -84,23 +88,24 @@ class CSVAggregator:
                 if "files" in result_line:
                     file_names += (f["name"] for f in result_line["files"])
 
-                csv_lines.append((
+                output_line = (
                     platform,
                     result_line["status"],
-                    result_name,
-                    result_line.get("note", ""),
-                    *file_names,
-                ))
+                    test_name,
+                    result_line.get("name"),
+                    file_names,
+                    result_line.get("note"),
+                )
+                encoded = json.dumps(output_line, indent=None)
+                output_lines.append(encoded)
+
+        output_str = "\n".join(output_lines) + "\n"
 
         with self.lock:
-            self.csv_writer.writerows(csv_lines)
-            self.results_gzip_handle.flush()
+            self.json_gzip_fobj.write(output_str)
+            self.json_gzip_fobj.flush()
 
-        Path(json_file).unlink()
+        Path(results_file).unlink()
 
-        platform_dir = self.storage_dir / platform
         platform_dir.mkdir(exist_ok=True)
-        test_dir = platform_dir / test_name.lstrip("/")
-        if test_dir.exists():
-            raise FileExistsError(f"{test_dir} already exists for {test_name}")
-        shutil.move(files_dir, test_dir, copy_function=shutil.copy)
+        shutil.move(files_dir, test_dir)
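
A rough end-to-end use of the new aggregator, assuming JSONAggregator is re-exported from the package root as the __init__ hunk above suggests; the paths and the platform key are illustrative:

    from atex import JSONAggregator

    with JSONAggregator("results.json.gz", "storage") as agg:
        # safe to call from multiple threads; writes are serialized via an RLock
        agg.ingest(
            "rhel-9.6@x86_64",       # platform key (any string)
            "/tests/network/basic",  # test name; leading "/" stripped for dirs
            "run1/results.json",     # per-test line-JSON; unlinked after ingest
            "run1/files",            # uploaded-files dir; moved into storage/
        )
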