atex-0.10-py3-none-any.whl → atex-0.11-py3-none-any.whl

atex/aggregator/__init__.py CHANGED
@@ -7,12 +7,14 @@ class Aggregator:
     TODO: generic description, not JSON-specific
     """
 
-    def ingest(self, platform, test_name, results_file, files_dir):
+    def ingest(self, platform, test_name, test_results, test_files):
         """
-        Process 'results_file' (string/Path) for reported results and append
-        them to the overall aggregated line-JSON file, recursively copying over
-        the dir structure under 'files_dir' (string/Path) under the respective
-        platform and test name in the aggregated storage dir.
+        Process 'test_results' (string/Path) as results reported by a test
+        run by Executor, along with 'test_files' as files uploaded by that test,
+        aggregating them under 'platform' (string) as 'test_name' (string).
+
+        This is DESTRUCTIVE, the input results/files are consumed in the
+        process.
         """
         raise NotImplementedError(f"'ingest' not implemented for {self.__class__.__name__}")
 
@@ -34,7 +36,7 @@ class Aggregator:
             self.start()
             return self
         except Exception:
-            self.close()
+            self.stop()
             raise
 
     def __exit__(self, exc_type, exc_value, traceback):
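
For orientation, a minimal usage sketch of the new ingest() signature, using the GzipJSONAggregator defined in atex/aggregator/json.py below; the platform name, test name, and paths are invented for illustration, and both inputs are consumed (moved/deleted) by the call:

    from pathlib import Path
    from atex.aggregator.json import GzipJSONAggregator

    # start()/stop() run via the context manager protocol shown above
    with GzipJSONAggregator("aggregated.json.gz", "uploaded-files/") as agg:
        # 'results' is a line-JSON file written by one test run,
        # 'files' a directory of files uploaded by that run
        agg.ingest("fedora-42", "/some/test", Path("results"), Path("files"))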
atex/aggregator/json.py CHANGED
@@ -1,4 +1,6 @@
+import abc
 import gzip
+import lzma
 import json
 import shutil
 import threading
@@ -7,10 +9,16 @@ from pathlib import Path
 from . import Aggregator
 
 
+def _verbatim_move(src, dst):
+    def copy_without_symlinks(src, dst):
+        return shutil.copy2(src, dst, follow_symlinks=False)
+    shutil.move(src, dst, copy_function=copy_without_symlinks)
+
+
 class JSONAggregator(Aggregator):
     """
-    Collects reported results as a GZIP-ed line-JSON and files (logs) from
-    multiple test runs under a shared directory.
+    Collects reported results in a line-JSON output file and uploaded files
+    (logs) from multiple test runs under a shared directory.
 
     Note that the aggregated JSON file *does not* use the test-based JSON format
     described by executor/RESULTS.md - both use JSON, but are very different.
@@ -23,74 +31,249 @@ class JSONAggregator(Aggregator):
     All these are strings except 'files', which is another (nested) array
     of strings.
 
+    If 'testout' is present in an input test result, it is prepended to
+    the list of 'files'.
     If a field is missing in the source result, it is translated to a null
     value.
     """
 
-    def __init__(self, json_file, storage_dir):
+    def __init__(self, target, files):
         """
-        'json_file' is a string/Path to a .json.gz file with aggregated results.
+        'target' is a string/Path to a .json file for all ingested
+        results to be aggregated (written) to.
 
-        'storage_dir' is a string/Path of the top-level parent for all
+        'files' is a string/Path of the top-level parent for all
         per-platform / per-test files uploaded by tests.
         """
         self.lock = threading.RLock()
-        self.storage_dir = Path(storage_dir)
-        self.json_file = Path(json_file)
-        self.json_gzip_fobj = None
+        self.target = Path(target)
+        self.files = Path(files)
+        self.target_fobj = None
 
     def start(self):
-        if self.json_file.exists():
-            raise FileExistsError(f"{self.json_file} already exists")
-        self.json_gzip_fobj = gzip.open(self.json_file, "wt", newline="\n")
+        if self.target.exists():
+            raise FileExistsError(f"{self.target} already exists")
+        self.target_fobj = open(self.target, "w")
 
-        if self.storage_dir.exists():
-            raise FileExistsError(f"{self.storage_dir} already exists")
-        self.storage_dir.mkdir()
+        if self.files.exists():
+            raise FileExistsError(f"{self.files} already exists")
+        self.files.mkdir()
 
     def stop(self):
-        if self.json_gzip_fobj:
-            self.json_gzip_fobj.close()
-            self.json_gzip_fobj = None
+        if self.target_fobj:
+            self.target_fobj.close()
+            self.target_fobj = None
+
+    def _get_test_files_path(self, platform, test_name):
+        """
+        Return a directory path to where uploaded files should be stored
+        for a particular 'platform' and 'test_name'.
+        """
+        platform_files = self.files / platform
+        platform_files.mkdir(exist_ok=True)
+        test_files = platform_files / test_name.lstrip("/")
+        return test_files
+
+    @staticmethod
+    def _modify_file_list(test_files):
+        return test_files
+
+    @staticmethod
+    def _move_test_files(test_files, target_dir):
+        """
+        Move (or otherwise process) 'test_files' as a directory of files
+        uploaded by the test, into the pre-computed 'target_dir' location
+        (inside a hierarchy of all files from all tests).
+        """
+        _verbatim_move(test_files, target_dir)
+
+    def _gen_test_results(self, input_fobj, platform, test_name):
+        """
+        Yield complete output JSON objects, one for each input result.
+        """
+        # 'testout', 'files' and others are standard fields in the
+        # test control interface, see RESULTS.md for the Executor
+        for raw_line in input_fobj:
+            result_line = json.loads(raw_line)
+
+            file_names = []
+            # process the file specified by the 'testout' key
+            if "testout" in result_line:
+                file_names.append(result_line["testout"])
+            # process any additional files in the 'files' key
+            if "files" in result_line:
+                file_names += (f["name"] for f in result_line["files"])
+
+            file_names = self._modify_file_list(file_names)
 
-    def ingest(self, platform, test_name, results_file, files_dir):
-        platform_dir = self.storage_dir / platform
-        test_dir = platform_dir / test_name.lstrip("/")
-        if test_dir.exists():
-            raise FileExistsError(f"{test_dir} already exists for {test_name}")
+            output_line = (
+                platform,
+                result_line["status"],
+                test_name,
+                result_line.get("name"),  # subtest
+                file_names,
+                result_line.get("note"),
+            )
+            yield json.dumps(output_line, indent=None)
+
+    def ingest(self, platform, test_name, test_results, test_files):
+        target_test_files = self._get_test_files_path(platform, test_name)
+        if target_test_files.exists():
+            raise FileExistsError(f"{target_test_files} already exists for {test_name}")
 
         # parse the results separately, before writing any aggregated output,
-        # to ensure that either all results from the test are ingested, or none
+        # to ensure that either ALL results from the test are ingested, or none
         # at all (ie. if one of the result lines contains JSON errors)
-        output_lines = []
-        with open(results_file) as results_fobj:
-            for raw_line in results_fobj:
-                result_line = json.loads(raw_line)
-
-                file_names = []
-                if "testout" in result_line:
-                    file_names.append(result_line["testout"])
-                if "files" in result_line:
-                    file_names += (f["name"] for f in result_line["files"])
-
-                output_line = (
-                    platform,
-                    result_line["status"],
-                    test_name,
-                    result_line.get("name"),  # subtest
-                    file_names,
-                    result_line.get("note"),
-                )
-                encoded = json.dumps(output_line, indent=None)
-                output_lines.append(encoded)
-
-        output_str = "\n".join(output_lines) + "\n"
+        with open(test_results) as test_results_fobj:
+            output_results = self._gen_test_results(test_results_fobj, platform, test_name)
+            output_json = "\n".join(output_results) + "\n"
 
         with self.lock:
-            self.json_gzip_fobj.write(output_str)
-            self.json_gzip_fobj.flush()
+            self.target_fobj.write(output_json)
+            self.target_fobj.flush()
+
+        # clean up the source test_results (Aggregator should 'mv', not 'cp')
+        Path(test_results).unlink()
+
+        # if the test_files dir is not empty
+        if any(test_files.iterdir()):
+            self._move_test_files(test_files, target_test_files)
+
+
+class CompressedJSONAggregator(JSONAggregator, abc.ABC):
+    compress_files = False
+    suffix = ""
+    exclude = ()
+
+    @abc.abstractmethod
+    def compressed_open(self, *args, **kwargs):
+        pass
+
+    def start(self):
+        if self.target.exists():
+            raise FileExistsError(f"{self.target} already exists")
+        self.target_fobj = self.compressed_open(self.target, "wt", newline="\n")
+
+        if self.files.exists():
+            raise FileExistsError(f"{self.files} already exists")
+        self.files.mkdir()
+
+    def _modify_file_list(self, test_files):
+        if self.compress_files and self.suffix:
+            return [
+                (name if name in self.exclude else f"{name}{self.suffix}")
+                for name in test_files
+            ]
+        else:
+            return super()._modify_file_list(test_files)
+
+    def _move_test_files(self, test_files, target_dir):
+        if not self.compress_files:
+            super()._move_test_files(test_files, target_dir)
+            return
+
+        for root, _, files in test_files.walk(top_down=False):
+            for file_name in files:
+                src_path = root / file_name
+                dst_path = target_dir / src_path.relative_to(test_files)
+
+                dst_path.parent.mkdir(parents=True, exist_ok=True)
 
-        Path(results_file).unlink()
+                # skip dirs, symlinks, device files, etc.
+                if not src_path.is_file(follow_symlinks=False) or file_name in self.exclude:
+                    _verbatim_move(src_path, dst_path)
+                    continue
 
-        platform_dir.mkdir(exist_ok=True)
-        shutil.move(files_dir, test_dir)
+                if self.suffix:
+                    dst_path = dst_path.with_name(f"{dst_path.name}{self.suffix}")
+
+                with open(src_path, "rb") as plain_fobj:
+                    with self.compressed_open(dst_path, "wb") as compress_fobj:
+                        shutil.copyfileobj(plain_fobj, compress_fobj, 1048576)
+
+                src_path.unlink()
+
+            # we're walking bottom-up, so the local root should be empty now
+            root.rmdir()
+
+
+class GzipJSONAggregator(CompressedJSONAggregator):
+    """
+    Identical to JSONAggregator, but transparently Gzips either or both of
+    the output line-JSON file with results and the uploaded files.
+    """
+    def compressed_open(self, *args, **kwargs):
+        return gzip.open(*args, compresslevel=self.level, **kwargs)
+
+    def __init__(
+        self, target, files, *, compress_level=9,
+        compress_files=True, compress_files_suffix=".gz", compress_files_exclude=None,
+    ):
+        """
+        'target' is a string/Path to a .json.gz file for all ingested
+        results to be aggregated (written) to.
+
+        'files' is a string/Path of the top-level parent for all
+        per-platform / per-test files uploaded by tests.
+
+        'compress_level' specifies how much effort should be spent compressing
+        (1 = fast, 9 = slow).
+
+        If 'compress_files' is True, compress also any files uploaded by tests.
+
+        The 'compress_files_suffix' is appended to any processed test-uploaded
+        files, and the respective 'files' results array is modified with the
+        new file names (as if the test uploaded compressed files already).
+        Set to "" (empty string) to use original file names and just compress
+        them transparently in-place.
+
+        'compress_files_exclude' is a tuple/list of strings (input 'files'
+        names) to skip when compressing. Their names also won't be modified.
+        """
+        super().__init__(target, files)
+        self.level = compress_level
+        self.compress_files = compress_files
+        self.suffix = compress_files_suffix
+        self.exclude = compress_files_exclude or ()
+
+
+class LZMAJSONAggregator(CompressedJSONAggregator):
+    """
+    Identical to JSONAggregator, but transparently compresses (via LZMA/XZ)
+    either or both of the output line-JSON file with results and the uploaded
+    files.
+    """
+    def compressed_open(self, *args, **kwargs):
+        return lzma.open(*args, preset=self.preset, **kwargs)
+
+    def __init__(
+        self, target, files, *, compress_preset=9,
+        compress_files=True, compress_files_suffix=".xz", compress_files_exclude=None,
+    ):
+        """
+        'target' is a string/Path to a .json.xz file for all ingested
+        results to be aggregated (written) to.
+
+        'files' is a string/Path of the top-level parent for all
+        per-platform / per-test files uploaded by tests.
+
+        'compress_preset' specifies how much effort should be spent compressing
+        (1 = fast, 9 = slow). Optionally ORed with lzma.PRESET_EXTREME to spend
+        even more CPU time compressing.
+
+        If 'compress_files' is True, compress also any files uploaded by tests.
+
+        The 'compress_files_suffix' is appended to any processed test-uploaded
+        files, and the respective 'files' results array is modified with the
+        new file names (as if the test uploaded compressed files already).
+        Set to "" (empty string) to use original file names and just compress
+        them transparently in-place.
+
+        'compress_files_exclude' is a tuple/list of strings (input 'files'
+        names) to skip when compressing. Their names also won't be modified.
+        """
+        super().__init__(target, files)
+        self.preset = compress_preset
+        self.compress_files = compress_files
+        self.suffix = compress_files_suffix
+        self.exclude = compress_files_exclude or ()
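
To make the aggregated format concrete, here is a sketch of the translation one ingested result goes through; the platform and test name are invented, and the field order follows the tuple built in _gen_test_results() above:

    import json

    # one input result line, as reported by a test (see executor/RESULTS.md)
    result = {"status": "pass", "name": "some/subtest",
              "testout": "output.txt", "files": [{"name": "report.html"}]}

    # 'testout' is prepended to the 'files' list; missing fields become null
    file_names = [result["testout"]] + [f["name"] for f in result["files"]]
    print(json.dumps(("fedora-42", result["status"], "/some/test",
                      result.get("name"), file_names, result.get("note")),
                     indent=None))
    # ["fedora-42", "pass", "/some/test", "some/subtest",
    #  ["output.txt", "report.html"], null]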
atex/cli/__init__.py CHANGED
@@ -33,6 +33,9 @@ from .. import util
 def setup_logging(level):
     if level <= util.EXTRADEBUG:
         fmt = "%(asctime)s %(name)s: %(filename)s:%(lineno)s: %(funcName)s(): %(message)s"
+        # also print urllib3 headers
+        import http.client  # noqa: PLC0415
+        http.client.HTTPConnection.debuglevel = 5
     else:
         fmt = "%(asctime)s %(name)s: %(message)s"
     logging.basicConfig(
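
The debuglevel assignment leans on stdlib behavior: any value above 0 makes http.client print request and response headers for every connection, and urllib3 is built on http.client, so its traffic is covered too. A standalone demonstration (example.com is just a placeholder URL):

    import http.client
    import urllib3

    # any positive value enables header dumps on stdout
    http.client.HTTPConnection.debuglevel = 5

    # prints "send: ..." / "reply: ..." lines for this request
    urllib3.PoolManager().request("GET", "http://example.com")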
atex/cli/fmf.py CHANGED
@@ -56,17 +56,17 @@ def prepare(args):
     result = make_fmftests(args)
     print("--- fmf root ---")
     print(str(result.root))
-    print("--- prepare packages ---")
+    print("\n--- prepare packages ---")
     print("\n".join(result.prepare_pkgs))
-    print("--- plan environment ---")
-    print("\n".join("{k}={v}" for k,v in result.plan_env))
+    print("\n--- plan environment ---")
+    print("\n".join(f"{k}={v}" for k,v in result.plan_env.items()))
     for script in result.prepare_scripts:
-        print("--- prepare script ---")
-        print(script)
+        print("\n--- prepare script ---")
+        print(script.rstrip("\n"))
         print("----------------------")
     for script in result.finish_scripts:
-        print("--- finish script ---")
-        print(script)
+        print("\n--- finish script ---")
+        print(script.rstrip("\n"))
         print("----------------------")
 
 
atex/cli/testingfarm.py CHANGED
@@ -2,6 +2,7 @@ import sys
 import json
 import pprint
 import collections
+from datetime import datetime, timedelta, UTC
 
 from .. import util
 from ..provisioner.testingfarm import api as tf
@@ -92,29 +93,52 @@ def stats(args):
             elif "tmt" in req["test"] and req["test"]["tmt"]:
                 repos[req["test"]["tmt"]["url"]] += 1
 
+        top_tokens = sorted(tokens, key=lambda x: tokens[x], reverse=True)[:10]
+        top_repos = sorted(repos, key=lambda x: repos[x], reverse=True)[:10]
+        if not top_tokens or not top_repos:
+            return
+        digits = max(len(str(tokens[top_tokens[0]])), len(str(repos[top_repos[0]])))
+
         print("Top 10 token IDs:")
-        for token_id in sorted(tokens, key=lambda x: tokens[x], reverse=True)[:10]:
+        for token_id in top_tokens:
             count = tokens[token_id]
-            print(f"{count:>5} {token_id}")
+            print(f"{count:>{digits}} {token_id}")
 
         print("Top 10 repo URLs:")
-        for repo_url in sorted(repos, key=lambda x: repos[x], reverse=True)[:10]:
+        for repo_url in top_repos:
             count = repos[repo_url]
-            print(f"{count:>5} {repo_url}")
-
-    def chain_without_none(*iterables):
-        for itr in iterables:
-            if itr is None:
-                continue
-            for item in itr:
-                if item is not None:
-                    yield item
-
-    queued_and_running = chain_without_none(
-        api.search_requests(state="queued", ranch=args.ranch, mine=False),
-        api.search_requests(state="running", ranch=args.ranch, mine=False),
-    )
-    top_users_repos(queued_and_running)
+            print(f"{count:>{digits}} {repo_url}")
+
+    def request_search_results():
+        for state in args.states.split(","):
+            result = api.search_requests(
+                state=state,
+                ranch=args.ranch,
+                mine=False,
+            )
+            if result:
+                yield from result
+
+    def multiday_request_search_results():
+        now = datetime.now(UTC)
+        for day in range(0,args.days):
+            before = now - timedelta(days=day)
+            after = now - timedelta(days=day+1)
+            for state in args.states.split(","):
+                result = api.search_requests(
+                    state=state,
+                    created_before=before.replace(microsecond=0).isoformat(),
+                    created_after=after.replace(microsecond=0).isoformat(),
+                    ranch=args.ranch,
+                    mine=False,
+                )
+                if result:
+                    yield from result
+
+    if args.days is not None:
+        top_users_repos(multiday_request_search_results())
+    else:
+        top_users_repos(request_search_results())
 
 
 def reserve(args):
@@ -239,7 +263,9 @@ def parse_args(parser):
         "stats",
         help="print out TF usage statistics",
     )
+    cmd.add_argument("--days", type=int, help="query last N days instead of all TF requests")
     cmd.add_argument("ranch", help="Testing Farm ranch name")
+    cmd.add_argument("states", help="comma-separated TF request states")
 
     cmd = cmds.add_parser(
         "reserve",
atex/executor/executor.py CHANGED
@@ -153,11 +153,11 @@ class Executor:
             **self.env,
             "TMT_PLAN_ENVIRONMENT_FILE": self.plan_env_file,
         }
-        env_args = (f"{k}={v}" for k, v in env.items())
+        env_args = tuple(f"{k}={v}" for k, v in env.items())
         # run the scripts
         for script in scripts:
             self.conn.cmd(
-                ("env", *env_args, "bash"),
+                ("env", "-C", self.tests_dir, *env_args, "bash"),
                 func=util.subprocess_log,
                 stderr=subprocess.STDOUT,
                 input=script,
@@ -387,7 +387,7 @@ class Executor:
             pass
         reporter.report({
             "status": "infra",
-            "note": repr(exception),
+            "note": f"{type(exception).__name__}({exception})",
             "testout": "output.txt",
         })
 
atex/orchestrator/adhoc.py CHANGED
@@ -1,6 +1,6 @@
 import tempfile
-import concurrent
 import collections
+import concurrent.futures
 from pathlib import Path
 
 from .. import util, executor
@@ -114,6 +114,8 @@ class AdHocOrchestrator(Orchestrator):
         self.setup_queue = util.ThreadQueue(daemon=True)
         # thread queue for remotes being released
         self.release_queue = util.ThreadQueue(daemon=True)
+        # thread queue for results being ingested
+        self.ingest_queue = util.ThreadQueue(daemon=False)
 
     def _run_new_test(self, info):
         """
@@ -125,7 +127,7 @@ class AdHocOrchestrator(Orchestrator):
         next_test_name = self.next_test(self.to_run, self.fmf_tests.tests, info)
         assert next_test_name in self.to_run, "next_test() returned valid test name"
 
-        util.info(f"starting '{next_test_name}' on {info.remote}")
+        util.info(f"{info.remote}: starting '{next_test_name}'")
 
         self.to_run.remove(next_test_name)
@@ -140,6 +142,7 @@ class AdHocOrchestrator(Orchestrator):
         )
 
         tmp_dir_path = Path(rinfo.tmp_dir.name)
+        tmp_dir_path.chmod(0o755)
         self.test_queue.start_thread(
             target=info.executor.run_test,
             target_args=(
@@ -176,6 +179,7 @@ class AdHocOrchestrator(Orchestrator):
 
         if not self.was_successful(finfo, test_data) and self.should_be_rerun(finfo, test_data):
             # re-run the test
+            util.info(f"{remote_with_test} failed, re-running")
             self.to_run.add(finfo.test_name)
         else:
             # ingest the result
@@ -183,15 +187,27 @@ class AdHocOrchestrator(Orchestrator):
             # a condition just in case Executor code itself threw an exception
             # and didn't even report the fallback 'infra' result
             if finfo.results is not None and finfo.files is not None:
-                self.aggregator.ingest(
-                    self.platform,
-                    finfo.test_name,
-                    finfo.results,
-                    finfo.files,
+                util.info(f"{remote_with_test} completed, ingesting result")
+
+                def ingest_and_cleanup(ingest, args, cleanup):
+                    ingest(*args)
+                    # also delete the tmpdir housing these
+                    cleanup()
+
+                self.ingest_queue.start_thread(
+                    ingest_and_cleanup,
+                    target_args=(
+                        # ingest func itself
+                        self.aggregator.ingest,
+                        # args for ingest
+                        (self.platform, finfo.test_name, finfo.results, finfo.files),
+                        # cleanup func itself
+                        finfo.tmp_dir.cleanup,
+                    ),
+                    test_name=finfo.test_name,
                 )
-                # also delete the tmpdir housing these
-                finfo.tmp_dir.cleanup()
-            # ingesting destroyed these
+
+            # ingesting destroys these
             finfo = self.FinishedInfo._from(
                 finfo,
                 results=None,
@@ -207,6 +223,8 @@ class AdHocOrchestrator(Orchestrator):
                 finfo.remote.release,
                 remote=finfo.remote,
             )
+            # TODO: should this be conditioned by 'self.to_run:' ? to not uselessly fall
+            # into setup spares and get immediately released after setup?
            finfo.provisioner.provision(1)
 
         # if still not destroyed, run another test on it
@@ -215,6 +233,14 @@ class AdHocOrchestrator(Orchestrator):
             util.debug(f"{remote_with_test} was non-destructive, running next test")
             self._run_new_test(finfo)
 
+        # no more tests to run, release the remote
+        else:
+            util.debug(f"{finfo.remote} no longer useful, releasing it")
+            self.release_queue.start_thread(
+                finfo.remote.release,
+                remote=finfo.remote,
+            )
+
     def serve_once(self):
         """
         Run the orchestration logic, processing any outstanding requests
@@ -225,7 +251,7 @@ class AdHocOrchestrator(Orchestrator):
         (more work to be done), False once all testing is concluded.
         """
         # all done
-        if not self.to_run and not self.running_tests and self.release_queue.qsize() == 0:
+        if not self.to_run and not self.running_tests:
             return False
 
         # process all finished tests, potentially reusing remotes for executing
@@ -263,7 +289,8 @@ class AdHocOrchestrator(Orchestrator):
         sinfo = treturn.sinfo
 
         if treturn.exception:
-            msg = f"{sinfo.remote}: setup failed with {repr(treturn.exception)}"
+            exc_str = f"{type(treturn.exception).__name__}({treturn.exception})"
+            msg = f"{sinfo.remote}: setup failed with {exc_str}"
             self.release_queue.start_thread(
                 sinfo.remote.release,
                 remote=sinfo.remote,
@@ -286,6 +313,7 @@ class AdHocOrchestrator(Orchestrator):
                 treturn = self.setup_queue.get_raw(block=False)
             except util.ThreadQueue.Empty:
                 break
+            util.debug(f"releasing extraneous set-up {treturn.sinfo.remote}")
             self.release_queue.start_thread(
                 treturn.sinfo.remote.release,
                 remote=treturn.sinfo.remote,
@@ -311,15 +339,30 @@ class AdHocOrchestrator(Orchestrator):
         # gather returns from Remote.release() functions - check for exceptions
         # thrown, re-report them as warnings as they are not typically critical
         # for operation
-        try:
-            treturn = self.release_queue.get_raw(block=False)
-        except util.ThreadQueue.Empty:
-            pass
-        else:
-            if treturn.exception:
-                util.warning(f"{treturn.remote} release failed: {repr(treturn.exception)}")
+        while True:
+            try:
+                treturn = self.release_queue.get_raw(block=False)
+            except util.ThreadQueue.Empty:
+                break
+            else:
+                if treturn.exception:
+                    exc_str = f"{type(treturn.exception).__name__}({treturn.exception})"
+                    util.warning(f"{treturn.remote} release failed: {exc_str}")
+                else:
+                    util.debug(f"{treturn.remote} release completed")
+
+        # gather returns from Aggregator.ingest() calls - check for exceptions
+        while True:
+            try:
+                treturn = self.ingest_queue.get_raw(block=False)
+            except util.ThreadQueue.Empty:
+                break
             else:
-                util.debug(f"{treturn.remote}: completed .release()")
+                if treturn.exception:
+                    exc_str = f"{type(treturn.exception).__name__}({treturn.exception})"
+                    util.warning(f"'{treturn.test_name}' ingesting failed: {exc_str}")
+                else:
+                    util.debug(f"'{treturn.test_name}' ingesting completed")
 
         return True
 
@@ -342,16 +385,30 @@ class AdHocOrchestrator(Orchestrator):
         # cancel all running tests and wait for them to clean up (up to 0.1sec)
         for rinfo in self.running_tests.values():
             rinfo.executor.cancel()
-        self.test_queue.join() # also ignore any exceptions raised
+        self.test_queue.join()  # also ignore any exceptions raised
+
+        # wait for all running ingestions to finish, print exceptions
+        # (we would rather stop provisioners further below than raise here)
+        while True:
+            try:
+                treturn = self.ingest_queue.get_raw(block=False)
+            except util.ThreadQueue.Empty:
+                break
+            else:
+                if treturn.exception:
+                    exc_str = f"{type(treturn.exception).__name__}({treturn.exception})"
+                    util.warning(f"'{treturn.test_name}' ingesting failed: {exc_str}")
+                else:
+                    util.debug(f"'{treturn.test_name}' ingesting completed")
+        self.ingest_queue.join()
 
         # stop all provisioners, also releasing all remotes
-        # TODO: don't parallelize here, remove .stop_defer() and parallelize in provisioners
+        # - parallelize up to 10 provisioners at a time
         if self.provisioners:
-            workers = min(len(self.provisioners), 20)
+            workers = min(len(self.provisioners), 10)
             with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as ex:
                 for provisioner in self.provisioners:
-                    for func in provisioner.stop_defer():
-                        ex.submit(func)
+                    ex.submit(provisioner.stop)
 
     @staticmethod
     def run_setup(sinfo):
@@ -426,7 +483,8 @@ class AdHocOrchestrator(Orchestrator):
 
         # executor (or test) threw exception
         if info.exception:
-            util.info(f"{remote_with_test} threw {repr(info.exception)} during test runtime")
+            exc_str = f"{type(info.exception).__name__}({info.exception})"
+            util.info(f"{remote_with_test} threw {exc_str} during test runtime")
             return False
 
         # the test exited as non-0
@@ -456,10 +514,10 @@ class AdHocOrchestrator(Orchestrator):
         # of tests, counting reruns for each
         # - allows the user to adjust counts per-test (ie. test_data metadata)
         # - allows this template to be @staticmethod
-        if (reruns_left := self.reruns[info.test_name]) > 0:
-            util.info(f"{remote_with_test}: re-running ({reruns_left} reruns left)")
+        reruns_left = self.reruns[info.test_name]
+        util.info(f"{remote_with_test}: {reruns_left} reruns left")
+        if reruns_left > 0:
             self.reruns[info.test_name] -= 1
             return True
         else:
-            util.info(f"{remote_with_test}: reruns exceeded, giving up")
             return False
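
util.ThreadQueue is atex's own helper, but the ingest offload above boils down to a standard pattern: run a callable in a worker thread and hand its outcome (plus caller metadata) back through a queue. A rough stdlib-only sketch of that pattern, with print standing in for Aggregator.ingest:

    import queue
    import threading

    returns = queue.Queue()

    def start_thread(target, target_args=(), **meta):
        def wrapper():
            try:
                target(*target_args)
                returns.put((meta, None))
            except Exception as e:
                returns.put((meta, e))
        threading.Thread(target=wrapper, daemon=False).start()

    def ingest_and_cleanup(ingest, args, cleanup):
        ingest(*args)
        cleanup()  # remove the tmpdir only after a successful ingest

    start_thread(ingest_and_cleanup,
                 target_args=(print, ("platform", "/some/test"), lambda: None),
                 test_name="/some/test")
    print(returns.get())  # ({'test_name': '/some/test'}, None)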
atex/orchestrator/contest.py ADDED
@@ -0,0 +1,94 @@
+from .. import util
+from .adhoc import AdHocOrchestrator
+
+
+# copy/pasted from the Contest repo, lib/virt.py
+def calculate_guest_tag(tags):
+    if "snapshottable" not in tags:
+        return None
+    name = "default"
+    if "with-gui" in tags:
+        name += "_gui"
+    if "uefi" in tags:
+        name += "_uefi"
+    if "fips" in tags:
+        name += "_fips"
+    return name
+
+
+class ContestOrchestrator(AdHocOrchestrator):
+    """
+    Orchestrator for the Contest test suite:
+    https://github.com/RHSecurityCompliance/contest
+
+    Includes SCAP content upload via rsync and other Contest-specific
+    optimizations (around VM snapshots and scheduling).
+    """
+    content_dir_on_remote = "/root/upstream-content"
+
+    def __init__(self, *args, content_dir, **kwargs):
+        self.content_dir = content_dir
+        super().__init__(*args, **kwargs)
+
+    def run_setup(self, sinfo):
+        super().run_setup(sinfo)
+        # upload pre-built content
+        sinfo.remote.rsync(
+            "-r", "--delete", "--exclude=.git/",
+            f"{self.content_dir}/",
+            f"remote:{self.content_dir_on_remote}",
+            func=util.subprocess_log,
+        )
+
+    @classmethod
+    def next_test(cls, to_run, all_tests, previous):
+        # fresh remote, prefer running destructive tests (which likely need
+        # clean OS) to get them out of the way and prevent them from running
+        # on a tainted OS later
+        if type(previous) is AdHocOrchestrator.SetupInfo:
+            for next_name in to_run:
+                next_tags = all_tests[next_name].get("tag", ())
+                util.debug(f"considering next_test for destructivity: {next_name}")
+                if "destructive" in next_tags:
+                    util.debug(f"chosen next_test: {next_name}")
+                    return next_name
+
+        # previous test was run and finished non-destructively,
+        # try to find a next test with the same Contest lib.virt guest tags
+        # as the previous one, allowing snapshot reuse by Contest
+        elif type(previous) is AdHocOrchestrator.FinishedInfo:
+            finished_tags = all_tests[previous.test_name].get("tag", ())
+            util.debug(f"previous finished test on {previous.remote}: {previous.test_name}")
+            # if Guest tag is None, don't bother searching
+            if finished_guest_tag := calculate_guest_tag(finished_tags):
+                for next_name in to_run:
+                    util.debug(f"considering next_test with tags {finished_tags}: {next_name}")
+                    next_tags = all_tests[next_name].get("tag", ())
+                    next_guest_tag = calculate_guest_tag(next_tags)
+                    if next_guest_tag and finished_guest_tag == next_guest_tag:
+                        util.debug(f"chosen next_test: {next_name}")
+                        return next_name
+
+        # fallback to the default next_test()
+        return super().next_test(to_run, all_tests, previous)
+
+    @classmethod
+    def destructive(cls, info, test_data):
+        # if Executor ended with an exception (ie. duration exceeded),
+        # consider the test destructive
+        if info.exception:
+            return True
+
+        # if the test returned non-0 exit code, it could have thrown
+        # a python exception of its own, or (if bash) aborted abruptly
+        # due to 'set -e', don't trust the remote, consider it destroyed
+        # (0 = pass, 2 = fail, anything else = bad)
+        if info.exit_code not in [0,2]:
+            return True
+
+        # if the test was destructive, assume the remote is destroyed
+        tags = test_data.get("tag", ())
+        if "destructive" in tags:
+            return True
+
+        return False
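
To illustrate the tag matching that next_test() relies on, a few calls to the copied helper (the tag tuples are invented fmf metadata):

    print(calculate_guest_tag(("snapshottable",)))                     # default
    print(calculate_guest_tag(("snapshottable", "uefi")))              # default_uefi
    print(calculate_guest_tag(("snapshottable", "with-gui", "fips")))  # default_gui_fips
    print(calculate_guest_tag(("destructive",)))                       # None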
atex/provisioner/__init__.py CHANGED
@@ -56,10 +56,6 @@ class Provisioner:
     that .get_remote() will ever return a Remote. Ie. the caller can call
     .provision(count=math.inf) to receive as many remotes as the Provisioner
     can possibly supply.
-
-    TODO: remove .defer_stop() (or stop_defer) and mention this below:
-    Note that .stop() or .defer_stop() may be called from a different
-    thread, asynchronously to any other functions.
     """
 
     def provision(self, count=1):
@@ -93,18 +89,6 @@ class Provisioner:
         """
         raise NotImplementedError(f"'stop' not implemented for {self.__class__.__name__}")
 
-    def stop_defer(self):
-        """
-        Enable an external caller to stop the Provisioner instance,
-        deferring resource deallocation to the caller.
-
-        Return an iterable of argument-free thread-safe callables that can be
-        called, possibly in parallel, to free up resources.
-        Ie. a list of 200 .release() functions, to be called in a thread pool
-        by the user, speeding up cleanup.
-        """
-        return (self.stop,)
-
     def __enter__(self):
         try:
             self.start()
atex/provisioner/libvirt/libvirt.py CHANGED
@@ -260,7 +260,19 @@ class LibvirtCloningProvisioner(Provisioner):
         # by libvirt natively (because treating nvram as a storage pool
         # is a user hack)
         for p in conn.listAllStoragePools():
-            p.refresh()
+            # retry a few times to work around a libvirt race condition
+            for _ in range(10):
+                try:
+                    p.refresh()
+                except libvirt.libvirtError as e:
+                    if "domain is not running" in str(e):
+                        break
+                    elif "has asynchronous jobs running" in str(e):
+                        continue
+                    else:
+                        raise
+                else:
+                    break
         try:
             nvram_vol = conn.storageVolLookupByPath(nvram_path)
         except libvirt.libvirtError as e:
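
The retry loop above is the for/else idiom: the inner else/break fires when p.refresh() succeeds, the error-message checks decide between giving up on that pool and retrying, and exhausting the range moves on. A generic, runnable sketch of the same idiom (the flaky() helper is invented):

    import random

    class TransientError(Exception):
        pass

    def flaky():
        # stand-in for p.refresh(); fails transiently about half the time
        if random.random() < 0.5:
            raise TransientError("has asynchronous jobs running")

    for _ in range(10):
        try:
            flaky()
        except TransientError:
            continue  # known-transient error, try again
        else:
            break  # success, stop retrying
    else:
        print("still failing after 10 attempts")  # range exhausted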
atex/provisioner/testingfarm/api.py CHANGED
@@ -18,7 +18,7 @@ DEFAULT_API_URL = "https://api.testing-farm.io/v0.1"
 
 DEFAULT_RESERVE_TEST = {
     "url": "https://github.com/RHSecurityCompliance/atex-reserve",
-    "ref": "v0.10",
+    "ref": "0.11",
     "path": ".",
     "name": "/plans/reserve",
 }
@@ -34,10 +34,10 @@ _http = urllib3.PoolManager(
     maxsize=10,
     block=True,
     retries=urllib3.Retry(
-        total=10,
+        total=24,
         # account for API restarts / short outages
-        backoff_factor=60,
-        backoff_max=600,
+        backoff_factor=10,
+        backoff_max=3600,
         # retry on API server errors too, not just connection issues
         status=10,
         status_forcelist={403,404,408,429,500,502,503,504},
@@ -438,7 +438,6 @@ class Reserve:
         'api' is a TestingFarmAPI instance - if unspecified, a sensible default
         will be used.
         """
-        util.info(f"will reserve compose:{compose} on arch:{arch} for {timeout}min")
         spec = {
             "test": {
                 "fmf": reserve_test or DEFAULT_RESERVE_TEST,
atex/provisioner/testingfarm/testingfarm.py CHANGED
@@ -1,6 +1,7 @@
 import time
 import tempfile
 import threading
+import concurrent.futures
 
 from ... import connection, util
 from .. import Provisioner, Remote
@@ -49,7 +50,12 @@ class TestingFarmRemote(Remote, connection.ssh.ManagedSSHConnection):
 
 
 class TestingFarmProvisioner(Provisioner):
+    # maximum number of TF requests the user can .provision(),
+    # as a last safety measure against Orchestrator(remotes=math.inf)
     absolute_max_remotes = 100
+    # number of parallel threads running HTTP DELETE calls to cancel
+    # TF requests on .stop() or Context Manager exit
+    stop_release_workers = 10
 
     def __init__(self, compose, arch="x86_64", *, max_retries=10, **reserve_kwargs):
         """
@@ -129,6 +135,7 @@ class TestingFarmProvisioner(Provisioner):
         # instantiate a class Reserve from the Testing Farm api module
         # (which typically provides context manager, but we use its .reserve()
         # and .release() functions directly)
+        util.info(f"{repr(self)}: reserving new remote")
         tf_reserve = api.Reserve(
             compose=self.compose,
             arch=self.arch,
@@ -154,29 +161,25 @@ class TestingFarmProvisioner(Provisioner):
         self.ssh_key, self.ssh_pubkey = util.ssh_keygen(self._tmpdir.name)
 
     def stop(self):
-        with self.lock:
-            # abort reservations in progress
-            while self.reserving:
-                # testingfarm api.Reserve instances
-                self.reserving.pop().release()
-            # cancel/release all Remotes ever created by us
-            while self.remotes:
-                # TestingFarmRemote instances
-                self.remotes.pop().release()
-            # explicitly remove the tmpdir rather than relying on destructor
-            self._tmpdir.cleanup()
-            self._tmpdir = None
+        release_funcs = []
 
-    def stop_defer(self):
-        callables = []
         with self.lock:
-            callables += (f.release for f in self.reserving)
+            release_funcs += (f.release for f in self.reserving)
             self.reserving = []
-            callables += (r.release for r in self.remotes)
-            self.remotes = []  # just in case
-            callables.append(self._tmpdir.cleanup)
+            release_funcs += (r.release for r in self.remotes)
+            self.remotes = []  # just in case of a later .start()
+
+        # parallelize at most stop_release_workers TF API release (DELETE) calls
+        if release_funcs:
+            workers = min(len(release_funcs), self.stop_release_workers)
+            with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as ex:
+                for func in release_funcs:
+                    ex.submit(func)
+
+        with self.lock:
+            # explicitly remove the tmpdir rather than relying on destructor
+            self._tmpdir.cleanup()
             self._tmpdir = None
-        return callables
 
     def provision(self, count=1):
         with self.lock:
@@ -198,10 +201,11 @@ class TestingFarmProvisioner(Provisioner):
             # always non-blocking
             return None
         except (api.TestingFarmError, connection.ssh.SSHError) as e:
+            exc_str = f"{type(e).__name__}({e})"
            with self.lock:
                 if self.retries > 0:
                     util.warning(
-                        f"caught while reserving a TF system: {repr(e)}, "
+                        f"caught while reserving a TF system: {exc_str}, "
                         f"retrying ({self.retries} left)",
                     )
                     self.retries -= 1
@@ -212,7 +216,7 @@ class TestingFarmProvisioner(Provisioner):
                     return None
                 else:
                     util.warning(
-                        f"caught while reserving a TF system: {repr(e)}, "
+                        f"caught while reserving a TF system: {exc_str}, "
                         "exhausted all retries, giving up",
                     )
                     raise
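
The bounded ThreadPoolExecutor in stop() is a reusable shape for draining a list of independent cleanup callables. A self-contained sketch, with print lambdas standing in for the TF DELETE calls:

    import concurrent.futures

    # stand-ins for api.Reserve.release / TestingFarmRemote.release
    release_funcs = [lambda i=i: print(f"releasing request {i}") for i in range(25)]

    workers = min(len(release_funcs), 10)  # cap concurrent API calls
    with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as ex:
        for func in release_funcs:
            ex.submit(func)
    # the with-block exits only once every submitted release has finished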
atex/util/subprocess.py CHANGED
@@ -1,6 +1,6 @@
 import subprocess
 
-from .log import debug, extradebug
+from .log import extradebug
 
 
 def subprocess_run(cmd, **kwargs):
@@ -9,7 +9,7 @@ def subprocess_run(cmd, **kwargs):
     """
     # when logging, skip current stack frame - report the place we were called
     # from, not util.subprocess_run itself
-    debug(f"running: '{cmd}' with {kwargs=}")
+    extradebug(f"running: '{cmd}' with {kwargs=}")
     return subprocess.run(cmd, **kwargs)
 
 
@@ -17,7 +17,7 @@ def subprocess_output(cmd, *, check=True, text=True, **kwargs):
     """
     A wrapper simulating subprocess.check_output() via a modern .run() API.
     """
-    debug(f"running: '{cmd}' with {check=}, {text=} and {kwargs=}")
+    extradebug(f"running: '{cmd}' with {check=}, {text=} and {kwargs=}")
     proc = subprocess.run(cmd, check=check, text=text, stdout=subprocess.PIPE, **kwargs)
     return proc.stdout.rstrip("\n") if text else proc.stdout
 
@@ -26,7 +26,7 @@ def subprocess_Popen(cmd, **kwargs):  # noqa: N802
     """
     A simple wrapper for the real subprocess.Popen() that logs the command used.
     """
-    debug(f"running: '{cmd}' with {kwargs=}")
+    extradebug(f"running: '{cmd}' with {kwargs=}")
     return subprocess.Popen(cmd, **kwargs)
 
 
@@ -56,7 +56,7 @@ def subprocess_stream(cmd, *, stream="stdout", check=False, input=None, **kwargs):
         all_kwargs["stdin"] = subprocess.PIPE
     all_kwargs |= kwargs
 
-    debug(f"running: '{cmd}' with {all_kwargs=}")
+    extradebug(f"running: '{cmd}' with {all_kwargs=}")
     proc = subprocess.Popen(cmd, **all_kwargs)
 
     def generate_lines():
@@ -80,7 +80,7 @@ def subprocess_log(cmd, **kwargs):
 
     Uses subprocess_stream() to gather the lines.
     """
-    debug(f"running: '{cmd}' with {kwargs=}")
+    extradebug(f"running: '{cmd}' with {kwargs=}")
     _, lines = subprocess_stream(cmd, **kwargs)
     for line in lines:
         extradebug(line)
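
util.EXTRADEBUG (compared against in atex/cli/__init__.py above) implies a custom logging level below DEBUG, which these demotions move the noisy per-command lines down to. Registering such a level is plain stdlib; a sketch assuming a value of 5, which is a guess:

    import logging

    EXTRADEBUG = 5  # assumption: some value below logging.DEBUG (10)
    logging.addLevelName(EXTRADEBUG, "EXTRADEBUG")

    def extradebug(msg, *args, **kwargs):
        logging.log(EXTRADEBUG, msg, *args, **kwargs)

    logging.basicConfig(level=EXTRADEBUG)
    extradebug("running: '%s'", ["ls", "-l"])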
atex-0.11.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: atex
-Version: 0.10
+Version: 0.11
 Summary: Ad-hoc Test EXecutor
 Project-URL: Homepage, https://github.com/RHSecurityCompliance/atex
 License-Expression: GPL-3.0-or-later
atex-0.11.dist-info/RECORD CHANGED
@@ -1,33 +1,34 @@
 atex/__init__.py,sha256=LdX67gprtHYeAkjLhFPKzpc7ECv2rHxUbHKDGbGXO1c,517
 atex/fmf.py,sha256=gkJXIaRO7_KvwJR-V6Tc1NVn4a9Hq7hoBLQLhxYIdbg,8834
-atex/aggregator/__init__.py,sha256=uNnYSyDGXjknxckI8MFfl-C8_gin8FwQchiq-UOyP6I,1744
-atex/aggregator/json.py,sha256=x1zim9O2olzBh185NYWo5N96fixB2oxCamoOZwmgR9w,3330
-atex/cli/__init__.py,sha256=X5XxkDEDXE4tJAjwt5ShRHCFTXDK-2zvxQ34opmueUc,2768
-atex/cli/fmf.py,sha256=HfbTgFbCwK4Nuyq6vtGutcq_4-4kj-tmoqzXUn3AYtY,3573
+atex/aggregator/__init__.py,sha256=8mN-glHdzR4icKAUGO4JPodsTrLMdJoeuZsO2CTbhyU,1773
+atex/aggregator/json.py,sha256=tpoUZoZM8EMYhZKwVr4LRtgEIDjRxC11BIKVXZKYPOs,10441
+atex/cli/__init__.py,sha256=Ew2z-gC0jvOmU_DqYgXVQla3p1rTnrz64I63q52aHv4,2899
+atex/cli/fmf.py,sha256=pvj_OIp6XT_nVUwziL7-v_HNbyAtuUmb7k_Ey_KkFJc,3616
 atex/cli/libvirt.py,sha256=6tt5ANb8XBBRXOQsYPTWILThKqf-gvt5AZh5Dctg2PA,3782
-atex/cli/testingfarm.py,sha256=HGlqrkhanUMo2CqKxmM3ACgptWtxm0gICyEGf7O6Qc0,9078
+atex/cli/testingfarm.py,sha256=ovgoogmIM2TglS7iQD3liMiEYYtcykS_HRRKbltpW2I,10131
 atex/connection/__init__.py,sha256=dj8ZBcEspom7Z_UjecfLGBRNvLZ3dyGR9q19i_B4xpY,3880
 atex/connection/podman.py,sha256=1T56gh1TgbcQWpTIJHL4NaxZOI6aMg7Xp7sn6PQQyBk,1911
 atex/connection/ssh.py,sha256=9A57b9YR_HI-kIu06Asic1y__JPVXEheDZxjbG2Qcsc,13460
 atex/executor/__init__.py,sha256=XCfhi7QDELjey7N1uzhMjc46Kp1Jsd5bOCf52I27SCE,85
 atex/executor/duration.py,sha256=x06sItKOZi6XA8KszQwZGpIb1Z_L-HWqIwZKo2SDo0s,1759
-atex/executor/executor.py,sha256=JLFR9cZjSlUdAlAlLct6WuzmYbtjGtSobxvsToQum6M,15738
+atex/executor/executor.py,sha256=toyLVQCDzfw381iEGrvOXoKPsd4SqxMZHwlDSTJGqKk,15792
 atex/executor/reporter.py,sha256=MceFmHFt0bTEClBZbRI1WnFbfMhR0e1noOzcu7gjKuQ,3403
 atex/executor/scripts.py,sha256=riJAQWsV-BFGkJwR2Dmf3R0ZRRZJs9w9iYnPpYaQNaE,5618
 atex/executor/testcontrol.py,sha256=mVrLwQUnDRfUq-5diz-80UvCWWxn1TkcBgmAKhKNb5E,12696
 atex/orchestrator/__init__.py,sha256=8Q1YknyibilXLjWRYkHm_Mr2HMm0SRw8Zv39KypeASM,2059
-atex/orchestrator/adhoc.py,sha256=GnvHLlCHeJ_nQ8doEjMuDzqmu4XZorI7ZzOtG_C08tU,18451
-atex/provisioner/__init__.py,sha256=2eepmEznq94tbam9VSWbsGFrZZpWeNSVlsTczGxjNuQ,4667
+atex/orchestrator/adhoc.py,sha256=QpYoPeyQzYFDBM1zgFJKMXH1RtdJixbH5whVX0OP-14,21003
+atex/orchestrator/contest.py,sha256=ADmRlsZPQx-MJ6fWHmBcJOIy3DSPnvwVheVL9Upwtg0,3703
+atex/provisioner/__init__.py,sha256=6hZxQlvTQ0yWWqCRCPqWMoYuim5wDMCcDIYHF-nIfMs,4013
 atex/provisioner/libvirt/VM_PROVISION,sha256=7pkZ-ozgTyK4qNGC-E-HUznr4IhbosWSASbB72Gknl8,2664
 atex/provisioner/libvirt/__init__.py,sha256=pKG5IpZSC2IHs5wL2ecQx_fd9AzAXEbZmDzA7RyZsfM,119
-atex/provisioner/libvirt/libvirt.py,sha256=rtxowv5DpgcWsGRXYF29n6S9x_cgXRVgqY41DiFu920,18431
+atex/provisioner/libvirt/libvirt.py,sha256=ZKctK2B51olvWvLxz2pZ2s6LtX_7EJ43LvlyJHnI1Ho,18955
 atex/provisioner/libvirt/locking.py,sha256=AXtDyidZNmUoMmrit26g9iTHDqInrzL_RSQEoc_EAXw,5669
 atex/provisioner/libvirt/setup-libvirt.sh,sha256=oCMy9SCnbC_QuAzO2sFwvB5ui1kMQ6uviHsgdXyoFXc,2428
 atex/provisioner/podman/__init__.py,sha256=dM0JzQXWX7edtWSc0KH0cMFXAjArFn2Vme4j_ZMsdYA,138
 atex/provisioner/podman/podman.py,sha256=ztRypoakSf-jF04iER58tEMUZ4Y6AuzIpNpFXp44bB4,4997
 atex/provisioner/testingfarm/__init__.py,sha256=kZncgLGdRCR4FMaRQr2GTwJ8vjlA-24ri8JO2ueZJuw,113
-atex/provisioner/testingfarm/api.py,sha256=UcMN61nBr3wqEd5KSR5Xhv1-TS7nSPFvk2byb6PdIs8,21811
-atex/provisioner/testingfarm/testingfarm.py,sha256=OI-a99xALaiYf-y5037WFVxY1g2H2y1xEKxHBdUQvfg,8271
+atex/provisioner/testingfarm/api.py,sha256=dlXe9brzHERawIx2UTv34u2tOSskdZtXD68-u1MnOHk,21726
+atex/provisioner/testingfarm/testingfarm.py,sha256=yvQzWat92B4UnJNZzCLI8mpAKf_QvHUKyKbjlk5123Q,8573
 atex/util/__init__.py,sha256=cWHFbtQ4mDlKe6lXyPDWRmWJOTcHDGfVuW_-GYa8hB0,1473
 atex/util/dedent.py,sha256=SEuJMtLzqz3dQ7g7qyZzEJ9VYynVlk52tQCJY-FveXo,603
 atex/util/libvirt.py,sha256=kDZmT6xLYEZkQNLZY98gJ2M48DDWXxHF8rQY9PnjB3U,660
@@ -35,10 +36,10 @@ atex/util/log.py,sha256=KVR7ep8n5wtghsvBFCtHiPsMAQBdAmK83E_Jec5t4cU,2230
 atex/util/named_mapping.py,sha256=UBMe9TetjV-DGPhjYjJ42YtC40FVPKAAEROXl9MA5fo,4700
 atex/util/path.py,sha256=x-kXqiWCVodfZWbEwtC5A8LFvutpDIPYv2m0boZSlXU,504
 atex/util/ssh_keygen.py,sha256=9yuSl2yBV7pG3Qfsf9tossVC00nbIUrAeLdbwTykpjk,384
-atex/util/subprocess.py,sha256=PQBxcQJPapP1ZLO4LqENyrxxCbNAxtJDNNlBV5DcD9k,2953
+atex/util/subprocess.py,sha256=_oQN8CNgGoH9GAR6nZlpujYe2HjXFBcCuIkLPw-IxJ4,2971
 atex/util/threads.py,sha256=c8hsEc-8SqJGodInorv_6JxpiHiSkGFGob4qbMmOD2M,3531
-atex-0.10.dist-info/METADATA,sha256=evOBYvVboY2T8eGrAKy64UpyeuFKSMLGz8pUz8Sstm8,3050
-atex-0.10.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-atex-0.10.dist-info/entry_points.txt,sha256=pLqJdcfeyQTgup2h6dWb6SvkHhtOl-W5Eg9zV8moK0o,39
-atex-0.10.dist-info/licenses/COPYING.txt,sha256=oEuj51jdmbXcCUy7pZ-KE0BNcJTR1okudRp5zQ0yWnU,670
-atex-0.10.dist-info/RECORD,,
+atex-0.11.dist-info/METADATA,sha256=3fRMLBrkoRIHwbY2GheyNkrx4mNVmL_95wlxGjZsORc,3050
+atex-0.11.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+atex-0.11.dist-info/entry_points.txt,sha256=pLqJdcfeyQTgup2h6dWb6SvkHhtOl-W5Eg9zV8moK0o,39
+atex-0.11.dist-info/licenses/COPYING.txt,sha256=oEuj51jdmbXcCUy7pZ-KE0BNcJTR1okudRp5zQ0yWnU,670
+atex-0.11.dist-info/RECORD,,
atex-0.11.dist-info/WHEEL CHANGED
@@ -1,4 +1,4 @@
 Wheel-Version: 1.0
-Generator: hatchling 1.27.0
+Generator: hatchling 1.28.0
 Root-Is-Purelib: true
 Tag: py3-none-any