atex 0.9-py3-none-any.whl → 0.11-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. atex/aggregator/__init__.py +62 -0
  2. atex/aggregator/json.py +279 -0
  3. atex/cli/__init__.py +14 -1
  4. atex/cli/fmf.py +7 -7
  5. atex/cli/libvirt.py +3 -2
  6. atex/cli/testingfarm.py +74 -3
  7. atex/connection/podman.py +2 -4
  8. atex/connection/ssh.py +7 -14
  9. atex/executor/executor.py +21 -20
  10. atex/executor/scripts.py +5 -3
  11. atex/executor/testcontrol.py +1 -1
  12. atex/orchestrator/__init__.py +76 -3
  13. atex/orchestrator/{orchestrator.py → adhoc.py} +246 -108
  14. atex/orchestrator/contest.py +94 -0
  15. atex/{provision → provisioner}/__init__.py +48 -52
  16. atex/{provision → provisioner}/libvirt/libvirt.py +34 -15
  17. atex/{provision → provisioner}/libvirt/locking.py +3 -1
  18. atex/provisioner/podman/__init__.py +2 -0
  19. atex/provisioner/podman/podman.py +169 -0
  20. atex/{provision → provisioner}/testingfarm/api.py +56 -48
  21. atex/{provision → provisioner}/testingfarm/testingfarm.py +43 -45
  22. atex/util/log.py +62 -67
  23. atex/util/subprocess.py +46 -12
  24. atex/util/threads.py +7 -0
  25. atex-0.11.dist-info/METADATA +86 -0
  26. atex-0.11.dist-info/RECORD +45 -0
  27. {atex-0.9.dist-info → atex-0.11.dist-info}/WHEEL +1 -1
  28. atex/orchestrator/aggregator.py +0 -111
  29. atex/provision/podman/__init__.py +0 -1
  30. atex/provision/podman/podman.py +0 -274
  31. atex-0.9.dist-info/METADATA +0 -178
  32. atex-0.9.dist-info/RECORD +0 -43
  33. /atex/{provision → provisioner}/libvirt/VM_PROVISION +0 -0
  34. /atex/{provision → provisioner}/libvirt/__init__.py +0 -0
  35. /atex/{provision → provisioner}/libvirt/setup-libvirt.sh +0 -0
  36. /atex/{provision → provisioner}/testingfarm/__init__.py +0 -0
  37. {atex-0.9.dist-info → atex-0.11.dist-info}/entry_points.txt +0 -0
  38. {atex-0.9.dist-info → atex-0.11.dist-info}/licenses/COPYING.txt +0 -0
atex/executor/executor.py CHANGED
@@ -26,9 +26,9 @@ class Executor:
     and uploaded files by those tests.

         tests_repo = "path/to/cloned/tests"
-        tests_data = atex.fmf.FMFTests(tests_repo, "/plans/default")
+        fmf_tests = atex.fmf.FMFTests(tests_repo, "/plans/default")

-        with Executor(tests_data, conn) as e:
+        with Executor(fmf_tests, conn) as e:
             e.upload_tests()
             e.plan_prepare()
             Path("output_here").mkdir()
@@ -42,12 +42,12 @@

         conn.cmd(["mkdir", "-p", "/shared"])

-        with Executor(tests_data, conn, state_dir="/shared") as e:
+        with Executor(fmf_tests, conn, state_dir="/shared") as e:
             e.upload_tests()
             e.plan_prepare()

         # in parallel (ie. threading or multiprocessing)
-        with Executor(tests_data, unique_conn, state_dir="/shared") as e:
+        with Executor(fmf_tests, unique_conn, state_dir="/shared") as e:
             e.run_test(...)
     """

@@ -74,7 +74,7 @@
         self.plan_env_file = None
         self.cancelled = False

-    def setup(self):
+    def start(self):
         with self.lock:
             state_dir = self.state_dir

@@ -107,7 +107,7 @@
             # create / truncate the TMT_PLAN_ENVIRONMENT_FILE
             self.conn.cmd(("truncate", "-s", "0", self.plan_env_file), check=True)

-    def cleanup(self):
+    def stop(self):
         with self.lock:
             work_dir = self.work_dir

@@ -121,14 +121,14 @@

     def __enter__(self):
         try:
-            self.setup()
+            self.start()
             return self
         except Exception:
-            self.cleanup()
+            self.stop()
             raise

     def __exit__(self, exc_type, exc_value, traceback):
-        self.cleanup()
+        self.stop()

     def cancel(self):
         with self.lock:
@@ -140,10 +140,10 @@
         __init__() inside 'fmf_tests', to the remote host.
         """
         self.conn.rsync(
-            "-rv" if util.in_debug_mode() else "-rq",
-            "--delete", "--exclude=.git/",
+            "-r", "--delete", "--exclude=.git/",
             f"{self.fmf_tests.root}/",
             f"remote:{self.tests_dir}",
+            func=util.subprocess_log,
         )

     def _run_prepare_scripts(self, scripts):
@@ -153,16 +153,15 @@
             **self.env,
             "TMT_PLAN_ENVIRONMENT_FILE": self.plan_env_file,
         }
-        env_args = (f"{k}={v}" for k, v in env.items())
+        env_args = tuple(f"{k}={v}" for k, v in env.items())
         # run the scripts
         for script in scripts:
             self.conn.cmd(
-                ("env", *env_args, "bash"),
+                ("env", "-C", self.tests_dir, *env_args, "bash"),
+                func=util.subprocess_log,
+                stderr=subprocess.STDOUT,
                 input=script,
-                text=True,
                 check=True,
-                stdout=None if util.in_debug_mode() else subprocess.DEVNULL,
-                stderr=subprocess.STDOUT,
             )

     def plan_prepare(self):
@@ -180,9 +179,9 @@
                 "dnf", "-y", "--setopt=install_weak_deps=False",
                 "install", *self.fmf_tests.prepare_pkgs,
             ),
-            check=True,
-            stdout=None if util.in_debug_mode() else subprocess.DEVNULL,
+            func=util.subprocess_log,
             stderr=subprocess.STDOUT,
+            check=True,
        )

         # run 'prepare' scripts from the plan
@@ -336,7 +335,9 @@
                 reconnects += 1
                 state = self.State.STARTING_TEST
             except BlockingIOError:
-                pass
+                # avoid 100% CPU spinning if the connection it too slow
+                # to come up (ie. ssh ControlMaster socket file not created)
+                time.sleep(0.5)
             except ConnectionError:
                 # can happen when ie. ssh is connecting over a LocalForward port,
                 # causing 'read: Connection reset by peer' instead of timeout
@@ -386,7 +387,7 @@
             pass
         reporter.report({
             "status": "infra",
-            "note": repr(exception),
+            "note": f"{type(exception).__name__}({exception})",
             "testout": "output.txt",
         })

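The setup()/cleanup() pair is renamed to start()/stop(), and __enter__/__exit__ now call the new names, so the with-block flow from the class docstring is unchanged. A minimal usage sketch based only on that docstring (conn stands in for an atex connection object and is not constructed here):

    from atex import fmf
    from atex.executor.executor import Executor

    fmf_tests = fmf.FMFTests("path/to/cloned/tests", "/plans/default")

    # __enter__ calls start(); __exit__ calls stop(), even if start() raised
    with Executor(fmf_tests, conn) as e:
        e.upload_tests()    # rsync the fmf tests tree to the remote host
        e.plan_prepare()    # dnf-install prepare_pkgs, then run the plan's prepare scripts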
atex/executor/scripts.py CHANGED
@@ -1,7 +1,9 @@
+import os
 import collections
-import yaml
 from pathlib import Path

+import yaml
+
 from .. import util, fmf

 # name: fmf path to the test as string, ie. /some/test
@@ -50,7 +52,7 @@ def test_wrapper(*, test, tests_dir, test_exec):
     # doing it here avoids unnecessary traffic (reading stdin) via ssh,
     # even if it is fed from subprocess.DEVNULL on the runner

-    if util.in_debug_mode():
+    if os.environ.get("ATEX_DEBUG_TEST") == "1":
         out += "set -x\n"

     # use a subshell to limit the scope of the CWD change
@@ -122,7 +124,7 @@ def test_setup(*, test, wrapper_exec, test_exec, test_yaml, **kwargs):
     """
     out = "#!/bin/bash\n"

-    if util.in_debug_mode():
+    if os.environ.get("ATEX_DEBUG_TEST") == "1":
         out += "set -xe\n"
     else:
         out += "exec 1>/dev/null\n"
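The generated wrapper and setup scripts now enable shell tracing based on the ATEX_DEBUG_TEST environment variable instead of util.in_debug_mode(). A hypothetical way to switch it on in the process that renders the scripts (the exact entry point is not shown in this diff):

    import os

    # must be set before test_wrapper() / test_setup() build the script text;
    # the new check only accepts the literal value "1"
    os.environ["ATEX_DEBUG_TEST"] = "1"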
atex/executor/testcontrol.py CHANGED
@@ -156,7 +156,7 @@ class TestControl:
         except BufferFullError as e:
             raise BadControlError(str(e)) from None

-        util.debug(f"got control line: {line} // eof: {self.stream.eof}")
+        util.extradebug(f"control line: {line} // eof: {self.stream.eof}")

         if self.stream.eof:
             self.eof = True
atex/orchestrator/__init__.py CHANGED
@@ -1,3 +1,76 @@
-#from .aggregator import CSVAggregator, JSONAggregator  # noqa: F401
-from .aggregator import JSONAggregator  # noqa: F401
-from .orchestrator import Orchestrator, OrchestratorError, FailedSetupError  # noqa: F401
+import importlib as _importlib
+import pkgutil as _pkgutil
+import time as _time
+
+
+class OrchestratorError(Exception):
+    pass
+
+
+class Orchestrator:
+    """
+    A scheduler for parallel execution on multiple resources (machines/systems).
+
+    TODO: more description
+    """
+
+    def serve_once(self):
+        """
+        Run the orchestration logic, processing any outstanding requests
+        (for provisioning, new test execution, etc.) and returning once these
+        are taken care of.
+
+        Returns True to indicate that it should be called again by the user
+        (more work to be done), False once all testing is concluded.
+        """
+        raise NotImplementedError(f"'serve_once' not implemented for {self.__class__.__name__}")
+
+    def serve_forever(self):
+        """
+        Run the orchestration logic, blocking until all testing is concluded.
+        """
+        while self.serve_once():
+            _time.sleep(1)
+
+    def start(self):
+        """
+        Start the Orchestrator instance, opening any files / allocating
+        resources as necessary.
+        """
+        raise NotImplementedError(f"'start' not implemented for {self.__class__.__name__}")
+
+    def stop(self):
+        """
+        Stop the Orchestrator instance, freeing all allocated resources.
+        """
+        raise NotImplementedError(f"'stop' not implemented for {self.__class__.__name__}")
+
+    def __enter__(self):
+        try:
+            self.start()
+            return self
+        except Exception:
+            self.stop()
+            raise
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self.stop()
+
+
+_submodules = [
+    info.name for info in _pkgutil.iter_modules(__spec__.submodule_search_locations)
+]
+
+__all__ = [*_submodules, Orchestrator.__name__]  # noqa: PLE0604
+
+
+def __dir__():
+    return __all__
+
+
+# lazily import submodules
+def __getattr__(attr):
+    if attr in _submodules:
+        return _importlib.import_module(f".{attr}", __name__)
+    else:
+        raise AttributeError(f"module '{__name__}' has no attribute '{attr}'")
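The rewritten package __init__ drops the eager re-exports in favour of a pkgutil-based submodule list, a module-level __getattr__ that imports submodules lazily on first attribute access, and an Orchestrator base class defining the start/stop/serve_once contract implemented by the concrete orchestrators (adhoc, contest). A minimal sketch of how a consumer relies on both pieces; NullOrchestrator and its job list are illustrative only, not part of the package:

    import atex.orchestrator as orchestrator

    # lazy import: the adhoc submodule is only loaded on first access,
    # via __getattr__ -> importlib.import_module(".adhoc", ...)
    adhoc_module = orchestrator.adhoc

    class NullOrchestrator(orchestrator.Orchestrator):
        def __init__(self, jobs):
            self.jobs = list(jobs)

        def start(self):
            # allocate resources; called by __enter__
            pass

        def stop(self):
            # free resources; called by __exit__, even if start() raised
            pass

        def serve_once(self):
            # one round of work; True means "call me again", False means all done
            if self.jobs:
                self.jobs.pop()
            return bool(self.jobs)

    with NullOrchestrator(range(3)) as o:
        o.serve_forever()   # repeats serve_once() with a 1 second sleep until it returns False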