atex 0.8__py3-none-any.whl → 0.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- atex/aggregator/__init__.py +60 -0
- atex/aggregator/json.py +96 -0
- atex/cli/__init__.py +11 -1
- atex/cli/fmf.py +73 -23
- atex/cli/libvirt.py +128 -0
- atex/cli/testingfarm.py +60 -3
- atex/connection/__init__.py +13 -11
- atex/connection/podman.py +61 -0
- atex/connection/ssh.py +38 -47
- atex/executor/executor.py +144 -119
- atex/executor/reporter.py +66 -71
- atex/executor/scripts.py +13 -5
- atex/executor/testcontrol.py +43 -30
- atex/fmf.py +94 -74
- atex/orchestrator/__init__.py +76 -2
- atex/orchestrator/adhoc.py +465 -0
- atex/{provision → provisioner}/__init__.py +54 -42
- atex/provisioner/libvirt/__init__.py +2 -0
- atex/provisioner/libvirt/libvirt.py +472 -0
- atex/provisioner/libvirt/locking.py +170 -0
- atex/{provision → provisioner}/libvirt/setup-libvirt.sh +21 -1
- atex/provisioner/podman/__init__.py +2 -0
- atex/provisioner/podman/podman.py +169 -0
- atex/{provision → provisioner}/testingfarm/api.py +121 -69
- atex/{provision → provisioner}/testingfarm/testingfarm.py +44 -52
- atex/util/libvirt.py +18 -0
- atex/util/log.py +53 -43
- atex/util/named_mapping.py +158 -0
- atex/util/subprocess.py +46 -12
- atex/util/threads.py +71 -20
- atex-0.10.dist-info/METADATA +86 -0
- atex-0.10.dist-info/RECORD +44 -0
- atex/orchestrator/aggregator.py +0 -106
- atex/orchestrator/orchestrator.py +0 -324
- atex/provision/libvirt/__init__.py +0 -24
- atex/provision/podman/README +0 -59
- atex/provision/podman/host_container.sh +0 -74
- atex-0.8.dist-info/METADATA +0 -197
- atex-0.8.dist-info/RECORD +0 -37
- /atex/{provision → provisioner}/libvirt/VM_PROVISION +0 -0
- /atex/{provision → provisioner}/testingfarm/__init__.py +0 -0
- {atex-0.8.dist-info → atex-0.10.dist-info}/WHEEL +0 -0
- {atex-0.8.dist-info → atex-0.10.dist-info}/entry_points.txt +0 -0
- {atex-0.8.dist-info → atex-0.10.dist-info}/licenses/COPYING.txt +0 -0
atex/util/threads.py
CHANGED
|
@@ -1,48 +1,85 @@
|
|
|
1
|
-
import collections
|
|
2
1
|
import queue
|
|
3
2
|
import threading
|
|
4
3
|
|
|
4
|
+
from .named_mapping import NamedMapping
|
|
5
|
+
|
|
5
6
|
# TODO: documentation; this is like concurrent.futures, but with daemon=True support
|
|
6
7
|
|
|
7
8
|
|
|
8
9
|
class ThreadQueue:
|
|
9
|
-
ThreadReturn
|
|
10
|
+
class ThreadReturn(NamedMapping, required=("thread", "returned", "exception")):
|
|
11
|
+
pass
|
|
12
|
+
|
|
10
13
|
Empty = queue.Empty
|
|
11
14
|
|
|
12
15
|
def __init__(self, daemon=False):
|
|
16
|
+
self.lock = threading.RLock()
|
|
13
17
|
self.queue = queue.SimpleQueue()
|
|
14
18
|
self.daemon = daemon
|
|
15
19
|
self.threads = set()
|
|
16
20
|
|
|
17
|
-
def _wrapper(self, func,
|
|
21
|
+
def _wrapper(self, func, func_args, func_kwargs, **user_kwargs):
|
|
18
22
|
current_thread = threading.current_thread()
|
|
19
23
|
try:
|
|
20
|
-
ret = func(*
|
|
21
|
-
result = self.ThreadReturn(
|
|
24
|
+
ret = func(*func_args, **func_kwargs)
|
|
25
|
+
result = self.ThreadReturn(
|
|
26
|
+
thread=current_thread,
|
|
27
|
+
returned=ret,
|
|
28
|
+
exception=None,
|
|
29
|
+
**user_kwargs,
|
|
30
|
+
)
|
|
22
31
|
except Exception as e:
|
|
23
|
-
result = self.ThreadReturn(
|
|
32
|
+
result = self.ThreadReturn(
|
|
33
|
+
thread=current_thread,
|
|
34
|
+
returned=None,
|
|
35
|
+
exception=e,
|
|
36
|
+
**user_kwargs,
|
|
37
|
+
)
|
|
24
38
|
self.queue.put(result)
|
|
25
39
|
|
|
26
|
-
def start_thread(self, target,
|
|
27
|
-
|
|
28
|
-
|
|
40
|
+
def start_thread(self, target, target_args=None, target_kwargs=None, **user_kwargs):
|
|
41
|
+
"""
|
|
42
|
+
Start a new thread and call 'target' as a callable inside it, passing it
|
|
43
|
+
'target_args' as arguments and 'target_kwargs' as keyword arguments.
|
|
44
|
+
|
|
45
|
+
Any additional 'user_kwargs' specified are NOT passed to the callable,
|
|
46
|
+
but instead become part of the ThreadReturn namespace returned by the
|
|
47
|
+
.get_raw() method.
|
|
48
|
+
"""
|
|
29
49
|
t = threading.Thread(
|
|
30
50
|
target=self._wrapper,
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
kwargs=kwargs,
|
|
51
|
+
args=(target, target_args or (), target_kwargs or {}),
|
|
52
|
+
kwargs=user_kwargs,
|
|
34
53
|
daemon=self.daemon,
|
|
35
54
|
)
|
|
55
|
+
with self.lock:
|
|
56
|
+
self.threads.add(t)
|
|
36
57
|
t.start()
|
|
37
|
-
|
|
58
|
+
|
|
59
|
+
def get_raw(self, block=True, timeout=None):
|
|
60
|
+
"""
|
|
61
|
+
Wait for and return the next available ThreadReturn instance on the
|
|
62
|
+
queue, as enqueued by a finished callable started by the .start_thread()
|
|
63
|
+
method.
|
|
64
|
+
"""
|
|
65
|
+
with self.lock:
|
|
66
|
+
if block and timeout is None and not self.threads:
|
|
67
|
+
raise AssertionError("no threads are running, would block forever")
|
|
68
|
+
treturn = self.queue.get(block=block, timeout=timeout)
|
|
69
|
+
with self.lock:
|
|
70
|
+
self.threads.remove(treturn.thread)
|
|
71
|
+
return treturn
|
|
38
72
|
|
|
39
73
|
# get one return value from any thread's function, like .as_completed()
|
|
40
74
|
# or concurrent.futures.FIRST_COMPLETED
|
|
41
75
|
def get(self, block=True, timeout=None):
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
76
|
+
"""
|
|
77
|
+
Wait for and return the next available return value of a callable
|
|
78
|
+
enqueued via the .start_thread() method.
|
|
79
|
+
|
|
80
|
+
If the callable raised an exception, the exception is re-raised here.
|
|
81
|
+
"""
|
|
82
|
+
treturn = self.get_raw(block, timeout)
|
|
46
83
|
if treturn.exception is not None:
|
|
47
84
|
raise treturn.exception
|
|
48
85
|
else:
|
|
@@ -50,6 +87,20 @@ class ThreadQueue:
|
|
|
50
87
|
|
|
51
88
|
# wait for all threads to finish (ignoring queue contents)
|
|
52
89
|
def join(self):
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
90
|
+
"""
|
|
91
|
+
Wait for all threads to finish, ignoring the state of the queue.
|
|
92
|
+
"""
|
|
93
|
+
while True:
|
|
94
|
+
with self.lock:
|
|
95
|
+
try:
|
|
96
|
+
thread = self.threads.pop()
|
|
97
|
+
except KeyError:
|
|
98
|
+
break
|
|
99
|
+
thread.join()
|
|
100
|
+
|
|
101
|
+
def qsize(self):
|
|
102
|
+
"""
|
|
103
|
+
Return the amount of elements .get() can retrieve before it raises
|
|
104
|
+
queue.Empty.
|
|
105
|
+
"""
|
|
106
|
+
return self.queue.qsize()
|
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: atex
|
|
3
|
+
Version: 0.10
|
|
4
|
+
Summary: Ad-hoc Test EXecutor
|
|
5
|
+
Project-URL: Homepage, https://github.com/RHSecurityCompliance/atex
|
|
6
|
+
License-Expression: GPL-3.0-or-later
|
|
7
|
+
License-File: COPYING.txt
|
|
8
|
+
Classifier: Operating System :: POSIX :: Linux
|
|
9
|
+
Classifier: Programming Language :: Python :: 3
|
|
10
|
+
Classifier: Topic :: Software Development :: Testing
|
|
11
|
+
Requires-Python: >=3.11
|
|
12
|
+
Requires-Dist: fmf>=1.6
|
|
13
|
+
Requires-Dist: pyyaml
|
|
14
|
+
Requires-Dist: urllib3<3,>=2
|
|
15
|
+
Description-Content-Type: text/markdown
|
|
16
|
+
|
|
17
|
+
# ATEX = Ad-hoc Test EXecutor
|
|
18
|
+
|
|
19
|
+
A collection of Python APIs to provision operating systems, collect
|
|
20
|
+
and execute [FMF](https://github.com/teemtee/fmf/)-style tests, gather
|
|
21
|
+
and organize their results and generate reports from those results.
|
|
22
|
+
|
|
23
|
+
The name comes from an approach (fairly unique to the FMF/TMT ecosystem) that
|
|
24
|
+
allows provisioning a pool of systems and scheduling tests on them as one would
|
|
25
|
+
on an ad-hoc pool of thread/process workers - once a worker becomes free,
|
|
26
|
+
it receives a test to run.
|
|
27
|
+
This is in contrast to splitting a large list of N tests onto M workers
|
|
28
|
+
like N/M, which yields significant time penalties due to tests having
|
|
29
|
+
widely varying runtimes.
|
|
30
|
+
|
|
31
|
+
Above all, this project is meant to be a toolbox, not a silver-plate solution.
|
|
32
|
+
Use its Python APIs to build a CLI tool for your specific use case.
|
|
33
|
+
The CLI tool provided here is just for demonstration / testing, not for serious
|
|
34
|
+
use - we want to avoid huge modular CLIs for Every Possible Scenario. That's
|
|
35
|
+
the job of the Python API. Any CLI should be simple by nature.
|
|
36
|
+
|
|
37
|
+
---
|
|
38
|
+
|
|
39
|
+
## License
|
|
40
|
+
|
|
41
|
+
Unless specified otherwise, any content within this repository is distributed
|
|
42
|
+
under the GNU GPLv3 license, see the [COPYING.txt](COPYING.txt) file for more.
|
|
43
|
+
|
|
44
|
+
## Environment variables
|
|
45
|
+
|
|
46
|
+
- `ATEX_DEBUG_TEST`
|
|
47
|
+
- Set to `1` to print out detailed runner-related trace within the test output
|
|
48
|
+
stream (as if it was printed out by the test).
|
|
49
|
+
|
|
50
|
+
## Testing this project
|
|
51
|
+
|
|
52
|
+
There are some limited sanity tests provided via `pytest`, although:
|
|
53
|
+
|
|
54
|
+
- Some require additional variables (ie. Testing Farm) and will ERROR
|
|
55
|
+
without them.
|
|
56
|
+
- Some take a long time (ie. Testing Farm) due to system provisioning
|
|
57
|
+
taking a long time, so install `pytest-xdist` and run with a large `-n`.
|
|
58
|
+
|
|
59
|
+
Currently, the recommended approach is to split the execution:
|
|
60
|
+
|
|
61
|
+
```
|
|
62
|
+
# synchronously, because podman CLI has concurrency issues
|
|
63
|
+
pytest tests/provision/test_podman.py
|
|
64
|
+
|
|
65
|
+
# in parallel, because provisioning takes a long time
|
|
66
|
+
export TESTING_FARM_API_TOKEN=...
|
|
67
|
+
export TESTING_FARM_COMPOSE=...
|
|
68
|
+
pytest -n 20 tests/provision/test_testingfarm.py
|
|
69
|
+
|
|
70
|
+
# fast enough for synchronous execution
|
|
71
|
+
pytest tests/fmf
|
|
72
|
+
```
|
|
73
|
+
|
|
74
|
+
## Unsorted notes
|
|
75
|
+
|
|
76
|
+
TODO: codestyle from contest
|
|
77
|
+
|
|
78
|
+
```
|
|
79
|
+
- this is not tmt, the goal is to make a python toolbox *for* making runcontest
|
|
80
|
+
style tools easily, not to replace those tools with tmt-style CLI syntax
|
|
81
|
+
|
|
82
|
+
- the whole point is to make usecase-targeted easy-to-use tools that don't
|
|
83
|
+
intimidate users with 1 KB long command line, and runcontest is a nice example
|
|
84
|
+
|
|
85
|
+
- TL;DR - use a modular pythonic approach, not a gluetool-style long CLI
|
|
86
|
+
```
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
atex/__init__.py,sha256=LdX67gprtHYeAkjLhFPKzpc7ECv2rHxUbHKDGbGXO1c,517
|
|
2
|
+
atex/fmf.py,sha256=gkJXIaRO7_KvwJR-V6Tc1NVn4a9Hq7hoBLQLhxYIdbg,8834
|
|
3
|
+
atex/aggregator/__init__.py,sha256=uNnYSyDGXjknxckI8MFfl-C8_gin8FwQchiq-UOyP6I,1744
|
|
4
|
+
atex/aggregator/json.py,sha256=x1zim9O2olzBh185NYWo5N96fixB2oxCamoOZwmgR9w,3330
|
|
5
|
+
atex/cli/__init__.py,sha256=X5XxkDEDXE4tJAjwt5ShRHCFTXDK-2zvxQ34opmueUc,2768
|
|
6
|
+
atex/cli/fmf.py,sha256=HfbTgFbCwK4Nuyq6vtGutcq_4-4kj-tmoqzXUn3AYtY,3573
|
|
7
|
+
atex/cli/libvirt.py,sha256=6tt5ANb8XBBRXOQsYPTWILThKqf-gvt5AZh5Dctg2PA,3782
|
|
8
|
+
atex/cli/testingfarm.py,sha256=HGlqrkhanUMo2CqKxmM3ACgptWtxm0gICyEGf7O6Qc0,9078
|
|
9
|
+
atex/connection/__init__.py,sha256=dj8ZBcEspom7Z_UjecfLGBRNvLZ3dyGR9q19i_B4xpY,3880
|
|
10
|
+
atex/connection/podman.py,sha256=1T56gh1TgbcQWpTIJHL4NaxZOI6aMg7Xp7sn6PQQyBk,1911
|
|
11
|
+
atex/connection/ssh.py,sha256=9A57b9YR_HI-kIu06Asic1y__JPVXEheDZxjbG2Qcsc,13460
|
|
12
|
+
atex/executor/__init__.py,sha256=XCfhi7QDELjey7N1uzhMjc46Kp1Jsd5bOCf52I27SCE,85
|
|
13
|
+
atex/executor/duration.py,sha256=x06sItKOZi6XA8KszQwZGpIb1Z_L-HWqIwZKo2SDo0s,1759
|
|
14
|
+
atex/executor/executor.py,sha256=JLFR9cZjSlUdAlAlLct6WuzmYbtjGtSobxvsToQum6M,15738
|
|
15
|
+
atex/executor/reporter.py,sha256=MceFmHFt0bTEClBZbRI1WnFbfMhR0e1noOzcu7gjKuQ,3403
|
|
16
|
+
atex/executor/scripts.py,sha256=riJAQWsV-BFGkJwR2Dmf3R0ZRRZJs9w9iYnPpYaQNaE,5618
|
|
17
|
+
atex/executor/testcontrol.py,sha256=mVrLwQUnDRfUq-5diz-80UvCWWxn1TkcBgmAKhKNb5E,12696
|
|
18
|
+
atex/orchestrator/__init__.py,sha256=8Q1YknyibilXLjWRYkHm_Mr2HMm0SRw8Zv39KypeASM,2059
|
|
19
|
+
atex/orchestrator/adhoc.py,sha256=GnvHLlCHeJ_nQ8doEjMuDzqmu4XZorI7ZzOtG_C08tU,18451
|
|
20
|
+
atex/provisioner/__init__.py,sha256=2eepmEznq94tbam9VSWbsGFrZZpWeNSVlsTczGxjNuQ,4667
|
|
21
|
+
atex/provisioner/libvirt/VM_PROVISION,sha256=7pkZ-ozgTyK4qNGC-E-HUznr4IhbosWSASbB72Gknl8,2664
|
|
22
|
+
atex/provisioner/libvirt/__init__.py,sha256=pKG5IpZSC2IHs5wL2ecQx_fd9AzAXEbZmDzA7RyZsfM,119
|
|
23
|
+
atex/provisioner/libvirt/libvirt.py,sha256=rtxowv5DpgcWsGRXYF29n6S9x_cgXRVgqY41DiFu920,18431
|
|
24
|
+
atex/provisioner/libvirt/locking.py,sha256=AXtDyidZNmUoMmrit26g9iTHDqInrzL_RSQEoc_EAXw,5669
|
|
25
|
+
atex/provisioner/libvirt/setup-libvirt.sh,sha256=oCMy9SCnbC_QuAzO2sFwvB5ui1kMQ6uviHsgdXyoFXc,2428
|
|
26
|
+
atex/provisioner/podman/__init__.py,sha256=dM0JzQXWX7edtWSc0KH0cMFXAjArFn2Vme4j_ZMsdYA,138
|
|
27
|
+
atex/provisioner/podman/podman.py,sha256=ztRypoakSf-jF04iER58tEMUZ4Y6AuzIpNpFXp44bB4,4997
|
|
28
|
+
atex/provisioner/testingfarm/__init__.py,sha256=kZncgLGdRCR4FMaRQr2GTwJ8vjlA-24ri8JO2ueZJuw,113
|
|
29
|
+
atex/provisioner/testingfarm/api.py,sha256=UcMN61nBr3wqEd5KSR5Xhv1-TS7nSPFvk2byb6PdIs8,21811
|
|
30
|
+
atex/provisioner/testingfarm/testingfarm.py,sha256=OI-a99xALaiYf-y5037WFVxY1g2H2y1xEKxHBdUQvfg,8271
|
|
31
|
+
atex/util/__init__.py,sha256=cWHFbtQ4mDlKe6lXyPDWRmWJOTcHDGfVuW_-GYa8hB0,1473
|
|
32
|
+
atex/util/dedent.py,sha256=SEuJMtLzqz3dQ7g7qyZzEJ9VYynVlk52tQCJY-FveXo,603
|
|
33
|
+
atex/util/libvirt.py,sha256=kDZmT6xLYEZkQNLZY98gJ2M48DDWXxHF8rQY9PnjB3U,660
|
|
34
|
+
atex/util/log.py,sha256=KVR7ep8n5wtghsvBFCtHiPsMAQBdAmK83E_Jec5t4cU,2230
|
|
35
|
+
atex/util/named_mapping.py,sha256=UBMe9TetjV-DGPhjYjJ42YtC40FVPKAAEROXl9MA5fo,4700
|
|
36
|
+
atex/util/path.py,sha256=x-kXqiWCVodfZWbEwtC5A8LFvutpDIPYv2m0boZSlXU,504
|
|
37
|
+
atex/util/ssh_keygen.py,sha256=9yuSl2yBV7pG3Qfsf9tossVC00nbIUrAeLdbwTykpjk,384
|
|
38
|
+
atex/util/subprocess.py,sha256=PQBxcQJPapP1ZLO4LqENyrxxCbNAxtJDNNlBV5DcD9k,2953
|
|
39
|
+
atex/util/threads.py,sha256=c8hsEc-8SqJGodInorv_6JxpiHiSkGFGob4qbMmOD2M,3531
|
|
40
|
+
atex-0.10.dist-info/METADATA,sha256=evOBYvVboY2T8eGrAKy64UpyeuFKSMLGz8pUz8Sstm8,3050
|
|
41
|
+
atex-0.10.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
|
|
42
|
+
atex-0.10.dist-info/entry_points.txt,sha256=pLqJdcfeyQTgup2h6dWb6SvkHhtOl-W5Eg9zV8moK0o,39
|
|
43
|
+
atex-0.10.dist-info/licenses/COPYING.txt,sha256=oEuj51jdmbXcCUy7pZ-KE0BNcJTR1okudRp5zQ0yWnU,670
|
|
44
|
+
atex-0.10.dist-info/RECORD,,
|
atex/orchestrator/aggregator.py
DELETED
|
@@ -1,106 +0,0 @@
|
|
|
1
|
-
import csv
|
|
2
|
-
import gzip
|
|
3
|
-
import json
|
|
4
|
-
import shutil
|
|
5
|
-
import threading
|
|
6
|
-
from pathlib import Path
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
class CSVAggregator:
|
|
10
|
-
"""
|
|
11
|
-
Collects reported results as a GZIP-ed CSV and files (logs) from multiple
|
|
12
|
-
test runs under a shared directory.
|
|
13
|
-
"""
|
|
14
|
-
|
|
15
|
-
class _ExcelWithUnixNewline(csv.excel):
|
|
16
|
-
lineterminator = "\n"
|
|
17
|
-
|
|
18
|
-
def __init__(self, csv_file, storage_dir):
|
|
19
|
-
"""
|
|
20
|
-
'csv_file' is a string/Path to a .csv.gz file with aggregated results.
|
|
21
|
-
|
|
22
|
-
'storage_dir' is a string/Path of the top-level parent for all
|
|
23
|
-
per-platform / per-test files uploaded by tests.
|
|
24
|
-
"""
|
|
25
|
-
self.lock = threading.RLock()
|
|
26
|
-
self.storage_dir = Path(storage_dir)
|
|
27
|
-
self.csv_file = Path(csv_file)
|
|
28
|
-
self.csv_writer = None
|
|
29
|
-
self.results_gzip_handle = None
|
|
30
|
-
|
|
31
|
-
def open(self):
|
|
32
|
-
if self.csv_file.exists():
|
|
33
|
-
raise FileExistsError(f"{self.csv_file} already exists")
|
|
34
|
-
f = gzip.open(self.csv_file, "wt", newline="")
|
|
35
|
-
try:
|
|
36
|
-
self.csv_writer = csv.writer(f, dialect=self._ExcelWithUnixNewline)
|
|
37
|
-
except:
|
|
38
|
-
f.close()
|
|
39
|
-
raise
|
|
40
|
-
self.results_gzip_handle = f
|
|
41
|
-
|
|
42
|
-
if self.storage_dir.exists():
|
|
43
|
-
raise FileExistsError(f"{self.storage_dir} already exists")
|
|
44
|
-
self.storage_dir.mkdir()
|
|
45
|
-
|
|
46
|
-
def close(self):
|
|
47
|
-
self.results_gzip_handle.close()
|
|
48
|
-
self.results_gzip_handle = None
|
|
49
|
-
self.csv_writer = None
|
|
50
|
-
|
|
51
|
-
def __enter__(self):
|
|
52
|
-
self.open()
|
|
53
|
-
return self
|
|
54
|
-
|
|
55
|
-
def __exit__(self, exc_type, exc_value, traceback):
|
|
56
|
-
self.close()
|
|
57
|
-
|
|
58
|
-
def ingest(self, platform, test_name, json_file, files_dir):
|
|
59
|
-
"""
|
|
60
|
-
Process 'json_file' (string/Path) for reported results and append them
|
|
61
|
-
to the overall aggregated CSV file, recursively copying over the dir
|
|
62
|
-
structure under 'files_dir' (string/Path) under the respective platform
|
|
63
|
-
and test name in the aggregated files storage dir.
|
|
64
|
-
"""
|
|
65
|
-
# parse the JSON separately, before writing any CSV lines, to ensure
|
|
66
|
-
# that either all results from the test are ingested, or none at all
|
|
67
|
-
# (if one of the lines contains JSON errors)
|
|
68
|
-
csv_lines = []
|
|
69
|
-
with open(json_file) as json_fobj:
|
|
70
|
-
for raw_line in json_fobj:
|
|
71
|
-
result_line = json.loads(raw_line)
|
|
72
|
-
|
|
73
|
-
result_name = result_line.get("name")
|
|
74
|
-
if result_name:
|
|
75
|
-
# sub-result; prefix test name
|
|
76
|
-
result_name = f"{test_name}/{result_name}"
|
|
77
|
-
else:
|
|
78
|
-
# result for test itself; use test name
|
|
79
|
-
result_name = test_name
|
|
80
|
-
|
|
81
|
-
file_names = []
|
|
82
|
-
if "testout" in result_line:
|
|
83
|
-
file_names.append(result_line["testout"])
|
|
84
|
-
if "files" in result_line:
|
|
85
|
-
file_names += (f["name"] for f in result_line["files"])
|
|
86
|
-
|
|
87
|
-
csv_lines.append((
|
|
88
|
-
platform,
|
|
89
|
-
result_line["status"],
|
|
90
|
-
result_name,
|
|
91
|
-
result_line.get("note", ""),
|
|
92
|
-
*file_names,
|
|
93
|
-
))
|
|
94
|
-
|
|
95
|
-
with self.lock:
|
|
96
|
-
self.csv_writer.writerows(csv_lines)
|
|
97
|
-
self.results_gzip_handle.flush()
|
|
98
|
-
|
|
99
|
-
Path(json_file).unlink()
|
|
100
|
-
|
|
101
|
-
platform_dir = self.storage_dir / platform
|
|
102
|
-
platform_dir.mkdir(exist_ok=True)
|
|
103
|
-
test_dir = platform_dir / test_name.lstrip("/")
|
|
104
|
-
if test_dir.exists():
|
|
105
|
-
raise FileExistsError(f"{test_dir} already exists for {test_name}")
|
|
106
|
-
shutil.move(files_dir, test_dir, copy_function=shutil.copy)
|
|
@@ -1,324 +0,0 @@
|
|
|
1
|
-
import time
|
|
2
|
-
import tempfile
|
|
3
|
-
import traceback
|
|
4
|
-
import concurrent
|
|
5
|
-
import collections
|
|
6
|
-
from pathlib import Path
|
|
7
|
-
|
|
8
|
-
from .. import util, executor
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
class Orchestrator:
|
|
12
|
-
"""
|
|
13
|
-
A scheduler for parallel execution on multiple resources (machines/systems).
|
|
14
|
-
"""
|
|
15
|
-
|
|
16
|
-
SetupInfo = collections.namedtuple(
|
|
17
|
-
"SetupInfo",
|
|
18
|
-
(
|
|
19
|
-
# class Provisioner instance this machine is provided by
|
|
20
|
-
# (for logging purposes)
|
|
21
|
-
"provisioner",
|
|
22
|
-
# class Remote instance returned by the Provisioner
|
|
23
|
-
"remote",
|
|
24
|
-
# class Executor instance uploading tests / running setup or tests
|
|
25
|
-
"executor",
|
|
26
|
-
),
|
|
27
|
-
)
|
|
28
|
-
RunningInfo = collections.namedtuple(
|
|
29
|
-
"RunningInfo",
|
|
30
|
-
(
|
|
31
|
-
# "inherit" from SetupInfo
|
|
32
|
-
*SetupInfo._fields,
|
|
33
|
-
# string with /test/name
|
|
34
|
-
"test_name",
|
|
35
|
-
# class tempfile.TemporaryDirectory instance with 'json_file' and 'files_dir'
|
|
36
|
-
"tmp_dir",
|
|
37
|
-
),
|
|
38
|
-
)
|
|
39
|
-
FinishedInfo = collections.namedtuple(
|
|
40
|
-
"FinishedInfo",
|
|
41
|
-
(
|
|
42
|
-
# "inherit" from RunningInfo
|
|
43
|
-
*RunningInfo._fields,
|
|
44
|
-
# integer with exit code of the test
|
|
45
|
-
# (None if exception happened)
|
|
46
|
-
"exit_code",
|
|
47
|
-
# exception class instance if running the test failed
|
|
48
|
-
# (None if no exception happened (exit_code is defined))
|
|
49
|
-
"exception",
|
|
50
|
-
),
|
|
51
|
-
)
|
|
52
|
-
|
|
53
|
-
def __init__(self, platform, fmf_tests, provisioners, aggregator, tmp_dir, *, max_reruns=2):
|
|
54
|
-
"""
|
|
55
|
-
'platform' is a string with platform name.
|
|
56
|
-
|
|
57
|
-
'fmf_tests' is a class FMFTests instance of the tests to run.
|
|
58
|
-
|
|
59
|
-
'provisioners' is an iterable of class Provisioner instances.
|
|
60
|
-
|
|
61
|
-
'aggregator' is a class CSVAggregator instance.
|
|
62
|
-
|
|
63
|
-
'tmp_dir' is a string/Path to a temporary directory, to be used for
|
|
64
|
-
storing per-test results and uploaded files before being ingested
|
|
65
|
-
by the aggregator. Can be safely shared by Orchestrator instances.
|
|
66
|
-
"""
|
|
67
|
-
self.platform = platform
|
|
68
|
-
self.fmf_tests = fmf_tests
|
|
69
|
-
self.provisioners = tuple(provisioners)
|
|
70
|
-
self.aggregator = aggregator
|
|
71
|
-
self.tmp_dir = tmp_dir
|
|
72
|
-
# tests still waiting to be run
|
|
73
|
-
self.to_run = set(fmf_tests.tests)
|
|
74
|
-
# running setup functions, as a list of SetupInfo items
|
|
75
|
-
self.running_setups = []
|
|
76
|
-
# running tests as a dict, indexed by test name, with RunningInfo values
|
|
77
|
-
self.running_tests = {}
|
|
78
|
-
# indexed by test name, value being integer of how many times
|
|
79
|
-
self.reruns = collections.defaultdict(lambda: max_reruns)
|
|
80
|
-
# thread queue for actively running tests
|
|
81
|
-
self.test_queue = util.ThreadQueue(daemon=False)
|
|
82
|
-
# thread queue for remotes being set up (uploading tests, etc.)
|
|
83
|
-
self.setup_queue = util.ThreadQueue(daemon=True)
|
|
84
|
-
# NOTE: running_setups and test_running are just for debugging and
|
|
85
|
-
# cancellation, the execution flow itself uses ThreadQueues
|
|
86
|
-
|
|
87
|
-
@staticmethod
|
|
88
|
-
def _run_setup(sinfo):
|
|
89
|
-
sinfo.executor.setup()
|
|
90
|
-
sinfo.executor.upload_tests()
|
|
91
|
-
sinfo.executor.setup_plan()
|
|
92
|
-
# NOTE: we never run executor.cleanup() anywhere - instead, we assume
|
|
93
|
-
# the remote (and its connection) was invalidated by the test,
|
|
94
|
-
# so we just rely on remote.release() destroying the system
|
|
95
|
-
return sinfo
|
|
96
|
-
|
|
97
|
-
@classmethod
|
|
98
|
-
def _wrap_test(cls, rinfo, func, *args, **kwargs):
|
|
99
|
-
"""
|
|
100
|
-
Wrap 'func' (test execution function) to preserve extra metadata
|
|
101
|
-
('rinfo') and return it with the function return value.
|
|
102
|
-
"""
|
|
103
|
-
try:
|
|
104
|
-
return cls.FinishedInfo(*rinfo, func(*args, **kwargs), None)
|
|
105
|
-
except Exception as e:
|
|
106
|
-
return cls.FinishedInfo(*rinfo, None, e)
|
|
107
|
-
|
|
108
|
-
def _run_new_test(self, sinfo):
|
|
109
|
-
"""
|
|
110
|
-
'sinfo' is a SetupInfo instance.
|
|
111
|
-
"""
|
|
112
|
-
next_test_name = self.next_test(self.to_run, self.fmf_tests)
|
|
113
|
-
assert next_test_name in self.to_run, "next_test() returned valid test name"
|
|
114
|
-
|
|
115
|
-
self.to_run.remove(next_test_name)
|
|
116
|
-
|
|
117
|
-
rinfo = self.RunningInfo(
|
|
118
|
-
*sinfo,
|
|
119
|
-
test_name=next_test_name,
|
|
120
|
-
tmp_dir=tempfile.TemporaryDirectory(
|
|
121
|
-
prefix=next_test_name.strip("/").replace("/","-") + "-",
|
|
122
|
-
dir=self.tmp_dir,
|
|
123
|
-
delete=False,
|
|
124
|
-
),
|
|
125
|
-
)
|
|
126
|
-
|
|
127
|
-
tmp_dir_path = Path(rinfo.tmp_dir.name)
|
|
128
|
-
self.test_queue.start_thread(
|
|
129
|
-
target=self._wrap_test,
|
|
130
|
-
args=(
|
|
131
|
-
rinfo,
|
|
132
|
-
sinfo.executor.run_test,
|
|
133
|
-
next_test_name,
|
|
134
|
-
tmp_dir_path / "json_file",
|
|
135
|
-
tmp_dir_path / "files_dir",
|
|
136
|
-
),
|
|
137
|
-
)
|
|
138
|
-
|
|
139
|
-
self.running_tests[next_test_name] = rinfo
|
|
140
|
-
|
|
141
|
-
def _process_finished_test(self, finfo):
|
|
142
|
-
"""
|
|
143
|
-
'finfo' is a FinishedInfo instance.
|
|
144
|
-
"""
|
|
145
|
-
test_id = f"'{finfo.test_name}' on '{finfo.remote}'"
|
|
146
|
-
tmp_dir_path = Path(finfo.tmp_dir.name)
|
|
147
|
-
|
|
148
|
-
# NOTE: document that we intentionally don't .cleanup() executioner below,
|
|
149
|
-
# we rely on remote .release() destroying the OS, because we don't
|
|
150
|
-
# want to risk .cleanup() blocking on dead ssh into the remote after
|
|
151
|
-
# executing a destructive test
|
|
152
|
-
|
|
153
|
-
destructive = False
|
|
154
|
-
|
|
155
|
-
# if executor (or test) threw exception, schedule a re-run
|
|
156
|
-
if finfo.exception:
|
|
157
|
-
destructive = True
|
|
158
|
-
exc_str = "".join(traceback.format_exception(finfo.exception)).rstrip("\n")
|
|
159
|
-
util.info(f"unexpected exception happened while running {test_id}:\n{exc_str}")
|
|
160
|
-
finfo.remote.release()
|
|
161
|
-
if self.reruns[finfo.test_name] > 0:
|
|
162
|
-
self.reruns[finfo.test_name] -= 1
|
|
163
|
-
self.to_run.add(finfo.test_name)
|
|
164
|
-
else:
|
|
165
|
-
util.info(f"reruns for {test_id} exceeded, ignoring it")
|
|
166
|
-
|
|
167
|
-
# if the test exited as non-0, try a re-run
|
|
168
|
-
elif finfo.exit_code != 0:
|
|
169
|
-
destructive = True
|
|
170
|
-
finfo.remote.release()
|
|
171
|
-
if self.reruns[finfo.test_name] > 0:
|
|
172
|
-
util.info(
|
|
173
|
-
f"{test_id} exited with non-zero: {finfo.exit_code}, re-running "
|
|
174
|
-
f"({self.reruns[finfo.test_name]} reruns left)",
|
|
175
|
-
)
|
|
176
|
-
self.reruns[finfo.test_name] -= 1
|
|
177
|
-
self.to_run.add(finfo.test_name)
|
|
178
|
-
else:
|
|
179
|
-
util.info(
|
|
180
|
-
f"{test_id} exited with non-zero: {finfo.exit_code}, "
|
|
181
|
-
"all reruns exceeded, giving up",
|
|
182
|
-
)
|
|
183
|
-
# record the final result anyway
|
|
184
|
-
self.aggregator.ingest(
|
|
185
|
-
self.platform,
|
|
186
|
-
finfo.test_name,
|
|
187
|
-
tmp_dir_path / "json_file",
|
|
188
|
-
tmp_dir_path / "files_dir",
|
|
189
|
-
)
|
|
190
|
-
finfo.tmp_dir.cleanup()
|
|
191
|
-
|
|
192
|
-
# test finished successfully - ingest its results
|
|
193
|
-
else:
|
|
194
|
-
util.info(f"{test_id} finished successfully")
|
|
195
|
-
self.aggregator.ingest(
|
|
196
|
-
self.platform,
|
|
197
|
-
finfo.test_name,
|
|
198
|
-
tmp_dir_path / "json_file",
|
|
199
|
-
tmp_dir_path / "files_dir",
|
|
200
|
-
)
|
|
201
|
-
finfo.tmp_dir.cleanup()
|
|
202
|
-
|
|
203
|
-
# if the remote was not destroyed by traceback / failing test,
|
|
204
|
-
# check if the test always destroys it (even on success)
|
|
205
|
-
if not destructive:
|
|
206
|
-
test_data = self.fmf_tests.tests[finfo.test_name]
|
|
207
|
-
destructive = test_data.get("extra-atex", {}).get("destructive", False)
|
|
208
|
-
|
|
209
|
-
# if destroyed, release the remote
|
|
210
|
-
if destructive:
|
|
211
|
-
util.debug(f"{test_id} was destructive, releasing remote")
|
|
212
|
-
finfo.remote.release()
|
|
213
|
-
|
|
214
|
-
# if still not destroyed, run another test on it
|
|
215
|
-
# (without running plan setup, re-using already set up remote)
|
|
216
|
-
elif self.to_run:
|
|
217
|
-
sinfo = self.SetupInfo(
|
|
218
|
-
provisioner=finfo.provisioner,
|
|
219
|
-
remote=finfo.remote,
|
|
220
|
-
executor=finfo.executor,
|
|
221
|
-
)
|
|
222
|
-
util.debug(f"{test_id} was non-destructive, running next test")
|
|
223
|
-
self._run_new_test(sinfo)
|
|
224
|
-
|
|
225
|
-
def serve_once(self):
|
|
226
|
-
"""
|
|
227
|
-
Run the orchestration logic, processing any outstanding requests
|
|
228
|
-
(for provisioning, new test execution, etc.) and returning once these
|
|
229
|
-
are taken care of.
|
|
230
|
-
|
|
231
|
-
Returns True to indicate that it should be called again by the user
|
|
232
|
-
(more work to be done), False once all testing is concluded.
|
|
233
|
-
"""
|
|
234
|
-
util.debug(
|
|
235
|
-
f"to_run: {len(self.to_run)} tests / "
|
|
236
|
-
f"running: {len(self.running_tests)} tests, {len(self.running_setups)} setups",
|
|
237
|
-
)
|
|
238
|
-
# all done
|
|
239
|
-
if not self.to_run and not self.running_tests:
|
|
240
|
-
return False
|
|
241
|
-
|
|
242
|
-
# process all finished tests, potentially reusing remotes for executing
|
|
243
|
-
# further tests
|
|
244
|
-
while True:
|
|
245
|
-
try:
|
|
246
|
-
finfo = self.test_queue.get(block=False)
|
|
247
|
-
except util.ThreadQueue.Empty:
|
|
248
|
-
break
|
|
249
|
-
del self.running_tests[finfo.test_name]
|
|
250
|
-
self._process_finished_test(finfo)
|
|
251
|
-
|
|
252
|
-
# process any remotes with finished plan setup (uploaded tests,
|
|
253
|
-
# plan-defined pkgs / prepare scripts), start executing tests on them
|
|
254
|
-
while True:
|
|
255
|
-
try:
|
|
256
|
-
sinfo = self.setup_queue.get(block=False)
|
|
257
|
-
except util.ThreadQueue.Empty:
|
|
258
|
-
break
|
|
259
|
-
util.debug(f"setup finished for '{sinfo.remote}', running first test")
|
|
260
|
-
self.running_setups.remove(sinfo)
|
|
261
|
-
self._run_new_test(sinfo)
|
|
262
|
-
|
|
263
|
-
# try to get new remotes from Provisioners - if we get some, start
|
|
264
|
-
# running setup on them
|
|
265
|
-
for provisioner in self.provisioners:
|
|
266
|
-
while (remote := provisioner.get_remote(block=False)) is not None:
|
|
267
|
-
ex = executor.Executor(self.fmf_tests, remote)
|
|
268
|
-
sinfo = self.SetupInfo(
|
|
269
|
-
provisioner=provisioner,
|
|
270
|
-
remote=remote,
|
|
271
|
-
executor=ex,
|
|
272
|
-
)
|
|
273
|
-
self.setup_queue.start_thread(
|
|
274
|
-
target=self._run_setup,
|
|
275
|
-
args=(sinfo,),
|
|
276
|
-
)
|
|
277
|
-
self.running_setups.append(sinfo)
|
|
278
|
-
util.debug(f"got remote '{remote}' from '{provisioner}', running setup")
|
|
279
|
-
|
|
280
|
-
return True
|
|
281
|
-
|
|
282
|
-
def serve_forever(self):
|
|
283
|
-
"""
|
|
284
|
-
Run the orchestration logic, blocking until all testing is concluded.
|
|
285
|
-
"""
|
|
286
|
-
while self.serve_once():
|
|
287
|
-
time.sleep(1)
|
|
288
|
-
|
|
289
|
-
def __enter__(self):
|
|
290
|
-
# start all provisioners
|
|
291
|
-
for prov in self.provisioners:
|
|
292
|
-
prov.start()
|
|
293
|
-
return self
|
|
294
|
-
|
|
295
|
-
def __exit__(self, exc_type, exc_value, traceback):
|
|
296
|
-
# cancel all running tests and wait for them to clean up (up to 0.1sec)
|
|
297
|
-
for rinfo in self.running_tests.values():
|
|
298
|
-
rinfo.executor.cancel()
|
|
299
|
-
self.test_queue.join() # also ignore any exceptions raised
|
|
300
|
-
|
|
301
|
-
# stop all provisioners, also releasing all remotes
|
|
302
|
-
with concurrent.futures.ThreadPoolExecutor(max_workers=20) as ex:
|
|
303
|
-
for provisioner in self.provisioners:
|
|
304
|
-
for func in provisioner.stop_defer():
|
|
305
|
-
ex.submit(func)
|
|
306
|
-
|
|
307
|
-
def next_test(self, tests, fmf_tests): # noqa: ARG002, PLR6301
|
|
308
|
-
"""
|
|
309
|
-
Return a test name (string) from a set of 'tests' (set of test name
|
|
310
|
-
strings) to be run next.
|
|
311
|
-
|
|
312
|
-
'fmf_tests' is a class FMFTests instance with additional test metadata.
|
|
313
|
-
|
|
314
|
-
This method is user-overridable, ie. by subclassing Orchestrator:
|
|
315
|
-
|
|
316
|
-
class CustomOrchestrator(Orchestrator):
|
|
317
|
-
@staticmethod
|
|
318
|
-
def next_test(tests):
|
|
319
|
-
...
|
|
320
|
-
"""
|
|
321
|
-
# TODO: more advanced algorithm
|
|
322
|
-
#
|
|
323
|
-
# simple:
|
|
324
|
-
return next(iter(tests))
|