atex 0.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- atex-0.1/.editorconfig +6 -0
- atex-0.1/.gitignore +9 -0
- atex-0.1/COPYING.txt +14 -0
- atex-0.1/PKG-INFO +11 -0
- atex-0.1/README.md +87 -0
- atex-0.1/atex/__init__.py +35 -0
- atex-0.1/atex/cli/__init__.py +83 -0
- atex-0.1/atex/cli/testingfarm.py +171 -0
- atex-0.1/atex/fmf.py +168 -0
- atex-0.1/atex/minitmt/CONTROL_FILE.md +99 -0
- atex-0.1/atex/minitmt/README.md +191 -0
- atex-0.1/atex/minitmt/RESULTS.md +204 -0
- atex-0.1/atex/minitmt/__init__.py +109 -0
- atex-0.1/atex/minitmt/report.py +174 -0
- atex-0.1/atex/minitmt/scripts.py +51 -0
- atex-0.1/atex/minitmt/testme.py +3 -0
- atex-0.1/atex/orchestrator.py +38 -0
- atex-0.1/atex/provision/__init__.py +113 -0
- atex-0.1/atex/provision/libvirt/VM_PROVISION +51 -0
- atex-0.1/atex/provision/libvirt/__init__.py +23 -0
- atex-0.1/atex/provision/libvirt/setup-libvirt.sh +72 -0
- atex-0.1/atex/ssh.py +320 -0
- atex-0.1/atex/testingfarm.py +523 -0
- atex-0.1/atex/util/README.md +22 -0
- atex-0.1/atex/util/__init__.py +49 -0
- atex-0.1/atex/util/dedent.py +25 -0
- atex-0.1/atex/util/lockable_class.py +38 -0
- atex-0.1/atex/util/log.py +53 -0
- atex-0.1/atex/util/subprocess.py +51 -0
- atex-0.1/logtest.py +19 -0
- atex-0.1/pyproject.toml +85 -0
- atex-0.1/reporter.py +26 -0
- atex-0.1/ssh.py +41 -0
- atex-0.1/tests/PYTEST.md +11 -0
- atex-0.1/tests/foobar.py +5 -0
- atex-0.1/tf.py +18 -0
atex-0.1/.editorconfig
ADDED
atex-0.1/.gitignore
ADDED
atex-0.1/COPYING.txt
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
Copyright (C) 2025 Red Hat, Inc. <https://redhat.com/>
|
|
2
|
+
|
|
3
|
+
This program is free software: you can redistribute it and/or modify
|
|
4
|
+
it under the terms of the GNU General Public License as published by
|
|
5
|
+
the Free Software Foundation, either version 3 of the License, or
|
|
6
|
+
(at your option) any later version.
|
|
7
|
+
|
|
8
|
+
This program is distributed in the hope that it will be useful,
|
|
9
|
+
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
10
|
+
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
11
|
+
GNU General Public License for more details.
|
|
12
|
+
|
|
13
|
+
You should have received a copy of the GNU General Public License
|
|
14
|
+
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
atex-0.1/PKG-INFO
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: atex
|
|
3
|
+
Version: 0.1
|
|
4
|
+
License-Expression: GPL-3.0-or-later
|
|
5
|
+
License-File: COPYING.txt
|
|
6
|
+
Classifier: Operating System :: POSIX :: Linux
|
|
7
|
+
Classifier: Programming Language :: Python :: 3
|
|
8
|
+
Classifier: Topic :: Software Development :: Testing
|
|
9
|
+
Requires-Python: >=3.9
|
|
10
|
+
Requires-Dist: fmf>=1.6
|
|
11
|
+
Requires-Dist: urllib3<3,>=2
|
atex-0.1/README.md
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
# ATEX = Ad-hoc Test EXecutor
|
|
2
|
+
|
|
3
|
+
A collection of Python APIs to provision operating systems, collect
|
|
4
|
+
and execute [FMF](https://github.com/teemtee/fmf/)-style tests, gather
|
|
5
|
+
and organize their results and generate reports from those results.
|
|
6
|
+
|
|
7
|
+
The name comes from a (fairly unique to FMF/TMT ecosystem) approach that
|
|
8
|
+
allows provisioning a pool of systems and scheduling tests on them as one would
|
|
9
|
+
on an ad-hoc pool of thread/process workers - once a worker becomes free,
|
|
10
|
+
it receives a test to run.
|
|
11
|
+
This is in contrast to splitting a large list of N tests onto M workers
|
|
12
|
+
like N/M, which yields significant time penalties due to tests having
|
|
13
|
+
widely varying runtimes.
|
|
14
|
+
|
|
15
|
+
Above all, this project is meant to be a toolbox, not a silver-platter solution.
|
|
16
|
+
Use its Python APIs to build a CLI tool for your specific use case.
|
|
17
|
+
The CLI tool provided here is just for demonstration / testing, not for serious
|
|
18
|
+
use - we want to avoid huge modular CLIs for Every Possible Scenario. That's
|
|
19
|
+
the job of the Python API. Any CLI should be simple by nature.
|
|
20
|
+
|
|
21
|
+
---
|
|
22
|
+
|
|
23
|
+
THIS PROJECT IS HEAVILY WIP, THINGS WILL MOVE AROUND, CHANGE AND OTHERWISE
|
|
24
|
+
BREAK. DO NOT USE IT (for now).
|
|
25
|
+
|
|
26
|
+
---
|
|
27
|
+
|
|
28
|
+
## License
|
|
29
|
+
|
|
30
|
+
Unless specified otherwise, any content within this repository is distributed
|
|
31
|
+
under the GNU GPLv3 license, see the [COPYING.txt](COPYING.txt) file for more.
|
|
32
|
+
|
|
33
|
+
## Unsorted notes
|
|
34
|
+
|
|
35
|
+
```
|
|
36
|
+
- this is not tmt, the goal is to make a python toolbox *for* making runcontest
|
|
37
|
+
style tools easily, not to replace those tools with tmt-style CLI syntax
|
|
38
|
+
|
|
39
|
+
- the whole point is to make usecase-targeted easy-to-use tools that don't
|
|
40
|
+
intimidate users with 1 KB long command line, and runcontest is a nice example
|
|
41
|
+
|
|
42
|
+
- TL;DR - use a modular pythonic approach, not a modular CLI like tmt
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
- Orchestrator with
|
|
46
|
+
- add_provisioner(<class>, max_workers=1) # will instantiate <class> at most max_workers at a time
|
|
47
|
+
- algo
|
|
48
|
+
- for all provisioner classes, spawns classes*max_workers as new Threads
|
|
49
|
+
- waits for any .reserve() to return
|
|
50
|
+
- creates a new Thread for minitmt, gives it p.get_ssh() details
|
|
51
|
+
- minitmt will
|
|
52
|
+
- establish a SSHConn
|
|
53
|
+
- install test deps, copy test repo over, prepare socket dir on SUT, etc.
|
|
54
|
+
- run the test in the background as
|
|
55
|
+
f=os.open('some/test/log', os.WRONLY); subprocess.Popen(..., stdout=f, stderr=f, stdin=subprocess.DEVNULL)
|
|
56
|
+
- read/process Unix sock results in the foreground, non-blocking,
|
|
57
|
+
probably calling some Orchestrator-provided function to store results persistently
|
|
58
|
+
- regularly check Popen proc status, re-accept UNIX sock connection, etc., etc.
|
|
59
|
+
- minitmt also has some Thread-independent way to .cancel(), killing the proc, closing SSHConn, etc.
|
|
60
|
+
|
|
61
|
+
- while waiting for minitmt Threads to finish, to re-assign existing Provisioner instances
|
|
62
|
+
to new minitmt Threads, .. Orchestrator uses some logic to select, which TestRun
|
|
63
|
+
would be ideal to run next
|
|
64
|
+
- TestRun probably has some "fitness" function that returns some priority number
|
|
65
|
+
when given a Provisioner instance (?) ...
|
|
66
|
+
- something from minitmt would also have access to the Provisioner instance
|
|
67
|
+
- the idea is to allow some logic to set "hey I set up nested VM snapshot on this thing"
|
|
68
|
+
on the Provisioner instance, and if another /hardening/oscap TestRun finds
|
|
69
|
+
a Provisioner instance like that, it would return high priority
|
|
70
|
+
- ...
|
|
71
|
+
- similar to "fitness" like function, we need some "applicability" function
|
|
72
|
+
- if TestRun is mixed to RHEL-9 && x86_64, we need it to return True
|
|
73
|
+
for a Provisioner instance that provides RHEL-9 and x86_64, but False otherwise
|
|
74
|
+
|
|
75
|
+
- basically Orchestrator has
|
|
76
|
+
- .add_provisioner()
|
|
77
|
+
- .run_test() # called with an exclusively-borrowed Provisioner instance
|
|
78
|
+
- if Provisioner is_alive()==False after .run_test(), instantiate a new one from the same inst.__class__
|
|
79
|
+
- if test failed and reruns > 0, try run_test() again (or maybe re-queue the test)
|
|
80
|
+
- .output_result() # called by run_test() to persistently log a test result
|
|
81
|
+
- .applicable() # return True if a passed TestRun is meant for a passed Platform (Provisioner?)
|
|
82
|
+
- if no TestRun returns True, the Provisioner is .release()d because we don't need it anymore
|
|
83
|
+
- .fitness() # return -inf / 0 / +inf with how much should a passed TestRun run on a Provisioner
|
|
84
|
+
- MAYBE combine applicable() and fitness() into one function, next_test() ?
|
|
85
|
+
- given the free Provisioner and a list of TestRuns, select which should run next on the Provisioner
|
|
86
|
+
- if none is chosen, .release() the Provisioner without replacement, continue
|
|
87
|
+
```
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Ad-hoc Test EXecutor
|
|
3
|
+
|
|
4
|
+
Some documentation here.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import importlib as _importlib
|
|
8
|
+
import pkgutil as _pkgutil
|
|
9
|
+
|
|
10
|
+
# Public API: every module/package sitting next to this __init__.py is
# considered a submodule of atex and advertised (and lazily importable).
__all__ = [
    info.name for info in _pkgutil.iter_modules(__spec__.submodule_search_locations)
]
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def __dir__():
    # PEP 562 module __dir__: advertise the lazily-importable submodules.
    return __all__
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
# lazily import submodules (PEP 562 module-level __getattr__)
def __getattr__(attr):
    """
    Import and return the submodule named 'attr' on first access.

    Only names discovered into __all__ are importable this way; any other
    attribute access raises AttributeError, matching normal module semantics.
    """
    if attr in __all__:
        return _importlib.import_module(f'.{attr}', __name__)
    raise AttributeError(f'module {__name__} has no attribute {attr}')
|
|
@@ -0,0 +1,83 @@
|
|
|
1
|
+
r"""
|
|
2
|
+
Command line interface to atex
|
|
3
|
+
|
|
4
|
+
Submodules (subpackages) of this one must define a module-level dict with
|
|
5
|
+
these keys:
|
|
6
|
+
|
|
7
|
+
- help
|
|
8
|
+
- short oneliner about what the submodule is about (for argparse --help)
|
|
9
|
+
|
|
10
|
+
- aliases (optional)
|
|
11
|
+
- tuple of aliases of the module name, for argument parsing
|
|
12
|
+
|
|
13
|
+
- args
|
|
14
|
+
- function (or other callable) for argument specification/parsing,
|
|
15
|
+
gets passed one non-kw argument: argparse-style parser
|
|
16
|
+
|
|
17
|
+
- main
|
|
18
|
+
- function (or other callable) that will be called when invoked by the user,
|
|
19
|
+
gets passed one non-kw argument: argparse-style Namespace
|
|
20
|
+
|
|
21
|
+
This module-level dict must be named 'CLI_SPEC'.
|
|
22
|
+
"""
|
|
23
|
+
|
|
24
|
+
import sys
|
|
25
|
+
import importlib
|
|
26
|
+
import pkgutil
|
|
27
|
+
import argparse
|
|
28
|
+
import logging
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def setup_logging(level):
    """Configure root logging to stderr with timestamped, named messages."""
    config = {
        'level': level,
        'stream': sys.stderr,
        'format': '%(asctime)s %(name)s: %(message)s',
        'datefmt': '%Y-%m-%d %H:%M:%S',
    }
    logging.basicConfig(**config)
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def collect_modules():
    """
    Yield (name, CLI_SPEC) for every submodule of this package.

    Raises ValueError for any submodule that does not define the mandatory
    module-level CLI_SPEC dict (see the package docstring).
    """
    locations = __spec__.submodule_search_locations
    for module_info in pkgutil.iter_modules(locations):
        module = importlib.import_module(f'.{module_info.name}', __name__)
        if not hasattr(module, 'CLI_SPEC'):
            raise ValueError(f"CLI submodule {module_info.name} does not define CLI_SPEC")
        yield (module_info.name, module.CLI_SPEC)
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def main():
    """Entry point: assemble the CLI from submodule CLI_SPECs and dispatch."""
    parser = argparse.ArgumentParser()

    verbosity = parser.add_mutually_exclusive_group()
    verbosity.add_argument(
        '--debug', '-d',
        action='store_const', dest='loglevel', const=logging.DEBUG,
        help="enable extra debugging (logging.DEBUG)",
    )
    verbosity.add_argument(
        '--quiet', '-q',
        action='store_const', dest='loglevel', const=logging.WARNING,
        help="be quiet during normal operation (logging.WARNING)",
    )
    parser.set_defaults(loglevel=logging.INFO)

    subparsers = parser.add_subparsers(dest='_module', metavar='<module>', required=True)
    dispatch = {}
    for name, spec in collect_modules():
        aliases = spec.get('aliases', ())
        module_parser = subparsers.add_parser(
            name,
            aliases=aliases,
            help=spec['help'],
        )
        spec['args'](module_parser)
        # a subcommand may be invoked under its canonical name or any alias
        for key in (name, *aliases):
            dispatch[key] = spec['main']

    args = parser.parse_args()

    setup_logging(args.loglevel)

    try:
        dispatch[args._module](args)
    except KeyboardInterrupt:
        raise SystemExit() from None
|
|
@@ -0,0 +1,171 @@
|
|
|
1
|
+
import sys
|
|
2
|
+
#from datetime import datetime
|
|
3
|
+
|
|
4
|
+
from .. import util
|
|
5
|
+
from .. import testingfarm as tf
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def _get_api(args):
    """Build a TestingFarmAPI from optional --url/--token CLI arguments."""
    kwargs = {}
    if args.url:
        kwargs['url'] = args.url
    if args.token:
        kwargs['token'] = args.token
    return tf.TestingFarmAPI(**kwargs)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def composes(args):
    """Print the name of every compose available on the given ranch."""
    reply = _get_api(args).composes(ranch=args.ranch)
    for compose in reply['composes']:
        print(compose['name'])
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def get_request(args):
    """Fetch a single Testing Farm request by UUID and print it."""
    api = _get_api(args)
    req = tf.Request(args.request_id, api=api)
    req.update()
    print(req)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def search_requests(args):
    """
    List requests matching --state/--ranch/--before/--after, oldest first.

    One line per request: creation timestamp (sub-second precision dropped),
    request UUID, and the requested compose@arch environments.
    """
    api = _get_api(args)
    reply = api.search_requests(
        state=args.state,
        mine=not args.all,
        ranch=args.ranch,
        created_before=args.before,
        created_after=args.after,
    )
    if not reply:
        return

    for req in sorted(reply, key=lambda x: x['created']):
        req_id = req['id']
        # 'created' is ISO8601 with fractional seconds; drop the fraction
        created = req['created'].partition('.')[0]

        envs = []
        for env in req['environments_requested']:
            # 'os' may be absent or None; use .get() so a partially-filled
            # environment entry cannot raise KeyError
            compose = (env.get('os') or {}).get('compose')
            arch = env.get('arch')
            if compose and arch:
                envs.append(f'{compose}@{arch}')
        envs_str = ', '.join(envs)

        print(f'{created} {req_id} : {envs_str}')
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
def reserve(args):
    """Reserve a Testing Farm machine and drop into an interactive ssh session."""
    util.info(f"Reserving {args.compose} on {args.arch} for {args.timeout} minutes")

    api = _get_api(args)
    reservation = tf.Reserve(
        compose=args.compose,
        arch=args.arch,
        timeout=args.timeout,
        api=api,
    )
    with reservation as machine:
        util.info(f"Got machine: {machine}")
        ssh_cmd = [
            'ssh', '-q', '-i', machine.ssh_key,
            '-oStrictHostKeyChecking=no', '-oUserKnownHostsFile=/dev/null',
            f'{machine.user}@{machine.host}',
        ]
        util.subprocess_run(ssh_cmd)
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
def watch_pipeline(args):
    """Stream pipeline.log of a request, 'tail -f' style, until it finishes."""
    api = _get_api(args)
    request = tf.Request(id=args.request_id, api=api)

    util.info(f"Waiting for {args.request_id} to be 'running'")
    try:
        request.wait_for_state('running')
    except tf.GoneAwayError:
        util.info(f"Request {args.request_id} already finished")
        return

    util.info("Querying pipeline.log")
    try:
        for line in tf.PipelineLogStreamer(request):
            print(line)
    except tf.GoneAwayError:
        util.info(f"Request {args.request_id} finished, exiting")
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
def parse_args(parser):
    """Register all 'tf' options and subcommands on the given argparse parser."""
    parser.add_argument('--url', help='Testing Farm API URL')
    parser.add_argument('--token', help='Testing Farm API auth token')

    subcommands = parser.add_subparsers(
        dest='_cmd', help="TF helper to run", metavar='<cmd>', required=True,
    )

    sub = subcommands.add_parser(
        'composes',
        help="list all composes available on a given ranch",
    )
    sub.add_argument('ranch', nargs='?', help="Testing Farm ranch (autodetected if token)")

    sub = subcommands.add_parser(
        'get-request', aliases=('gr',),
        help="retrieve and print JSON of a Testing Farm request",
    )
    sub.add_argument('request_id', help="Testing Farm request UUID")

    sub = subcommands.add_parser(
        'search-requests', aliases=('sr',),
        help="return a list of requests matching the criteria",
    )
    sub.add_argument('--state', help="request state (running, etc.)", required=True)
    sub.add_argument('--all', help="all requests, not just owned by token", action='store_true')
    sub.add_argument('--ranch', help="Testing Farm ranch")
    sub.add_argument('--before', help="only requests created before ISO8601")
    sub.add_argument('--after', help="only requests created after ISO8601")

    sub = subcommands.add_parser(
        'reserve',
        help="reserve a system and ssh into it",
    )
    sub.add_argument('--compose', '-c', help="OS compose to install", required=True)
    sub.add_argument('--arch', '-a', help="system HW architecture", default='x86_64')
    sub.add_argument('--timeout', '-t', help="pipeline timeout (in minutes)", type=int, default=60)
    sub.add_argument('--ssh-key', help="path to a ssh private key file like 'id_rsa'")

    sub = subcommands.add_parser(
        'watch-pipeline', aliases=('wp',),
        help="continuously output pipeline.log like 'tail -f'",
    )
    sub.add_argument('request_id', help="Testing Farm request UUID")
|
|
149
|
+
|
|
150
|
+
|
|
151
|
+
def main(args):
    """Dispatch a parsed argparse Namespace to the handler for its subcommand."""
    cmd = args._cmd
    if cmd == 'composes':
        composes(args)
        return
    if cmd in ('get-request', 'gr'):
        get_request(args)
        return
    if cmd in ('search-requests', 'sr'):
        search_requests(args)
        return
    if cmd == 'reserve':
        reserve(args)
        return
    if cmd in ('watch-pipeline', 'wp'):
        watch_pipeline(args)
        return
    raise RuntimeError(f"unknown args: {args}")
|
|
164
|
+
|
|
165
|
+
|
|
166
|
+
# Registration dict consumed by the atex.cli dispatcher; see the package
# docstring of atex.cli for the meaning of each key.
CLI_SPEC = {
    'aliases': ('tf',),
    'help': "various utils for Testing Farm",
    'args': parse_args,
    'main': main,
}
|
atex-0.1/atex/fmf.py
ADDED
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
import re
|
|
2
|
+
import collections
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
|
|
5
|
+
# from system-wide sys.path
|
|
6
|
+
import fmf
|
|
7
|
+
|
|
8
|
+
# A single discovered test:
#   name: fmf path to the test as string, ie. /some/test
#   data: dict of the parsed fmf metadata (ie. {'tag': ..., 'environment': ...})
#   dir:  relative pathlib.Path of the test .fmf to repo root, ie. some/test
#         (may be different from name for "virtual" tests that share the same dir)
FMFTest = collections.namedtuple('FMFTest', ['name', 'data', 'dir'])
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class FMFData:
    """
    Helper class for reading and querying fmf metadata from the filesystem.

    Given a plan name, gathers that plan's 'prepare' packages/scripts and the
    tests it discovers into .prepare_pkgs, .prepare_scripts and .tests.
    """
    # TODO: usage example ^^^^

    @staticmethod
    def _listlike(data, key):
        """
        Get a piece of fmf metadata as an iterable regardless of whether it was
        defined as a dict or a list.

        This is needed because many fmf metadata keys can be used either as
            some_key: 123
        or as lists via YAML syntax
            some_key:
              - 123
              - 456
        and, for simplicity, we want to always deal with lists (iterables).

        Returns an empty tuple when the key is missing or its value is falsy.
        """
        if value := data.get(key):
            return value if isinstance(value, list) else (value,)
        else:
            return ()

    def __init__(self, fmf_tree, plan_name, context=None):
        """
        'fmf_tree' is filesystem path somewhere inside fmf metadata tree,
        or a root fmf.Tree instance.

        'plan_name' is fmf identifier (like /some/thing) of a tmt plan
        to use for discovering tests.

        'context' is a dict like {'distro': 'rhel-9.6'} used for filtering
        discovered tests.

        Raises ValueError when the plan is not found, or when the name
        refers to a test instead of a plan.
        """
        # packages the plan requests to be installed (prepare: how: install)
        self.prepare_pkgs = []
        # shell snippets the plan requests to be run (prepare: how: shell)
        self.prepare_scripts = []
        # FMFTest tuples selected by the plan's discover section
        self.tests = []

        # copy a caller-provided Tree so our .adjust() does not mutate it
        tree = fmf_tree.copy() if isinstance(fmf_tree, fmf.Tree) else fmf.Tree(fmf_tree)
        ctx = fmf.Context(**context) if context else fmf.Context()
        tree.adjust(context=ctx)

        self.fmf_root = tree.root

        # lookup the plan first
        plan = tree.find(plan_name)
        if not plan:
            raise ValueError(f"plan {plan_name} not found in {tree.root}")
        if 'test' in plan.data:
            raise ValueError(f"plan {plan_name} appears to be a test")

        # gather all prepare scripts / packages
        #
        # prepare:
        #   - how: install
        #     package:
        #       - some-rpm-name
        #   - how: shell
        #     script:
        #       - some-command
        for entry in self._listlike(plan.data, 'prepare'):
            if 'how' not in entry:
                continue
            if entry['how'] == 'install':
                self.prepare_pkgs += self._listlike(entry, 'package')
            elif entry['how'] == 'shell':
                self.prepare_scripts += self._listlike(entry, 'script')

        # gather all tests selected by the plan
        #
        # discover:
        #   - how: fmf
        #     filter:
        #       - tag:some_tag
        #     test:
        #       - some-test-regex
        #     exclude:
        #       - some-test-regex
        if 'discover' in plan.data:
            discover = plan.data['discover']
            if not isinstance(discover, list):
                discover = (discover,)

            for entry in discover:
                # only the 'fmf' discover method is supported; skip others
                if entry.get('how') != 'fmf':
                    continue

                filtering = {}
                for meta_name in ('filter', 'test', 'exclude'):
                    if value := self._listlike(entry, meta_name):
                        filtering[meta_name] = value

                children = tree.prune(
                    names=filtering.get('test'),
                    filters=filtering.get('filter'),
                )
                for child in children:
                    # excludes not supported by .prune(), we have to do it here
                    excludes = filtering.get('exclude')
                    if excludes and any(re.match(x, child.name) for x in excludes):
                        continue
                    # only enabled tests
                    if 'enabled' in child.data and not child.data['enabled']:
                        continue
                    # no manual tests
                    if child.data.get('manual'):
                        continue
                    # after adjusting above, any adjusts are useless, free some space
                    if 'adjust' in child.data:
                        del child.data['adjust']
                    # child.sources is a list of .fmf files defining the node,
                    # ie. ['/abs/path/to/some.fmf', '/abs/path/to/some/node.fmf'];
                    # the last one is the most specific, its dir is the test dir
                    source_dir = Path(child.sources[-1]).parent.relative_to(self.fmf_root)
                    self.tests.append(
                        FMFTest(name=child.name, data=child.data, dir=source_dir),
                    )
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
# Some extra notes for fmf.prune() arguments:
|
|
135
|
+
#
|
|
136
|
+
# Set 'names' to filter by a list of fmf node names, ie.
|
|
137
|
+
# ['/some/test', '/another/test']
|
|
138
|
+
#
|
|
139
|
+
# Set 'filters' to filter by a list of fmf-style filter expressions, see
|
|
140
|
+
# https://fmf.readthedocs.io/en/stable/modules.html#fmf.filter
|
|
141
|
+
#
|
|
142
|
+
# Set 'conditions' to filter by a list of python expressions whose namespace
|
|
143
|
+
# locals() are set up to be a dictionary of the tree. When any of the
|
|
144
|
+
# expressions returns True, the tree is returned, ie.
|
|
145
|
+
# ['environment["FOO"] == "BAR"']
|
|
146
|
+
# ['"enabled" not in locals() or enabled']
|
|
147
|
+
# Note that KeyError is silently ignored and treated as False.
|
|
148
|
+
#
|
|
149
|
+
# Set 'context' to a dictionary to post-process the tree metadata with
|
|
150
|
+
# adjust expressions (that may be present in a tree) using the specified
|
|
151
|
+
# context. Any other filters are applied afterwards to allow modification
|
|
152
|
+
# of tree metadata by the adjust expressions. Ie.
|
|
153
|
+
# {'distro': 'rhel-9.6.0', 'arch': 'x86_64'}
|
|
154
|
+
|
|
155
|
+
# A (distro, arch) pair identifying an OS variant, ie. ('rhel-9.6', 'x86_64'),
# used as the context for adjusting fmf metadata per target platform.
Platform = collections.namedtuple('Platform', ['distro', 'arch'])
|
|
156
|
+
|
|
157
|
+
|
|
158
|
+
def combine_platforms(fmf_path, plan_name, platforms):
    """
    Evaluate one plan under the context of several platforms.

    Parses the fmf tree at 'fmf_path' once and, for every Platform in
    'platforms', builds an FMFData with that platform's distro/arch used as
    the adjust context.

    Returns a dict mapping each Platform to its FMFData, so a caller can
    look up the test set matching a provisioned system's platform.
    """
    tree = fmf.Tree(fmf_path)
    return {
        platform: FMFData(
            tree, plan_name,
            context={'distro': platform.distro, 'arch': platform.arch},
        )
        for platform in platforms
    }
|
|
166
|
+
|
|
167
|
+
# TODO: in Orchestrator, when a Provisioner becomes free, have it pick a test
|
|
168
|
+
# from the appropriate tests[platform] per the Provisioner's platform
|