atex-0.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- atex/__init__.py +35 -0
- atex/cli/__init__.py +83 -0
- atex/cli/testingfarm.py +171 -0
- atex/fmf.py +168 -0
- atex/minitmt/__init__.py +109 -0
- atex/minitmt/report.py +174 -0
- atex/minitmt/scripts.py +51 -0
- atex/minitmt/testme.py +3 -0
- atex/orchestrator.py +38 -0
- atex/provision/__init__.py +113 -0
- atex/provision/libvirt/VM_PROVISION +51 -0
- atex/provision/libvirt/__init__.py +23 -0
- atex/provision/libvirt/setup-libvirt.sh +72 -0
- atex/ssh.py +320 -0
- atex/testingfarm.py +523 -0
- atex/util/__init__.py +49 -0
- atex/util/dedent.py +25 -0
- atex/util/lockable_class.py +38 -0
- atex/util/log.py +53 -0
- atex/util/subprocess.py +51 -0
- atex-0.1.dist-info/METADATA +11 -0
- atex-0.1.dist-info/RECORD +25 -0
- atex-0.1.dist-info/WHEEL +4 -0
- atex-0.1.dist-info/entry_points.txt +2 -0
- atex-0.1.dist-info/licenses/COPYING.txt +14 -0
atex/minitmt/report.py
ADDED
@@ -0,0 +1,174 @@
import os
import csv
import gzip
import ctypes
import ctypes.util
import contextlib
from pathlib import Path

from .. import util


libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)

# int linkat(int olddirfd, const char *oldpath, int newdirfd, const char *newpath, int flags)
libc.linkat.argtypes = (
    ctypes.c_int,
    ctypes.c_char_p,
    ctypes.c_int,
    ctypes.c_char_p,
    ctypes.c_int,
)
libc.linkat.restype = ctypes.c_int

# fcntl.h:#define AT_EMPTY_PATH 0x1000 /* Allow empty relative pathname */
AT_EMPTY_PATH = 0x1000

# fcntl.h:#define AT_FDCWD -100 /* Special value used to indicate
AT_FDCWD = -100


def linkat(*args):
    if (ret := libc.linkat(*args)) == -1:
        errno = ctypes.get_errno()
        raise OSError(errno, os.strerror(errno))
    return ret


class CSVReporter(util.LockableClass):
    """
    Stores reported results as a GZIP-ed CSV and files (logs) under a related
    directory.

        with CSVReporter('file/storage/dir', 'results.csv.gz') as reporter:
            sub = reporter.make_subreporter('rhel-9', 'x86_64')
            sub({'name': '/some/test', 'status': 'pass'})
            sub({'name': '/another/test', 'status': 'pass'})
            ...
            sub = reporter.make_subreporter('rhel-9', 'ppc64le')
            ...
            sock = accept_unix_connection()
            reporter.store_file('/some/test', 'debug.log', sock, 1234)
    """
    class _ExcelWithUnixNewline(csv.excel):
        lineterminator = '\n'

    def __init__(self, storage_dir, results_file):
        super().__init__()
        self.storage_dir = Path(storage_dir)
        if self.storage_dir.exists():
            raise FileExistsError(f"{storage_dir} already exists")
        self.results_file = Path(results_file)
        if self.results_file.exists():
            raise FileExistsError(f"{self.results_file} already exists")
        self.storage_dir.mkdir()
        self.csv_writer = None
        self.results_gzip_handle = None

    def __enter__(self):
        f = gzip.open(self.results_file, 'wt', newline='')
        try:
            self.csv_writer = csv.writer(f, dialect=self._ExcelWithUnixNewline)
        except:
            f.close()
            raise
        self.results_gzip_handle = f
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.results_gzip_handle.close()
        self.results_gzip_handle = None
        self.csv_writer = None

    def report(self, distro, arch, status, name, note, *files):
        """
        Persistently write out details of a test result.
        """
        with self.lock:
            self.csv_writer.writerow((distro, arch, status, name, note, *files))

    @staticmethod
    def _normalize_path(path):
        # the magic here is to treat any dangerous path as starting at /
        # and resolve any weird constructs relative to /, and then simply
        # strip off the leading / and use it as a relative path
        path = path.lstrip('/')
        path = os.path.normpath(f'/{path}')
        return path[1:]

    def make_subreporter(self, distro, arch):
        """
        Return a preconfigured reporter instance, suitable for use
        by an Executor.
        """
        def reporter(result_line):
            if 'files' in result_line:
                files = (self._normalize_path(x['name']) for x in result_line['files'])
            else:
                files = ()
            self.report(
                distro, arch, result_line['status'], result_line['name'],
                result_line.get('note', ''), *files,
            )
        return reporter

    def _files_dir(self, result_name):
        dir_path = self.storage_dir / result_name.lstrip('/')
        dir_path.mkdir(parents=True, exist_ok=True)
        return dir_path

    def _files_file(self, result_name, file_name):
        file_name = self._normalize_path(file_name)
        return self._files_dir(result_name) / file_name

    @contextlib.contextmanager
    def open_tmpfile(self, open_mode=os.O_WRONLY):
        flags = open_mode | os.O_TMPFILE
        fd = os.open(self.storage_dir, flags, 0o644)
        try:
            yield fd
        finally:
            os.close(fd)
    # def open_tmpfile(self, result_name, open_mode=os.O_WRONLY):
    #     """
    #     Open an anonymous (name-less) file for writing, in a directory relevant
    #     to 'result_name' and yield its file descriptor (int) as context, closing
    #     it when the context is exited.
    #     """
    #     flags = open_mode | os.O_TMPFILE
    #     fd = os.open(self._files_dir(result_name), flags, 0o644)
    #     try:
    #         yield fd
    #     finally:
    #         os.close(fd)

    def link_tmpfile_to(self, result_name, file_name, fd):
        """
        Store a file named 'file_name' in a directory relevant to 'result_name'
        whose 'fd' (a file descriptor) was created by open_tmpfile().

        This function can be called multiple times with the same 'fd', and
        does not close or otherwise alter the descriptor.
        """
        final_path = self._files_file(result_name, file_name)
        linkat(fd, b'', AT_FDCWD, bytes(final_path), AT_EMPTY_PATH)

    def store_file(self, result_name, file_name, in_fd, count):
        """
        Read 'count' bytes of binary data from an OS file descriptor 'in_fd'
        and store them under 'result_name' as a file (or relative path)
        named 'file_name', creating it.
        """
        final_path = self._files_file(result_name, file_name)
        # be as efficient as possible, let the kernel handle big data
        out_fd = None
        try:
            out_fd = os.open(final_path, os.O_WRONLY | os.O_CREAT)
            while count > 0:
                written = os.sendfile(out_fd, in_fd, None, count)
                if written == 0:
                    raise RuntimeError(f"got unexpected EOF when receiving {final_path}")
                count -= written
        finally:
            if out_fd:
                os.close(out_fd)
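
A minimal usage sketch (not part of the package) of how open_tmpfile() and link_tmpfile_to() fit together on Linux: log data is written into an anonymous O_TMPFILE descriptor and only linked into place under a result name once complete. The storage paths and result/file names here are illustrative only.

    import os
    from atex.minitmt.report import CSVReporter

    with CSVReporter('storage', 'results.csv.gz') as reporter:
        sub = reporter.make_subreporter('rhel-9', 'x86_64')
        sub({'name': '/some/test', 'status': 'pass', 'files': [{'name': 'out.log'}]})
        # stream data into a name-less file, then give it a name atomically
        with reporter.open_tmpfile() as fd:
            os.write(fd, b'test output\n')
            reporter.link_tmpfile_to('/some/test', 'out.log', fd)
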
atex/minitmt/scripts.py
ADDED
@@ -0,0 +1,51 @@
#from .. import util

#run_test = util.dedent(fr'''
#    # create a temp dir for everything, send it to the controller
#    tmpdir=$(mktemp -d /var/tmp/atex-XXXXXXXXX)
#    echo "tmpdir=$tmpdir"
#
#    # remove transient files if interrupted
#    trap "rm -rf \"$tmpdir\"" INT
#
#    # wait for result reporting unix socket to be created by sshd
#    socket=$tmpdir/results.sock
#    while [[ ! -e $socket ]]; do sleep 0.1; done
#    echo "socket=$socket"
#
#    # tell the controller to start logging test output
#    echo ---
#
#    # install test dependencies
#    rpms=( {' '.join(requires)} )
#    to_install=()
#    for rpm in "${{rpms[@]}}"; do
#        rpm -q --quiet "$rpm" || to_install+=("$rpm")
#    done
#    dnf -y --setopt=install_weak_deps=False install "${{to_install[@]}}"
#
#    # run the test
#    ...
#    rc=$?
#
#    # test finished, clean up
#    rm -rf "$tmpdir"
#
#    exit $rc
#''')

# TODO: have another version of ^^^^ for re-execution of test after a reboot
#       or disconnect that sets tmpdir= from us (reusing on-disk test CWD)
#       rather than creating a new one
#       - the second script needs to rm -f the unix socket before echoing
#         something back to let us re-create it via a new ssh channel open
#         because StreamLocalBindUnlink doesn't seem to work


# TODO: call ssh with -oStreamLocalBindUnlink=yes to re-initialize
#       the listening socket after guest reboot
#
#       -R /var/tmp/atex-BlaBla/results.sock:/var/tmp/controller.sock
#
#       (make sure to start listening on /var/tmp/controller.sock before
#        calling ssh to run the test)
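
A rough sketch (not code from the package) of the reverse-forwarded reporting socket the TODOs above describe; the guest address, socket paths, and remote command are placeholders:

    import socket
    import subprocess

    # start listening on the controller-side socket before ssh connects the forward to it
    listener = socket.socket(socket.AF_UNIX)
    listener.bind('/var/tmp/controller.sock')
    listener.listen()

    # forward the guest-side results socket back to the controller socket
    subprocess.run([
        'ssh', '-oStreamLocalBindUnlink=yes',
        '-R', '/var/tmp/atex-XXXXXXXXX/results.sock:/var/tmp/controller.sock',
        'root@guest', 'bash ./run_test.sh',
    ], check=True)
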
atex/minitmt/testme.py
ADDED
atex/orchestrator.py
ADDED
@@ -0,0 +1,38 @@
from . import util


class Orchestrator:
    """
    A scheduler for parallel execution on multiple resources (machines/systems).

    Given a list of Provisioner-derived class instances, it attempts to reserve
    resources and uses them on-demand as they become available, calling run()
    on each.

    Note that run() and report() always run in separate threads (they are
    allowed to block), and may access instance attributes, which are
    transparently guarded by a thread-aware mutex.

    """

    def __init__(self):
        pass
        # TODO: configure via args, max workers, etc.

    # def reserve(self, provisioner):
    #     # call provisioner.reserve(), return its return
    #     ...

    def add_provisioner(self, provisioner):
        # add to a self.* list of provisioners to be used for getting machines
        ...

    def run(self, provisioner):
        # run tests, if destructive, call provisioner.release()
        # returns anything
        ...

    def report(self):
        # gets return from run
        # writes it out to somewhere else
        ...
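
A speculative sketch of how this API might eventually be driven, based only on the stubs and docstring above (run() and report() are not implemented yet; LibvirtProvisioner is the stub defined later in this diff):

    from atex.orchestrator import Orchestrator
    from atex.provision.libvirt import LibvirtProvisioner

    orch = Orchestrator()
    for _ in range(4):  # e.g. four machines worked in parallel
        orch.add_provisioner(LibvirtProvisioner())
    # intent per the docstring: the orchestrator reserves resources as they become
    # available, calls run() on each, and report() consumes whatever run() returned
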
atex/provision/__init__.py
ADDED
@@ -0,0 +1,113 @@
import importlib
import pkgutil

from .. import util


class Provisioner(util.LockableClass):
    """
    A resource (machine/system) provider.

    Any class derived from Provisioner serves as a mechanism for requesting
    a resource (machine/system), waiting for it to be reserved, providing ssh
    details on how to connect to it, and releasing it when no longer useful.

    The 4 main API points for this are reserve(), connection(), release() and
    alive().
    If necessary, these methods can share data via class instance attributes,
    which are transparently guarded by a thread-aware mutex. For any complex
    reads/writes, use 'self.lock' via a context manager.

    Note that reserve() always runs in a separate thread (and thus may block),
    and other functions (incl. release()) may be called at any time from
    a different thread, even while reserve() is still running.
    It is thus recommended for reserve() to store metadata in self.* as soon
    as the metadata becomes available (some job ID, request UUID, Popen proc
    object with PID, etc.) so that release() can free the resource at any time.

    Once release()'d, the instance is never reused for reserve() again.
    However connection(), release() and alive() may be called several times at
    any time and need to handle it safely.
    I.e. once release()'d, an instance must never return alive() == True.

        # explicit method calls
        res = Provisioner(...)
        res.reserve()
        conn = res.connection()
        conn.connect()
        conn.ssh('ls /')
        conn.disconnect()
        res.release()

        # via a context manager
        with Provisioner(...) as res:
            with res.connection() as conn:
                conn.ssh('ls /')

    If a Provisioner class needs additional configuration, it should do so via
    class (not instance) attributes, allowing it to be instantiated many times.

        class ConfiguredProvisioner(Provisioner):
            resource_hub = 'https://...'
            login = 'joe'

        # or dynamically
        name = 'joe'
        cls = type(
            f'Provisioner_for_{name}',
            (Provisioner,),
            {'resource_hub': 'https://...', 'login': name},
        )

    These attributes can then be accessed from __init__ or any other function.
    """

    def __init__(self):
        """
        Initialize the provisioner instance.
        If extending __init__, always call 'super().__init__()' at the top.
        """
        super().__init__()

    def reserve(self):
        """
        Send a reservation request for a resource and wait for it to be
        reserved.
        """
        raise NotImplementedError(f"'reserve' not implemented for {self.__class__.__name__}")

    def connection(self):
        """
        Return an atex.ssh.SSHConn instance configured for connection to
        the reserved resource, but not yet connected.
        """
        raise NotImplementedError(f"'connection' not implemented for {self.__class__.__name__}")

    def release(self):
        """
        Release a reserved resource, or cancel a reservation-in-progress.
        """
        raise NotImplementedError(f"'release' not implemented for {self.__class__.__name__}")

    def alive(self):
        """
        Return True if the resource is still reserved, False otherwise.
        """
        raise NotImplementedError(f"'alive' not implemented for {self.__class__.__name__}")


def find_provisioners():
    provisioners = []
    for info in pkgutil.iter_modules(__spec__.submodule_search_locations):
        mod = importlib.import_module(f'.{info.name}', __name__)
        # look for Provisioner-derived classes in the module
        for attr in dir(mod):
            if attr.startswith('_'):
                continue
            value = getattr(mod, attr)
            try:
                if issubclass(value, Provisioner):
                    provisioners.append(attr)
            except TypeError:
                pass
    return provisioners
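
For illustration only: find_provisioners() as written collects attribute names (strings), not the classes themselves, so a caller sees something like:

    from atex import provision

    for name in provision.find_provisioners():
        print(name)  # e.g. 'LibvirtProvisioner'
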
atex/provision/libvirt/VM_PROVISION
ADDED
@@ -0,0 +1,51 @@
IMAGED INSTALLS:

- have an independent script to install/prepare an image from install URL / nightly name
  - could be invoked as a "setup task" by the user
  - could be run automatically via cron on the VM host, etc.
  - some script that does virt-install --transient and installs new prepared
    template images into some pool
  - use virt-sysprep from guestfs-tools when preparing an image during setup

- have another independent script that pre-creates domains (XMLs) on the VM host
  - script that (without locking, it should never be run alongside the above)
    pre-creates domains on the system (specifying various options, etc.)
  - maybe generating XML directly from given opts?
  - maybe using virt-install?

- from the many (60+) provision modules:
  - don't create new libvirt domains from scratch, that's too inefficient remotely
    - have pre-created persistent domains (see above)
    - have pre-installed ready-to-use images to be cloned
    - or otherwise separate the process of installing images from using domains for testing
  - have some limitation on volume storage for a domain, to make things simpler
    - (don't deal with path-based image storage, many disks, etc.)
    - we will ONLY touch volumes in some specified --pool that are named EXACTLY
      the same as domain names (ie. volume called scap-t12 for scap-t12 domain)
    - domains can have many other disks/sources/volumes, we don't care

- provision client would
  - list all inactive domains, match some name pattern (scap-*)
  - pick one at random (and remember it into some list, so it doesn't pick it again)
  - check custom <metadata>
    - if the metadata indicate it's unused (no metadata or not-ours-UUID)
      - write our UUID to the metadata
      - re-read it (completing the mutex)
  - leave the XML otherwise as-is, just swap out volume via vol-clone
    - support --reflink


KICKSTARTED INSTALLS:

- somewhat similar:
  - also use pre-created libvirt domains, but do not use images
  - use virt-install --reinstall to retain the original domain XML, while being able
    to reinstall it ad-hoc with a custom kickstart


FULLY CUSTOM INSTALLS:

- useful for special use cases with custom HW (emulated TPM2, etc.)
- basically virt-install creating a new domain (ignoring any pre-defined ones)
- probably shouldn't be used by automation, only for one-VM-at-a-time on user request
  - (no free memory/disk checking, no libvirt locking, etc.)
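
A rough sketch (not part of the package, and only one possible reading of the "provision client" notes above) of picking an inactive domain and cloning its same-named volume; the name pattern and pool are illustrative, and the <metadata>-based claiming step is omitted since the notes leave its exact mechanism open:

    import random
    import subprocess

    def pick_inactive_domain(pattern='scap-', already_tried=()):
        # list all inactive domains and match the name pattern
        out = subprocess.run(
            ['virsh', 'list', '--inactive', '--name'],
            capture_output=True, text=True, check=True,
        ).stdout
        candidates = [d for d in out.split()
                      if d.startswith(pattern) and d not in already_tried]
        return random.choice(candidates) if candidates else None

    def clone_volume(domain, pool='default'):
        # the volume is named exactly like the domain, per the convention above
        subprocess.run(
            ['virsh', 'vol-clone', '--pool', pool, domain, f'{domain}-clone', '--reflink'],
            check=True,
        )
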
atex/provision/libvirt/__init__.py
ADDED
@@ -0,0 +1,23 @@
from .. import Provisioner as _Provisioner
from ... import util, ssh

class LibvirtProvisioner(_Provisioner):
    number = 123

    def reserve(self):
        util.debug(f"reserving {self.number}")

    # TODO: as simple attribute, to be guaranteed set when reserve() returns,
    #       can be overridden by a getter function if you need to keep track
    #       how many times it was accessed
    def connection(self):
        #return {'Hostname': '1.2.3.4', 'User': 'root', 'IdentityFile': ...}
        util.debug(f"returning ssh for {self.number}")
        return ssh.SSHConn({'Hostname': '1.2.3.4', 'User': 'root'})

    def release(self):
        util.debug(f"releasing {self.number}")

    def alive(self):
        util.debug(f"always alive: {self.number}")
        return True

atex/provision/libvirt/setup-libvirt.sh
ADDED
@@ -0,0 +1,72 @@
#!/bin/bash

set -xe

# install RPM dependencies
dnf --setopt=install_weak_deps=False -y install \
    libvirt-daemon-driver-qemu \
    libvirt-daemon-driver-storage-disk \
    libvirt-daemon-config-network \
    libvirt-client \
    qemu-kvm \
    swtpm-tools

# start all sockets that were enabled by libvirt-daemon RPM scripts
# (simulating a reboot)
if systemctl --quiet is-enabled libvirtd.service; then
    systemctl start libvirtd.service
else
    sockets=$(
        systemctl list-unit-files --full \
            --type=socket --state=enabled 'virt*' \
            | grep '^virt' | sed 's/ .*//'
    )
    for socket in $sockets; do
        systemctl start "$socket"
    done
fi

tmpfile=$(mktemp)
trap "rm -f \"$tmpfile\"" EXIT

# set up a default network
if virsh -q net-list --name | grep -q '^default *$'; then
    virsh net-destroy default
    virsh net-undefine default
elif virsh -q net-list --name --inactive | grep -q '^default *$'; then
    virsh net-undefine default
fi
cat > "$tmpfile" <<EOF
<network>
  <name>default</name>
  <forward mode='nat'/>
  <bridge name='virbr0' stp='off' delay='0'/>
  <ip address='100.80.60.1' netmask='255.255.255.0'>
    <dhcp>
      <range start='100.80.60.2' end='100.80.60.250'/>
    </dhcp>
  </ip>
</network>
EOF
virsh net-define "$tmpfile"
virsh net-autostart default
virsh net-start default

# set up a default storage pool
if virsh -q pool-list --name | grep -q '^default *$'; then
    virsh pool-destroy default
    virsh pool-undefine default
elif virsh -q pool-list --name --inactive | grep -q '^default *$'; then
    virsh pool-undefine default
fi
cat > "$tmpfile" <<EOF
<pool type='dir'>
  <name>default</name>
  <target>
    <path>/var/lib/libvirt/images</path>
  </target>
</pool>
EOF
virsh pool-define "$tmpfile"
virsh pool-autostart default
virsh pool-start default