atex 0.5__py3-none-any.whl → 0.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- atex/__init__.py +2 -12
- atex/cli/__init__.py +13 -13
- atex/cli/fmf.py +93 -0
- atex/cli/testingfarm.py +71 -61
- atex/connection/__init__.py +117 -0
- atex/connection/ssh.py +390 -0
- atex/executor/__init__.py +2 -0
- atex/executor/duration.py +60 -0
- atex/executor/executor.py +378 -0
- atex/executor/reporter.py +106 -0
- atex/executor/scripts.py +155 -0
- atex/executor/testcontrol.py +353 -0
- atex/fmf.py +217 -0
- atex/orchestrator/__init__.py +2 -0
- atex/orchestrator/aggregator.py +106 -0
- atex/orchestrator/orchestrator.py +324 -0
- atex/provision/__init__.py +101 -90
- atex/provision/libvirt/VM_PROVISION +8 -0
- atex/provision/libvirt/__init__.py +4 -4
- atex/provision/podman/README +59 -0
- atex/provision/podman/host_container.sh +74 -0
- atex/provision/testingfarm/__init__.py +2 -0
- atex/{testingfarm.py → provision/testingfarm/api.py} +170 -132
- atex/provision/testingfarm/testingfarm.py +236 -0
- atex/util/__init__.py +5 -10
- atex/util/dedent.py +1 -1
- atex/util/log.py +20 -12
- atex/util/path.py +16 -0
- atex/util/ssh_keygen.py +14 -0
- atex/util/subprocess.py +14 -13
- atex/util/threads.py +55 -0
- {atex-0.5.dist-info → atex-0.8.dist-info}/METADATA +97 -2
- atex-0.8.dist-info/RECORD +37 -0
- atex/cli/minitmt.py +0 -82
- atex/minitmt/__init__.py +0 -115
- atex/minitmt/fmf.py +0 -168
- atex/minitmt/report.py +0 -174
- atex/minitmt/scripts.py +0 -51
- atex/minitmt/testme.py +0 -3
- atex/orchestrator.py +0 -38
- atex/ssh.py +0 -320
- atex/util/lockable_class.py +0 -38
- atex-0.5.dist-info/RECORD +0 -26
- {atex-0.5.dist-info → atex-0.8.dist-info}/WHEEL +0 -0
- {atex-0.5.dist-info → atex-0.8.dist-info}/entry_points.txt +0 -0
- {atex-0.5.dist-info → atex-0.8.dist-info}/licenses/COPYING.txt +0 -0
|
@@ -0,0 +1,324 @@
import collections
import concurrent.futures
import tempfile
import time
import traceback
from pathlib import Path

from .. import util, executor


11
|
+
class Orchestrator:
|
|
12
|
+
"""
|
|
13
|
+
A scheduler for parallel execution on multiple resources (machines/systems).
|
|
14
|
+
"""
|
|
15
|
+
|
|
16
|
+
SetupInfo = collections.namedtuple(
|
|
17
|
+
"SetupInfo",
|
|
18
|
+
(
|
|
19
|
+
# class Provisioner instance this machine is provided by
|
|
20
|
+
# (for logging purposes)
|
|
21
|
+
"provisioner",
|
|
22
|
+
# class Remote instance returned by the Provisioner
|
|
23
|
+
"remote",
|
|
24
|
+
# class Executor instance uploading tests / running setup or tests
|
|
25
|
+
"executor",
|
|
26
|
+
),
|
|
27
|
+
)
|
|
28
|
+
RunningInfo = collections.namedtuple(
|
|
29
|
+
"RunningInfo",
|
|
30
|
+
(
|
|
31
|
+
# "inherit" from SetupInfo
|
|
32
|
+
*SetupInfo._fields,
|
|
33
|
+
# string with /test/name
|
|
34
|
+
"test_name",
|
|
35
|
+
# class tempfile.TemporaryDirectory instance with 'json_file' and 'files_dir'
|
|
36
|
+
"tmp_dir",
|
|
37
|
+
),
|
|
38
|
+
)
|
|
39
|
+
FinishedInfo = collections.namedtuple(
|
|
40
|
+
"FinishedInfo",
|
|
41
|
+
(
|
|
42
|
+
# "inherit" from RunningInfo
|
|
43
|
+
*RunningInfo._fields,
|
|
44
|
+
# integer with exit code of the test
|
|
45
|
+
# (None if exception happened)
|
|
46
|
+
"exit_code",
|
|
47
|
+
# exception class instance if running the test failed
|
|
48
|
+
# (None if no exception happened (exit_code is defined))
|
|
49
|
+
"exception",
|
|
50
|
+
),
|
|
51
|
+
)
|
|
52
|
+
|
|
53
|
+
def __init__(self, platform, fmf_tests, provisioners, aggregator, tmp_dir, *, max_reruns=2):
|
|
54
|
+
"""
|
|
55
|
+
'platform' is a string with platform name.
|
|
56
|
+
|
|
57
|
+
'fmf_tests' is a class FMFTests instance of the tests to run.
|
|
58
|
+
|
|
59
|
+
'provisioners' is an iterable of class Provisioner instances.
|
|
60
|
+
|
|
61
|
+
'aggregator' is a class CSVAggregator instance.
|
|
62
|
+
|
|
63
|
+
'tmp_dir' is a string/Path to a temporary directory, to be used for
|
|
64
|
+
storing per-test results and uploaded files before being ingested
|
|
65
|
+
by the aggregator. Can be safely shared by Orchestrator instances.
|
|
66
|
+
"""
|
|
67
|
+
self.platform = platform
|
|
68
|
+
self.fmf_tests = fmf_tests
|
|
69
|
+
self.provisioners = tuple(provisioners)
|
|
70
|
+
self.aggregator = aggregator
|
|
71
|
+
self.tmp_dir = tmp_dir
|
|
72
|
+
# tests still waiting to be run
|
|
73
|
+
self.to_run = set(fmf_tests.tests)
|
|
74
|
+
# running setup functions, as a list of SetupInfo items
|
|
75
|
+
self.running_setups = []
|
|
76
|
+
# running tests as a dict, indexed by test name, with RunningInfo values
|
|
77
|
+
self.running_tests = {}
|
|
78
|
+
# indexed by test name, value being integer of how many times
|
|
79
|
+
self.reruns = collections.defaultdict(lambda: max_reruns)
|
|
80
|
+
# thread queue for actively running tests
|
|
81
|
+
self.test_queue = util.ThreadQueue(daemon=False)
|
|
82
|
+
# thread queue for remotes being set up (uploading tests, etc.)
|
|
83
|
+
self.setup_queue = util.ThreadQueue(daemon=True)
|
|
84
|
+
# NOTE: running_setups and test_running are just for debugging and
|
|
85
|
+
# cancellation, the execution flow itself uses ThreadQueues
|
|
86
|
+
|
|
87
|
+
@staticmethod
|
|
88
|
+
def _run_setup(sinfo):
|
|
89
|
+
sinfo.executor.setup()
|
|
90
|
+
sinfo.executor.upload_tests()
|
|
91
|
+
sinfo.executor.setup_plan()
|
|
92
|
+
# NOTE: we never run executor.cleanup() anywhere - instead, we assume
|
|
93
|
+
# the remote (and its connection) was invalidated by the test,
|
|
94
|
+
# so we just rely on remote.release() destroying the system
|
|
95
|
+
return sinfo
|
|
96
|
+
|
|
97
|
+
@classmethod
|
|
98
|
+
def _wrap_test(cls, rinfo, func, *args, **kwargs):
|
|
99
|
+
"""
|
|
100
|
+
Wrap 'func' (test execution function) to preserve extra metadata
|
|
101
|
+
('rinfo') and return it with the function return value.
|
|
102
|
+
"""
|
|
103
|
+
try:
|
|
104
|
+
return cls.FinishedInfo(*rinfo, func(*args, **kwargs), None)
|
|
105
|
+
except Exception as e:
|
|
106
|
+
return cls.FinishedInfo(*rinfo, None, e)
|
|
107
|
+
|
|
108
|
+
def _run_new_test(self, sinfo):
|
|
109
|
+
"""
|
|
110
|
+
'sinfo' is a SetupInfo instance.
|
|
111
|
+
"""
|
|
112
|
+
next_test_name = self.next_test(self.to_run, self.fmf_tests)
|
|
113
|
+
assert next_test_name in self.to_run, "next_test() returned valid test name"
|
|
114
|
+
|
|
115
|
+
self.to_run.remove(next_test_name)
|
|
116
|
+
|
|
117
|
+
rinfo = self.RunningInfo(
|
|
118
|
+
*sinfo,
|
|
119
|
+
test_name=next_test_name,
|
|
120
|
+
tmp_dir=tempfile.TemporaryDirectory(
|
|
121
|
+
prefix=next_test_name.strip("/").replace("/","-") + "-",
|
|
122
|
+
dir=self.tmp_dir,
|
|
123
|
+
delete=False,
|
|
124
|
+
),
|
|
125
|
+
)
|
|
126
|
+
|
|
127
|
+
tmp_dir_path = Path(rinfo.tmp_dir.name)
|
|
128
|
+
self.test_queue.start_thread(
|
|
129
|
+
target=self._wrap_test,
|
|
130
|
+
args=(
|
|
131
|
+
rinfo,
|
|
132
|
+
sinfo.executor.run_test,
|
|
133
|
+
next_test_name,
|
|
134
|
+
tmp_dir_path / "json_file",
|
|
135
|
+
tmp_dir_path / "files_dir",
|
|
136
|
+
),
|
|
137
|
+
)
|
|
138
|
+
|
|
139
|
+
self.running_tests[next_test_name] = rinfo
|
|
140
|
+
|
|
141
|
+
def _process_finished_test(self, finfo):
|
|
142
|
+
"""
|
|
143
|
+
'finfo' is a FinishedInfo instance.
|
|
144
|
+
"""
|
|
145
|
+
test_id = f"'{finfo.test_name}' on '{finfo.remote}'"
|
|
146
|
+
tmp_dir_path = Path(finfo.tmp_dir.name)
|
|
147
|
+
|
|
148
|
+
# NOTE: document that we intentionally don't .cleanup() executioner below,
|
|
149
|
+
# we rely on remote .release() destroying the OS, because we don't
|
|
150
|
+
# want to risk .cleanup() blocking on dead ssh into the remote after
|
|
151
|
+
# executing a destructive test
|
|
152
|
+
|
|
153
|
+
destructive = False
|
|
154
|
+
|
|
155
|
+
# if executor (or test) threw exception, schedule a re-run
|
|
156
|
+
if finfo.exception:
|
|
157
|
+
destructive = True
|
|
158
|
+
exc_str = "".join(traceback.format_exception(finfo.exception)).rstrip("\n")
|
|
159
|
+
util.info(f"unexpected exception happened while running {test_id}:\n{exc_str}")
|
|
160
|
+
finfo.remote.release()
|
|
161
|
+
if self.reruns[finfo.test_name] > 0:
|
|
162
|
+
self.reruns[finfo.test_name] -= 1
|
|
163
|
+
self.to_run.add(finfo.test_name)
|
|
164
|
+
else:
|
|
165
|
+
util.info(f"reruns for {test_id} exceeded, ignoring it")
|
|
166
|
+
|
|
167
|
+
# if the test exited as non-0, try a re-run
|
|
168
|
+
elif finfo.exit_code != 0:
|
|
169
|
+
destructive = True
|
|
170
|
+
finfo.remote.release()
|
|
171
|
+
if self.reruns[finfo.test_name] > 0:
|
|
172
|
+
util.info(
|
|
173
|
+
f"{test_id} exited with non-zero: {finfo.exit_code}, re-running "
|
|
174
|
+
f"({self.reruns[finfo.test_name]} reruns left)",
|
|
175
|
+
)
|
|
176
|
+
self.reruns[finfo.test_name] -= 1
|
|
177
|
+
self.to_run.add(finfo.test_name)
|
|
178
|
+
else:
|
|
179
|
+
util.info(
|
|
180
|
+
f"{test_id} exited with non-zero: {finfo.exit_code}, "
|
|
181
|
+
"all reruns exceeded, giving up",
|
|
182
|
+
)
|
|
183
|
+
# record the final result anyway
|
|
184
|
+
self.aggregator.ingest(
|
|
185
|
+
self.platform,
|
|
186
|
+
finfo.test_name,
|
|
187
|
+
tmp_dir_path / "json_file",
|
|
188
|
+
tmp_dir_path / "files_dir",
|
|
189
|
+
)
|
|
190
|
+
finfo.tmp_dir.cleanup()
|
|
191
|
+
|
|
192
|
+
# test finished successfully - ingest its results
|
|
193
|
+
else:
|
|
194
|
+
util.info(f"{test_id} finished successfully")
|
|
195
|
+
self.aggregator.ingest(
|
|
196
|
+
self.platform,
|
|
197
|
+
finfo.test_name,
|
|
198
|
+
tmp_dir_path / "json_file",
|
|
199
|
+
tmp_dir_path / "files_dir",
|
|
200
|
+
)
|
|
201
|
+
finfo.tmp_dir.cleanup()
|
|
202
|
+
|
|
203
|
+
# if the remote was not destroyed by traceback / failing test,
|
|
204
|
+
# check if the test always destroys it (even on success)
|
|
205
|
+
if not destructive:
|
|
206
|
+
test_data = self.fmf_tests.tests[finfo.test_name]
|
|
207
|
+
destructive = test_data.get("extra-atex", {}).get("destructive", False)
|
|
208
|
+
|
|
209
|
+
# if destroyed, release the remote
|
|
210
|
+
if destructive:
|
|
211
|
+
util.debug(f"{test_id} was destructive, releasing remote")
|
|
212
|
+
finfo.remote.release()
|
|
213
|
+
|
|
214
|
+
# if still not destroyed, run another test on it
|
|
215
|
+
# (without running plan setup, re-using already set up remote)
|
|
216
|
+
elif self.to_run:
|
|
217
|
+
sinfo = self.SetupInfo(
|
|
218
|
+
provisioner=finfo.provisioner,
|
|
219
|
+
remote=finfo.remote,
|
|
220
|
+
executor=finfo.executor,
|
|
221
|
+
)
|
|
222
|
+
util.debug(f"{test_id} was non-destructive, running next test")
|
|
223
|
+
self._run_new_test(sinfo)
|
|
224
|
+
|
|
225
|
+
def serve_once(self):
|
|
226
|
+
"""
|
|
227
|
+
Run the orchestration logic, processing any outstanding requests
|
|
228
|
+
(for provisioning, new test execution, etc.) and returning once these
|
|
229
|
+
are taken care of.
|
|
230
|
+
|
|
231
|
+
Returns True to indicate that it should be called again by the user
|
|
232
|
+
(more work to be done), False once all testing is concluded.
|
|
233
|
+
"""
|
|
234
|
+
util.debug(
|
|
235
|
+
f"to_run: {len(self.to_run)} tests / "
|
|
236
|
+
f"running: {len(self.running_tests)} tests, {len(self.running_setups)} setups",
|
|
237
|
+
)
|
|
238
|
+
# all done
|
|
239
|
+
if not self.to_run and not self.running_tests:
|
|
240
|
+
return False
|
|
241
|
+
|
|
242
|
+
# process all finished tests, potentially reusing remotes for executing
|
|
243
|
+
# further tests
|
|
244
|
+
while True:
|
|
245
|
+
try:
|
|
246
|
+
finfo = self.test_queue.get(block=False)
|
|
247
|
+
except util.ThreadQueue.Empty:
|
|
248
|
+
break
|
|
249
|
+
del self.running_tests[finfo.test_name]
|
|
250
|
+
self._process_finished_test(finfo)
|
|
251
|
+
|
|
252
|
+
# process any remotes with finished plan setup (uploaded tests,
|
|
253
|
+
# plan-defined pkgs / prepare scripts), start executing tests on them
|
|
254
|
+
while True:
|
|
255
|
+
try:
|
|
256
|
+
sinfo = self.setup_queue.get(block=False)
|
|
257
|
+
except util.ThreadQueue.Empty:
|
|
258
|
+
break
|
|
259
|
+
util.debug(f"setup finished for '{sinfo.remote}', running first test")
|
|
260
|
+
self.running_setups.remove(sinfo)
|
|
261
|
+
self._run_new_test(sinfo)
|
|
262
|
+
|
|
263
|
+
# try to get new remotes from Provisioners - if we get some, start
|
|
264
|
+
# running setup on them
|
|
265
|
+
for provisioner in self.provisioners:
|
|
266
|
+
while (remote := provisioner.get_remote(block=False)) is not None:
|
|
267
|
+
ex = executor.Executor(self.fmf_tests, remote)
|
|
268
|
+
sinfo = self.SetupInfo(
|
|
269
|
+
provisioner=provisioner,
|
|
270
|
+
remote=remote,
|
|
271
|
+
executor=ex,
|
|
272
|
+
)
|
|
273
|
+
self.setup_queue.start_thread(
|
|
274
|
+
target=self._run_setup,
|
|
275
|
+
args=(sinfo,),
|
|
276
|
+
)
|
|
277
|
+
self.running_setups.append(sinfo)
|
|
278
|
+
util.debug(f"got remote '{remote}' from '{provisioner}', running setup")
|
|
279
|
+
|
|
280
|
+
return True
|
|
281
|
+
|
|
282
|
+
def serve_forever(self):
|
|
283
|
+
"""
|
|
284
|
+
Run the orchestration logic, blocking until all testing is concluded.
|
|
285
|
+
"""
|
|
286
|
+
while self.serve_once():
|
|
287
|
+
time.sleep(1)
|
|
288
|
+
|
|
289
|
+
def __enter__(self):
|
|
290
|
+
# start all provisioners
|
|
291
|
+
for prov in self.provisioners:
|
|
292
|
+
prov.start()
|
|
293
|
+
return self
|
|
294
|
+
|
|
295
|
+
def __exit__(self, exc_type, exc_value, traceback):
|
|
296
|
+
# cancel all running tests and wait for them to clean up (up to 0.1sec)
|
|
297
|
+
for rinfo in self.running_tests.values():
|
|
298
|
+
rinfo.executor.cancel()
|
|
299
|
+
self.test_queue.join() # also ignore any exceptions raised
|
|
300
|
+
|
|
301
|
+
# stop all provisioners, also releasing all remotes
|
|
302
|
+
with concurrent.futures.ThreadPoolExecutor(max_workers=20) as ex:
|
|
303
|
+
for provisioner in self.provisioners:
|
|
304
|
+
for func in provisioner.stop_defer():
|
|
305
|
+
ex.submit(func)
|
|
306
|
+
|
|
307
|
+
def next_test(self, tests, fmf_tests): # noqa: ARG002, PLR6301
|
|
308
|
+
"""
|
|
309
|
+
Return a test name (string) from a set of 'tests' (set of test name
|
|
310
|
+
strings) to be run next.
|
|
311
|
+
|
|
312
|
+
'fmf_tests' is a class FMFTests instance with additional test metadata.
|
|
313
|
+
|
|
314
|
+
This method is user-overridable, ie. by subclassing Orchestrator:
|
|
315
|
+
|
|
316
|
+
class CustomOrchestrator(Orchestrator):
|
|
317
|
+
@staticmethod
|
|
318
|
+
def next_test(tests):
|
|
319
|
+
...
|
|
320
|
+
"""
|
|
321
|
+
# TODO: more advanced algorithm
|
|
322
|
+
#
|
|
323
|
+
# simple:
|
|
324
|
+
return next(iter(tests))
|
atex/provision/__init__.py
CHANGED
|
@@ -1,65 +1,33 @@
|
|
|
1
|
-
import importlib
|
|
2
|
-
import pkgutil
|
|
1
|
+
import importlib as _importlib
|
|
2
|
+
import pkgutil as _pkgutil
|
|
3
|
+
import threading as _threading
|
|
3
4
|
|
|
4
|
-
from .. import
|
|
5
|
+
from .. import connection as _connection
|
|
5
6
|
|
|
6
7
|
|
|
7
|
-
class Provisioner
|
|
8
|
+
class Provisioner:
|
|
8
9
|
"""
|
|
9
|
-
A resource (machine/system) provider.
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
any time and need to handle it safely.
|
|
31
|
-
Ie. once released(), an instance must never return alive() == True.
|
|
32
|
-
|
|
33
|
-
# explicit method calls
|
|
34
|
-
res = Provisioner(...)
|
|
35
|
-
res.reserve()
|
|
36
|
-
conn = res.connection()
|
|
37
|
-
conn.connect()
|
|
38
|
-
conn.ssh('ls /')
|
|
39
|
-
conn.disconnect()
|
|
40
|
-
res.release()
|
|
41
|
-
|
|
42
|
-
# via a context manager
|
|
43
|
-
with Provisioner(...) as res:
|
|
44
|
-
with res.connection() as conn:
|
|
45
|
-
conn.ssh('ls /')
|
|
46
|
-
|
|
47
|
-
If a Provisioner class needs additional configuration, it should do so via
|
|
48
|
-
class (not instance) attributes, allowing it to be instantiated many times.
|
|
49
|
-
|
|
50
|
-
class ConfiguredProvisioner(Provisioner):
|
|
51
|
-
resource_hub = 'https://...'
|
|
52
|
-
login = 'joe'
|
|
53
|
-
|
|
54
|
-
# or dynamically
|
|
55
|
-
name = 'joe'
|
|
56
|
-
cls = type(
|
|
57
|
-
f'Provisioner_for_{name}',
|
|
58
|
-
(Provisioner,),
|
|
59
|
-
{'resource_hub': 'https://...', 'login': name},
|
|
60
|
-
)
|
|
61
|
-
|
|
62
|
-
These attributes can then be accessed from __init__ or any other function.
|
|
10
|
+
A remote resource (machine/system) provider.
|
|
11
|
+
|
|
12
|
+
The main interface is .get_remote() that returns a connected class Remote
|
|
13
|
+
instance for use by the user, to be .release()d when not needed anymore,
|
|
14
|
+
with Provisioner automatically getting a replacement for it, to be returned
|
|
15
|
+
via .get_remote() later.
|
|
16
|
+
|
|
17
|
+
p = Provisioner()
|
|
18
|
+
p.start()
|
|
19
|
+
remote = p.get_remote()
|
|
20
|
+
remote.cmd(["ls", "/"])
|
|
21
|
+
remote.release()
|
|
22
|
+
p.stop()
|
|
23
|
+
|
|
24
|
+
with Provisioner() as p:
|
|
25
|
+
remote = p.get_remote()
|
|
26
|
+
...
|
|
27
|
+
remote.release()
|
|
28
|
+
|
|
29
|
+
Note that .stop() or .defer_stop() may be called from a different
|
|
30
|
+
thread, asynchronously to any other functions.
|
|
63
31
|
"""
|
|
64
32
|
|
|
65
33
|
def __init__(self):
|
|
@@ -67,47 +35,90 @@ class Provisioner(util.LockableClass):
|
|
|
67
35
|
Initialize the provisioner instance.
|
|
68
36
|
If extending __init__, always call 'super().__init__()' at the top.
|
|
69
37
|
"""
|
|
70
|
-
|
|
38
|
+
self.lock = _threading.RLock()
|
|
71
39
|
|
|
72
|
-
def
|
|
40
|
+
def get_remote(self, block=True):
|
|
73
41
|
"""
|
|
74
|
-
|
|
75
|
-
|
|
42
|
+
Get a connected class Remote instance.
|
|
43
|
+
|
|
44
|
+
If 'block' is True, wait for the remote to be available and connected,
|
|
45
|
+
otherwise return None if there is no Remote available yet.
|
|
76
46
|
"""
|
|
77
|
-
raise NotImplementedError(f"'
|
|
47
|
+
raise NotImplementedError(f"'get_remote' not implemented for {self.__class__.__name__}")
|
|
78
48
|
|
|
79
|
-
def
|
|
49
|
+
def start(self):
|
|
80
50
|
"""
|
|
81
|
-
|
|
82
|
-
|
|
51
|
+
Start the Provisioner instance, start any provisioning-related
|
|
52
|
+
processes that lead to systems being reserved.
|
|
83
53
|
"""
|
|
84
|
-
raise NotImplementedError(f"'
|
|
54
|
+
raise NotImplementedError(f"'start' not implemented for {self.__class__.__name__}")
|
|
85
55
|
|
|
86
|
-
def
|
|
56
|
+
def stop(self):
|
|
87
57
|
"""
|
|
88
|
-
|
|
58
|
+
Stop the Provisioner instance, freeing all reserved resources,
|
|
59
|
+
calling .release() on all Remote instances that were created.
|
|
89
60
|
"""
|
|
90
|
-
raise NotImplementedError(f"'
|
|
61
|
+
raise NotImplementedError(f"'stop' not implemented for {self.__class__.__name__}")
|
|
62
|
+
|
|
63
|
+
def stop_defer(self):
|
|
64
|
+
"""
|
|
65
|
+
Enable an external caller to stop the Provisioner instance,
|
|
66
|
+
deferring resource deallocation to the caller.
|
|
67
|
+
|
|
68
|
+
Return an iterable of argument-free thread-safe callables that can be
|
|
69
|
+
called, possibly in parallel, to free up resources.
|
|
70
|
+
Ie. a list of 200 .release() functions, to be called in a thread pool
|
|
71
|
+
by the user, speeding up cleanup.
|
|
72
|
+
"""
|
|
73
|
+
return self.stop
|
|
74
|
+
|
|
75
|
+
def __enter__(self):
|
|
76
|
+
self.start()
|
|
77
|
+
return self
|
|
78
|
+
|
|
79
|
+
def __exit__(self, exc_type, exc_value, traceback):
|
|
80
|
+
self.stop()
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
class Remote(_connection.Connection):
|
|
84
|
+
"""
|
|
85
|
+
Representation of a provisioned (reserved) remote system, providing
|
|
86
|
+
a Connection-like API in addition to system management helpers.
|
|
87
|
+
|
|
88
|
+
An instance of Remote is typically prepared by a Provisioner and lent out
|
|
89
|
+
for further use, to be .release()d by the user (if destroyed).
|
|
90
|
+
It is not meant for repeated reserve/release cycles, hence the lack
|
|
91
|
+
of .reserve().
|
|
92
|
+
|
|
93
|
+
Also note that Remote can be used via Context Manager, but does not
|
|
94
|
+
do automatic .release(), the manager only handles the built-in Connection.
|
|
95
|
+
The intention is for a Provisioner to run via its own Contest Manager and
|
|
96
|
+
release all Remotes upon exit.
|
|
97
|
+
If you need automatic release of one Remote, use a contextlib.ExitStack
|
|
98
|
+
with a callback, or a try/finally block.
|
|
99
|
+
"""
|
|
91
100
|
|
|
92
|
-
def
|
|
101
|
+
def release(self):
|
|
93
102
|
"""
|
|
94
|
-
|
|
103
|
+
Release (de-provision) the remote resource.
|
|
95
104
|
"""
|
|
96
|
-
raise NotImplementedError(f"'
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
105
|
+
raise NotImplementedError(f"'release' not implemented for {self.__class__.__name__}")
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
_submodules = [
|
|
109
|
+
info.name for info in _pkgutil.iter_modules(__spec__.submodule_search_locations)
|
|
110
|
+
]
|
|
111
|
+
|
|
112
|
+
__all__ = [*_submodules, Provisioner.__name__, Remote.__name__] # noqa: PLE0604
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
def __dir__():
|
|
116
|
+
return __all__
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
# lazily import submodules
|
|
120
|
+
def __getattr__(attr):
|
|
121
|
+
if attr in _submodules:
|
|
122
|
+
return _importlib.import_module(f".{attr}", __name__)
|
|
123
|
+
else:
|
|
124
|
+
raise AttributeError(f"module '{__name__}' has no attribute '{attr}'")
|
|
@@ -49,3 +49,11 @@ FULLY CUSTOM INSTALLS:
|
|
|
49
49
|
- basically virt-install creating a new domain (ignoring any pre-defined ones)
|
|
50
50
|
- probably shouldn't be used by automation, only for one-VM-at-a-time on user request
|
|
51
51
|
- (no free memory/disk checking, no libvirt locking, etc.)
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
# ssh via ProxyJump allowing ssh keys specification
|
|
56
|
+
ssh \
|
|
57
|
+
-o ProxyCommand='ssh -i /tmp/proxy_sshkey root@3.21.232.206 -W %h:%p' \
|
|
58
|
+
-i /tmp/destination_sshkey \
|
|
59
|
+
root@192.168.123.218
|
|
@@ -1,8 +1,8 @@
|
|
|
1
|
-
from .. import
|
|
1
|
+
from .. import base
|
|
2
2
|
from ... import util, ssh
|
|
3
3
|
|
|
4
4
|
|
|
5
|
-
class LibvirtProvisioner(
|
|
5
|
+
class LibvirtProvisioner(base.Provisioner):
|
|
6
6
|
number = 123
|
|
7
7
|
|
|
8
8
|
def reserve(self):
|
|
@@ -12,9 +12,9 @@ class LibvirtProvisioner(_Provisioner):
|
|
|
12
12
|
# can be overriden by a getter function if you need to keep track
|
|
13
13
|
# how many times it was accessed
|
|
14
14
|
def connection(self):
|
|
15
|
-
#return {
|
|
15
|
+
#return {"Hostname": "1.2.3.4", "User": "root", "IdentityFile": ...}
|
|
16
16
|
util.debug(f"returning ssh for {self.number}")
|
|
17
|
-
return ssh.SSHConn({
|
|
17
|
+
return ssh.SSHConn({"Hostname": "1.2.3.4", "User": "root"})
|
|
18
18
|
|
|
19
19
|
def release(self):
|
|
20
20
|
util.debug(f"releasing {self.number}")
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
|
|
2
|
+
making a podman image from the currently installed OS:
|
|
3
|
+
|
|
4
|
+
1) dnf install into a separate installroot
|
|
5
|
+
|
|
6
|
+
dnf \
|
|
7
|
+
--installroot=$INSTALLROOT \
|
|
8
|
+
--setopt=install_weak_deps=False \
|
|
9
|
+
--setopt=tsflags=nodocs \
|
|
10
|
+
-y groupinstall minimal-environment
|
|
11
|
+
|
|
12
|
+
as root (doesn't work well with unshare, maybe could work via bwrap (bubblewrap))
|
|
13
|
+
|
|
14
|
+
maybe the unprivileged solution is pulling image from hub + installing @minimal-environment
|
|
15
|
+
into it (perhaps via podman build)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
2) post process it
|
|
19
|
+
|
|
20
|
+
echo -n > "$INSTALLROOT/etc/machine-id"
|
|
21
|
+
echo container > "$INSTALLROOT/etc/hostname"
|
|
22
|
+
|
|
23
|
+
rm -rf "$INSTALLROOT/etc/yum.repos.d"
|
|
24
|
+
cp -f /etc/yum.repos.d/* "$INSTALLROOT/etc/yum.repos.d/."
|
|
25
|
+
cp -f /etc/pki/rpm-gpg/* "$INSTALLROOT/etc/pki/rpm-gpg/."
|
|
26
|
+
|
|
27
|
+
echo install_weak_deps=False >> "$INSTALLROOT/etc/dnf/dnf.conf"
|
|
28
|
+
echo tsflags=nodocs >> "$INSTALLROOT/etc/dnf/dnf.conf"
|
|
29
|
+
|
|
30
|
+
ln -sf \
|
|
31
|
+
/usr/lib/systemd/system/multi-user.target \
|
|
32
|
+
"$INSTALLROOT/etc/systemd/system/default.target"
|
|
33
|
+
|
|
34
|
+
# disable auditd
|
|
35
|
+
# disable other services
|
|
36
|
+
# set root password
|
|
37
|
+
|
|
38
|
+
dnf clean all --installroot="$INSTALLROOT"
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
3) pack it
|
|
42
|
+
|
|
43
|
+
tar --xattrs -C "$INSTALLROOT" -cvf tarball.tar .
|
|
44
|
+
|
|
45
|
+
rm -rf "$INSTALLROOT"
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
4) import it to podman
|
|
49
|
+
|
|
50
|
+
podman import --change 'CMD ["/sbin/init"]' tarball.tar my-image-name
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
5) run it
|
|
54
|
+
|
|
55
|
+
podman {run,create} --systemd=always --cgroups=split ...
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
------------------------------
|