atex 0.7__py3-none-any.whl → 0.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48) hide show
  1. atex/cli/fmf.py +143 -0
  2. atex/cli/libvirt.py +127 -0
  3. atex/cli/testingfarm.py +35 -13
  4. atex/connection/__init__.py +13 -19
  5. atex/connection/podman.py +63 -0
  6. atex/connection/ssh.py +34 -52
  7. atex/executor/__init__.py +2 -0
  8. atex/executor/duration.py +60 -0
  9. atex/executor/executor.py +402 -0
  10. atex/executor/reporter.py +101 -0
  11. atex/{minitmt → executor}/scripts.py +37 -25
  12. atex/{minitmt → executor}/testcontrol.py +54 -42
  13. atex/fmf.py +237 -0
  14. atex/orchestrator/__init__.py +3 -59
  15. atex/orchestrator/aggregator.py +82 -134
  16. atex/orchestrator/orchestrator.py +385 -0
  17. atex/provision/__init__.py +74 -105
  18. atex/provision/libvirt/__init__.py +2 -24
  19. atex/provision/libvirt/libvirt.py +465 -0
  20. atex/provision/libvirt/locking.py +168 -0
  21. atex/provision/libvirt/setup-libvirt.sh +21 -1
  22. atex/provision/podman/__init__.py +1 -0
  23. atex/provision/podman/podman.py +274 -0
  24. atex/provision/testingfarm/__init__.py +2 -29
  25. atex/provision/testingfarm/api.py +123 -65
  26. atex/provision/testingfarm/testingfarm.py +234 -0
  27. atex/util/__init__.py +1 -6
  28. atex/util/libvirt.py +18 -0
  29. atex/util/log.py +31 -8
  30. atex/util/named_mapping.py +158 -0
  31. atex/util/path.py +16 -0
  32. atex/util/ssh_keygen.py +14 -0
  33. atex/util/threads.py +99 -0
  34. atex-0.9.dist-info/METADATA +178 -0
  35. atex-0.9.dist-info/RECORD +43 -0
  36. atex/cli/minitmt.py +0 -175
  37. atex/minitmt/__init__.py +0 -23
  38. atex/minitmt/executor.py +0 -348
  39. atex/minitmt/fmf.py +0 -202
  40. atex/provision/nspawn/README +0 -74
  41. atex/provision/podman/README +0 -59
  42. atex/provision/podman/host_container.sh +0 -74
  43. atex/provision/testingfarm/foo.py +0 -1
  44. atex-0.7.dist-info/METADATA +0 -102
  45. atex-0.7.dist-info/RECORD +0 -32
  46. {atex-0.7.dist-info → atex-0.9.dist-info}/WHEEL +0 -0
  47. {atex-0.7.dist-info → atex-0.9.dist-info}/entry_points.txt +0 -0
  48. {atex-0.7.dist-info → atex-0.9.dist-info}/licenses/COPYING.txt +0 -0
atex/minitmt/executor.py DELETED
@@ -1,348 +0,0 @@
1
- import os
2
- import re
3
- import time
4
- import select
5
- import threading
6
- import contextlib
7
- import subprocess
8
-
9
- from .. import util
10
- from . import testcontrol, scripts, fmf
11
-
12
-
13
- class Duration:
14
- """
15
- A helper for parsing, keeping and manipulating test run time based on
16
- FMF-defined 'duration' attribute.
17
- """
18
-
19
- def __init__(self, fmf_duration):
20
- """
21
- 'fmf_duration' is the string specified as 'duration' in FMF metadata.
22
- """
23
- duration = self._fmf_to_seconds(fmf_duration)
24
- self.end = time.monotonic() + duration
25
- # keep track of only the first 'save' and the last 'restore',
26
- # ignore any nested ones (as tracked by '_count')
27
- self.saved = None
28
- self.saved_count = 0
29
-
30
- @staticmethod
31
- def _fmf_to_seconds(string):
32
- match = re.fullmatch(r"([0-9]+)([a-z]*)", string)
33
- if not match:
34
- raise RuntimeError(f"'duration' has invalid format: {string}")
35
- length, unit = match.groups()
36
- if unit == "m":
37
- return int(length)*60
38
- elif unit == "h":
39
- return int(length)*60*60
40
- elif unit == "d":
41
- return int(length)*60*60*24
42
- else:
43
- return int(length)
44
-
45
- def set(self, to):
46
- self.end = time.monotonic() + self._fmf_to_seconds(to)
47
-
48
- def increment(self, by):
49
- self.end += self._fmf_to_seconds(by)
50
-
51
- def decrement(self, by):
52
- self.end -= self._fmf_to_seconds(by)
53
-
54
- def save(self):
55
- if self.saved_count == 0:
56
- self.saved = self.end - time.monotonic()
57
- self.saved_count += 1
58
-
59
- def restore(self):
60
- if self.saved_count > 1:
61
- self.saved_count -= 1
62
- elif self.saved_count == 1:
63
- self.end = time.monotonic() + self.saved
64
- self.saved_count = 0
65
- self.saved = None
66
-
67
- def out_of_time(self):
68
- return time.monotonic() > self.end
69
-
70
-
71
- # SETUP global:
72
- # - create CSVAggregator instance on some destination dir
73
-
74
- # SETUP of new Provisioner instance:
75
- # - with SSHConn
76
- # - rsync test repo to some tmpdir on the remote
77
- # - install packages from plan
78
- # - create TMT_PLAN_ENVIRONMENT_FILE and export it to plan setup scripts
79
- # - run plan setup scripts
80
-
81
- # SETUP + CLEANUP for one test
82
- # - create Executor instance
83
- # - pass TMT_PLAN_ENVIRONMENT_FILE to it
84
- # - probably as general "environment variables" input,
85
- # could be reused in the future for ie. TMT_PLAN_NAME or so
86
- # - pass env from CLI -e switches
87
- # - pass disconnected SSHConn to it
88
- # - pass one FMFTest namedtuple to it (test to run)
89
- # - pass test repo location on the remote host
90
- # - pass CSVAggregator instance to it, so it can write results/logs
91
- # - with SSHConn
92
- # - create wrapper script on the remote
93
- # - run wrapper script, redirecting stderr to tmpfile via CSVAggregator
94
- # - poll() / select() over test stdout, parse TEST_CONTROL protocol
95
- # - on 0.1sec poll timeout
96
- # - check if SSHConn master is alive, re-connect if not
97
- # - check test duration against fmf-defined (control-adjusted) duration
98
- # - ...
99
- # - make Executor return some complex status
100
- # - whether testing was destructive (just leave SSHConn disconnected?)
101
-
102
-
103
- class TestAbortedError(Exception):
104
- """
105
- Raised when an infrastructure-related issue happened while running a test.
106
- """
107
- pass
108
-
109
-
110
- # TODO: automatic reporting of all partial results that were not finished
111
- class Executor:
112
- """
113
- Logic for running tests on a remote system and processing results
114
- and uploaded files by those tests.
115
- """
116
-
117
- def __init__(self, remote, aggregator, remote_dir=None, env=None):
118
- """
119
- 'remote' is a reserved class Remote instance, with an active connection.
120
-
121
- 'aggregator' is an instance of a ResultAggregator all the results
122
- and uploaded files will be written to.
123
-
124
- 'remote_dir' is a path on the remote for storing tests and other
125
- metadata. If unspecified, a tmpdir is created and used instead.
126
-
127
- 'env' is a dict of extra environment variables to pass to the
128
- plan prepare scripts and tests.
129
- """
130
- self.lock = threading.RLock()
131
- self.remote = remote
132
- self.aggregator = aggregator
133
- self.cancelled = False
134
- self.remote_dir = remote_dir
135
- self.plan_env = env.copy() if env else {}
136
-
137
- def _get_remote_dir(self):
138
- # TODO: do not mktemp here, do it in the parent, have remote_dir be mandatory,
139
- # renamed to 'tests_dir' (just the test repo), cleaned up by external logic
140
- # - handle custom metadata remote dir in run_test() and clean it up there
141
- # (for TMT_PLAN_ENVIRONMENT_FILE, wrapper, etc.)
142
- if not self.remote_dir:
143
- self.remote_dir = self.remote.cmd(
144
- ("mktemp", "-d", "-p", "/var/tmp"),
145
- func=util.subprocess_output,
146
- )
147
- return self.remote_dir
148
-
149
- # TODO: do not do this in Executor
150
- def upload_tests(self, tests_dir):
151
- """
152
- Upload a directory of all tests from a local 'tests_dir' path to
153
- a temporary directory on the remote host.
154
- """
155
- remote_dir = self._get_remote_dir()
156
- self.remote.rsync(
157
- "-rv", "--delete", "--exclude=.git/",
158
- f"{tests_dir}/",
159
- f"remote:{remote_dir}/tests",
160
- )
161
-
162
- def setup_plan(self, fmf_tests):
163
- """
164
- Install packages and run scripts, presumably extracted from a TMT plan
165
- provided as 'fmf_tests', an initialized FMFTests instance.
166
-
167
- Also prepare additional environment for tests, ie. create and export
168
- a path to TMT_PLAN_ENVIRONMENT_FILE.
169
- """
170
- # install packages from the plan
171
- if fmf_tests.prepare_pkgs:
172
- self.remote.cmd(
173
- (
174
- "dnf", "-y", "--setopt=install_weak_deps=False",
175
- "install", *fmf_tests.prepare_pkgs,
176
- ),
177
- check=True,
178
- )
179
-
180
- # create TMT_PLAN_ENVIRONMENT_FILE
181
- self.plan_env.update(fmf_tests.plan_env)
182
- env_file = f"{self._get_remote_dir()}/TMT_PLAN_ENVIRONMENT_FILE"
183
- self.remote.cmd(("truncate", "-s", "0", env_file), check=True)
184
- self.plan_env["TMT_PLAN_ENVIRONMENT_FILE"] = env_file
185
-
186
- # run prepare scripts
187
- env_args = (f"{k}={v}" for k, v in self.plan_env.items())
188
- for script in fmf_tests.prepare_scripts:
189
- self.remote.cmd(
190
- ("env", *env_args, "bash"),
191
- input=script,
192
- text=True,
193
- check=True,
194
- )
195
-
196
- def run_test(self, fmf_test, env=None):
197
- """
198
- Run one test on the remote system.
199
-
200
- 'fmf_test' is a FMFTest namedtuple with the test to run.
201
-
202
- 'env' is a dict of extra environment variables to pass to the test.
203
- """
204
- env_vars = self.plan_env.copy()
205
- for item in fmf.listlike(fmf_test.data, "environment"):
206
- env_vars.update(item)
207
- env_vars["ATEX_TEST_NAME"] = fmf_test.name
208
- env_vars["TMT_TEST_NAME"] = fmf_test.name
209
- if env:
210
- env_vars.update(env)
211
- env_args = (f"{k}={v}" for k, v in env_vars.items())
212
-
213
- # run a setup script, preparing wrapper + test scripts
214
- remote_dir = self._get_remote_dir()
215
- setup_script = scripts.test_setup(
216
- test=fmf_test,
217
- tests_dir=f"{remote_dir}/tests",
218
- wrapper_exec=f"{remote_dir}/wrapper.sh",
219
- test_exec=f"{remote_dir}/test.sh",
220
- debug=True,
221
- )
222
- self.remote.cmd(("bash",), input=setup_script, text=True, check=True)
223
-
224
- with contextlib.ExitStack() as stack:
225
- testout_fd = stack.enter_context(self.aggregator.open_tmpfile())
226
-
227
- duration = Duration(fmf_test.data.get("duration", "5m"))
228
-
229
- test_proc = None
230
- control_fd = None
231
- stack.callback(lambda: os.close(control_fd) if control_fd else None)
232
-
233
- def abort(msg):
234
- if test_proc:
235
- test_proc.kill()
236
- test_proc.wait()
237
- self.remote.release()
238
- raise TestAbortedError(msg) from None
239
-
240
- try:
241
- # TODO: probably enum
242
- state = "starting_test"
243
- while not duration.out_of_time():
244
- with self.lock:
245
- if self.cancelled:
246
- abort("cancel requested")
247
- return
248
-
249
- if state == "starting_test":
250
- control_fd, pipe_w = os.pipe()
251
- os.set_blocking(control_fd, False)
252
- control = testcontrol.TestControl(
253
- control_fd=control_fd,
254
- aggregator=self.aggregator,
255
- duration=duration,
256
- testout_fd=testout_fd,
257
- )
258
- # run the test in the background, letting it log output directly to
259
- # an opened file (we don't handle it, cmd client sends it to kernel)
260
- test_proc = self.remote.cmd(
261
- ("env", *env_args, f"{remote_dir}/wrapper.sh"),
262
- stdout=pipe_w,
263
- stderr=testout_fd,
264
- func=util.subprocess_Popen,
265
- )
266
- os.close(pipe_w)
267
- state = "reading_control"
268
-
269
- elif state == "reading_control":
270
- rlist, _, xlist = select.select((control_fd,), (), (control_fd,), 0.1)
271
- if xlist:
272
- abort(f"got exceptional condition on control_fd {control_fd}")
273
- elif rlist:
274
- control.process()
275
- if control.eof:
276
- os.close(control_fd)
277
- control_fd = None
278
- state = "waiting_for_exit"
279
-
280
- elif state == "waiting_for_exit":
281
- # control stream is EOF and it has nothing for us to read,
282
- # we're now just waiting for proc to cleanly terminate
283
- try:
284
- code = test_proc.wait(0.1)
285
- if code == 0:
286
- # wrapper exited cleanly, testing is done
287
- break
288
- else:
289
- # unexpected error happened (crash, disconnect, etc.)
290
- self.remote.disconnect()
291
- # if reconnect was requested, do so, otherwise abort
292
- if control.reconnect:
293
- state = "reconnecting"
294
- if control.reconnect != "always":
295
- control.reconnect = None
296
- else:
297
- abort(f"test wrapper unexpectedly exited with {code}")
298
- test_proc = None
299
- except subprocess.TimeoutExpired:
300
- pass
301
-
302
- elif state == "reconnecting":
303
- try:
304
- self.remote.connect(block=False)
305
- state = "reading_control"
306
- except BlockingIOError:
307
- pass
308
-
309
- else:
310
- raise AssertionError("reached unexpected state")
311
-
312
- else:
313
- abort("test duration timeout reached")
314
-
315
- # testing successful, do post-testing tasks
316
-
317
- # test wrapper hasn't provided exitcode
318
- if control.exit_code is None:
319
- abort("exitcode not reported, wrapper bug?")
320
-
321
- # partial results that were never reported
322
- if control.partial_results:
323
- control.result_seen = True # partial result is also a result
324
- for result in control.partial_results.values():
325
- self.aggregator.report(result)
326
-
327
- # test hasn't reported a single result, add an automatic one
328
- # as specified in RESULTS.md
329
- # {"status": "pass", "name": "/some/test", "testout": "output.txt"}
330
- if not control.result_seen:
331
- self.aggregator.link_tmpfile_to(fmf_test.name, "output.txt", testout_fd)
332
- self.aggregator.report({
333
- "status": "pass" if control.exit_code == 0 else "fail",
334
- "name": fmf_test.name,
335
- "testout": "output.txt",
336
- })
337
-
338
- except Exception:
339
- # if the test hasn't reported a single result, but still
340
- # managed to break something, provide at least the default log
341
- # for manual investigation - otherwise test output disappears
342
- if not control.result_seen:
343
- self.aggregator.link_tmpfile_to(fmf_test.name, "output.txt", testout_fd)
344
- raise
345
-
346
- def cancel(self):
347
- with self.lock:
348
- self.cancelled = True
atex/minitmt/fmf.py DELETED
@@ -1,202 +0,0 @@
1
- import re
2
- import collections
3
- from pathlib import Path
4
-
5
- # from system-wide sys.path
6
- import fmf
7
-
8
- # name: fmf path to the test as string, ie. /some/test
9
- # data: dict of the parsed fmf metadata (ie. {'tag': ... , 'environment': ...})
10
- # dir: relative pathlib.Path of the test .fmf to repo root, ie. some/test
11
- # (may be different from name for "virtual" tests that share the same dir)
12
- FMFTest = collections.namedtuple("FMFTest", ["name", "data", "dir"])
13
-
14
-
15
- def listlike(data, key):
16
- """
17
- Get a piece of fmf metadata as an iterable regardless of whether it was
18
- defined as a dict or a list.
19
-
20
- This is needed because many fmf metadata keys can be used either as
21
- some_key: 123
22
- or as lists via YAML syntax
23
- some_key:
24
- - 123
25
- - 456
26
- and, for simplicity, we want to always deal with lists (iterables).
27
- """
28
- if value := data.get(key):
29
- return value if isinstance(value, list) else (value,)
30
- else:
31
- return ()
32
-
33
-
34
- class FMFTests:
35
- """
36
- FMF test metadata parsed from on-disk metadata using a specific plan name,
37
- with all metadata dictionaries for all nodes being adjusted by that plan
38
- and (optionally) a specified context.
39
- """
40
- # TODO: usage example ^^^^
41
-
42
- def __init__(self, fmf_tree, plan_name, context=None):
43
- """
44
- 'fmf_tree' is filesystem path somewhere inside fmf metadata tree,
45
- or a root fmf.Tree instance.
46
-
47
- 'plan_name' is fmf identifier (like /some/thing) of a tmt plan
48
- to use for discovering tests.
49
-
50
- 'context' is a dict like {'distro': 'rhel-9.6'} used for filtering
51
- discovered tests.
52
- """
53
- # list of packages to install, as extracted from plan
54
- self.prepare_pkgs = []
55
- # list of scripts to run, as extracted from plan
56
- self.prepare_scripts = []
57
- # dict of environment, as extracted from plan
58
- self.plan_env = {}
59
- # dict indexed by test name, value is dict with fmf-parsed metadata
60
- self.tests = {}
61
- # dict indexed by test name, value is pathlib.Path of relative path
62
- # of the fmf metadata root towards the test metadata location
63
- self.test_dirs = {}
64
-
65
- tree = fmf_tree.copy() if isinstance(fmf_tree, fmf.Tree) else fmf.Tree(fmf_tree)
66
- ctx = fmf.Context(**context) if context else fmf.Context()
67
- tree.adjust(context=ctx)
68
-
69
- self.fmf_root = tree.root
70
-
71
- # lookup the plan first
72
- plan = tree.find(plan_name)
73
- if not plan:
74
- raise ValueError(f"plan {plan_name} not found in {tree.root}")
75
- if "test" in plan.data:
76
- raise ValueError(f"plan {plan_name} appears to be a test")
77
-
78
- # gather and merge plan-defined environment variables
79
- #
80
- # environment:
81
- # - FOO: BAR
82
- # BAR: BAZ
83
- for entry in listlike(plan.data, "environment"):
84
- self.plan_env.update(entry)
85
-
86
- # gather all prepare scripts / packages
87
- #
88
- # prepare:
89
- # - how: install
90
- # package:
91
- # - some-rpm-name
92
- # - how: shell
93
- # script:
94
- # - some-command
95
- for entry in listlike(plan.data, "prepare"):
96
- if "how" not in entry:
97
- continue
98
- if entry["how"] == "install":
99
- self.prepare_pkgs += listlike(entry, "package")
100
- elif entry["how"] == "shell":
101
- self.prepare_scripts += listlike(entry, "script")
102
-
103
- # gather all tests selected by the plan
104
- #
105
- # discover:
106
- # - how: fmf
107
- # filter:
108
- # - tag:some_tag
109
- # test:
110
- # - some-test-regex
111
- # exclude:
112
- # - some-test-regex
113
- if "discover" in plan.data:
114
- discover = plan.data["discover"]
115
- if not isinstance(discover, list):
116
- discover = (discover,)
117
-
118
- for entry in discover:
119
- if entry.get("how") != "fmf":
120
- continue
121
-
122
- filtering = {}
123
- for meta_name in ("filter", "test", "exclude"):
124
- if value := listlike(entry, meta_name):
125
- filtering[meta_name] = value
126
-
127
- children = tree.prune(
128
- names=filtering.get("test"),
129
- filters=filtering.get("filter"),
130
- )
131
- for child in children:
132
- # excludes not supported by .prune(), we have to do it here
133
- excludes = filtering.get("exclude")
134
- if excludes and any(re.match(x, child.name) for x in excludes):
135
- continue
136
- # only enabled tests
137
- if "enabled" in child.data and not child.data["enabled"]:
138
- continue
139
- # no manual tests and no stories
140
- if child.data.get("manual") or child.data.get("story"):
141
- continue
142
- # after adjusting above, any adjusts are useless, free some space
143
- if "adjust" in child.data:
144
- del child.data["adjust"]
145
-
146
- self.tests[child.name] = child.data
147
- # child.sources ie. ['/abs/path/to/some.fmf', '/abs/path/to/some/node.fmf']
148
- self.test_dirs[child.name] = \
149
- Path(child.sources[-1]).parent.relative_to(self.fmf_root)
150
-
151
- def as_fmftest(self, name):
152
- return FMFTest(name, self.tests[name], self.test_dirs[name])
153
-
154
- def as_fmftests(self):
155
- for name, data in self.tests.items():
156
- yield FMFTest(name, data, self.test_dirs[name])
157
-
158
- def match(self, regex):
159
- """
160
- Return an iterable of FMFTest instances with test names matching the
161
- specified regex via re.match(), just like how 'tmt' discovers tests.
162
- """
163
- for name, data in self.tests.items():
164
- if re.match(regex, name):
165
- yield FMFTest(name, data, self.test_dirs[name])
166
-
167
-
168
- # Some extra notes for fmf.prune() arguments:
169
- #
170
- # Set 'names' to filter by a list of fmf node names, ie.
171
- # ['/some/test', '/another/test']
172
- #
173
- # Set 'filters' to filter by a list of fmf-style filter expressions, see
174
- # https://fmf.readthedocs.io/en/stable/modules.html#fmf.filter
175
- #
176
- # Set 'conditions' to filter by a list of python expressions whose namespace
177
- # locals() are set up to be a dictionary of the tree. When any of the
178
- # expressions returns True, the tree is returned, ie.
179
- # ['environment["FOO"] == "BAR"']
180
- # ['"enabled" not in locals() or enabled']
181
- # Note that KeyError is silently ignored and treated as False.
182
- #
183
- # Set 'context' to a dictionary to post-process the tree metadata with
184
- # adjust expressions (that may be present in a tree) using the specified
185
- # context. Any other filters are applied afterwards to allow modification
186
- # of tree metadata by the adjust expressions. Ie.
187
- # {'distro': 'rhel-9.6.0', 'arch': 'x86_64'}
188
-
189
- Platform = collections.namedtuple("Platform", ["distro", "arch"])
190
-
191
-
192
- def combine_platforms(fmf_path, plan_name, platforms):
193
- # TODO: document
194
- fmf_tests = {}
195
- tree = fmf.Tree(fmf_path)
196
- for platform in platforms:
197
- context = {"distro": platform.distro, "arch": platform.arch}
198
- fmf_tests[platform] = FMFTests(tree, plan_name, context=context)
199
- return fmf_tests
200
-
201
- # TODO: in Orchestrator, when a Provisioner becomes free, have it pick a test
202
- # from the appropriate tests[platform] per the Provisioner's platform
@@ -1,74 +0,0 @@
1
- The idea is to use systemd-nspawn containers on the host, binding
2
- /dev/kvm to each, thus avoiding the need for nested virt as our first layer
3
- of Contest tests will run in the containers (installing libvirtd, etc.)
4
- and the second layer (VMs created by tests) will use virtual machines,
5
- via a non-nested HVM.
6
-
7
- systemd-nspawn containers can have CPU core limits, memory limits, etc.
8
- done via cgroups, so we can provide some level of isolation/safety.
9
-
10
-
11
- systemd-nspawn can create its own veth via --network-veth=... and put it into
12
- a bridge automatically via --network-bridge=...
13
-
14
- We can then use NetworkManager + firewalld to pre-create a bridge with built-in
15
- DHCP and NAT to the outside, via something like
16
-
17
- nmcli connection add type bridge ifname br0 con-name br0 ipv4.method shared ipv6.method ignore
18
-
19
- According to https://fedoramagazine.org/internet-connection-sharing-networkmanager/
20
- the ipv4.method=shared :
21
-
22
- enables IP forwarding for the interface;
23
- adds firewall rules and enables masquerading;
24
- starts dnsmasq as a DHCP and DNS server.
25
-
26
- Specifically it should add MASQUERADE on packets *outgoing* from the bridge subnet,
27
- so it shouldn't need any modification of the upstream eth0 device or any firewall rules tied to it.
28
-
29
- There also seems to be an ipv4.addresses option (ie. 192.168.42.1/24) for changing the bridge subnet.
30
-
31
- If that doesn't work, firewalld has an External zone that has <masquerade/>
32
- by default, so
33
-
34
- nmcli connection modify br0 connection.zone external
35
-
36
- should work.
37
-
38
-
39
- --------
40
-
41
- TODO: We need some way to get DHCP leases for started containers (so we can connect
42
- to the containerized sshd).
43
-
44
- If there is no command for it via nmcli, it should be possible to just
45
- extract it from wherever NetworkManager pointed dnsmasq to store its leases file.
46
-
47
- We can then probably correlate --network-veth=... device from systemd-nspawn
48
- (named after --machine=... name, prefixed with ve-* or vb-* if --network-bridge=* is used)
49
- to the leased IP address.
50
-
51
- ls -l /var/lib/NetworkManager/dnsmasq-*.leases
52
-
53
- Or perhaps parse it out of 'ip neigh' to make sure the guest is *really* up.
54
- - 'ip neigh' gives us MAC-to-IP, but device is always br0
55
- - 'ip link show dev vb-contname' should give us the MAC for 'ip neigh'
56
- - if container veth endpoint uses different mac, we can query bridge forward DB
57
- via 'bridge fdb' to get all MACs that appeared on the veth
58
-
59
- --------
60
-
61
- Containers can be installed via ie.
62
-
63
- dnf --releasever=41 --installroot=/var/lib/machines/f41 --use-host-config \
64
- --setopt=install_weak_deps=False \
65
- install \
66
- passwd dnf fedora-release vim-minimal util-linux systemd NetworkManager
67
-
68
- where --use-host-config re-uses host repositories.
69
-
70
- Maybe consider 'machinectl'-managed containers (start/terminate/kill/reboot/etc.)
71
- which are just repackaged systemd-nspawn@ services.
72
- - Especially since there is no concept of "throw away disk snapshot with container exit",
73
- we always need some copy/clone of the --installroot for each instance of the container,
74
- so using ie. 'machinectl clone ...' would provide a nice interface for it.
@@ -1,59 +0,0 @@
1
-
2
- making a podman image from the currently installed OS:
3
-
4
- 1) dnf install into a separate installroot
5
-
6
- dnf
7
- --installroot=$INSTALLROOT \
8
- --setopt=install_weak_deps=False \
9
- --setopt=tsflags=nodocs \
10
- -y groupinstall minimal-environment
11
-
12
- as root (doesn't work well with unshare; it might work via bwrap (bubblewrap))
13
-
14
- maybe the unprivileged solution is pulling image from hub + installing @minimal-environment
15
- into it (perhaps via podman build)
16
-
17
-
18
- 2) post process it
19
-
20
- echo -n > "$INSTALLROOT/etc/machine-id"
21
- echo container > "$INSTALLROOT/etc/hostname"
22
-
23
- rm -rf "$INSTALLROOT/etc/yum.repos.d"
24
- cp -f /etc/yum.repos.d/* "$INSTALLROOT/etc/yum.repos.d/."
25
- cp -f /etc/pki/rpm-gpg/* "$INSTALLROOT/etc/pki/rpm-gpg/."
26
-
27
- echo install_weak_deps=False >> "$INSTALLROOT/etc/dnf/dnf.conf"
28
- echo tsflags=nodocs >> "$INSTALLROOT/etc/dnf/dnf.conf"
29
-
30
- ln -sf \
31
- /usr/lib/systemd/system/multi-user.target \
32
- "$INSTALLROOT/etc/systemd/system/default.target"
33
-
34
- # disable auditd
35
- # disable other services
36
- # set root password
37
-
38
- dnf clean all --installroot="$INSTALLROOT"
39
-
40
-
41
- 3) pack it
42
-
43
- tar --xattrs -C "$INSTALLROOT" -cvf tarball.tar .
44
-
45
- rm -rf "$INSTALLROOT"
46
-
47
-
48
- 4) import it to podman
49
-
50
- podman import --change 'CMD ["/sbin/init"]' tarball.tar my-image-name
51
-
52
-
53
- 5) run it
54
-
55
- podman {run,create} --systemd=always --cgroups=split ...
56
-
57
-
58
-
59
- ------------------------------