atex-0.4-py3-none-any.whl → atex-0.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,163 @@
+ """
+ Functions and utilities for persistently storing test results and files (logs).
+
+ There is a global aggregator (i.e. CSVAggregator) that handles all the results
+ from all platforms (arches and distros), and several per-platform aggregators
+ that are used by test execution logic.
+
+     with CSVAggregator("results.csv.gz", "file/storage/dir") as global_aggr:
+         reporter = global_aggr.for_platform("rhel-9@x86_64")
+         reporter.report({"name": "/some/test", "status": "pass"})
+         with reporter.open_tmpfile() as fd:
+             os.write(fd, b"some contents")
+             reporter.link_tmpfile_to("/some/test", "test.log", fd)
+ """
+
+ import os
+ import csv
+ import gzip
+ import ctypes
+ import ctypes.util
+ import threading
+ import contextlib
+ from pathlib import Path
+
+
+ libc = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True)
+
+ # int linkat(int olddirfd, const char *oldpath, int newdirfd, const char *newpath, int flags)
+ libc.linkat.argtypes = (
+     ctypes.c_int,
+     ctypes.c_char_p,
+     ctypes.c_int,
+     ctypes.c_char_p,
+     ctypes.c_int,
+ )
+ libc.linkat.restype = ctypes.c_int
+
+ # fcntl.h:#define AT_EMPTY_PATH 0x1000 /* Allow empty relative pathname */
+ AT_EMPTY_PATH = 0x1000
+
+ # fcntl.h:#define AT_FDCWD -100 /* Special value used to indicate
+ AT_FDCWD = -100
+
+
+ def linkat(*args):
+     if (ret := libc.linkat(*args)) == -1:
+         errno = ctypes.get_errno()
+         raise OSError(errno, os.strerror(errno))
+     return ret
+
+
+ def _normalize_path(path):
+     # the magic here is to treat any dangerous path as starting at /
+     # and resolve any weird constructs relative to /, and then simply
+     # strip off the leading / and use it as a relative path
+     path = path.lstrip("/")
+     path = os.path.normpath(f"/{path}")
+     return path[1:]
+
+
+ class CSVAggregator:
+     """
+     Collects reported results as a GZIP-ed CSV and files (logs) under a related
+     directory.
+     """
+
+     class _ExcelWithUnixNewline(csv.excel):
+         lineterminator = "\n"
+
+     def __init__(self, results_file, storage_dir):
+         self.lock = threading.RLock()
+         self.storage_dir = Path(storage_dir)
+         self.results_file = Path(results_file)
+         self.csv_writer = None
+         self.results_gzip_handle = None
+
+     def __enter__(self):
+         if self.results_file.exists():
+             raise FileExistsError(f"{self.results_file} already exists")
+         f = gzip.open(self.results_file, "wt", newline="")
+         try:
+             self.csv_writer = csv.writer(f, dialect=self._ExcelWithUnixNewline)
+         except:
+             f.close()
+             raise
+         self.results_gzip_handle = f
+
+         if self.storage_dir.exists():
+             raise FileExistsError(f"{self.storage_dir} already exists")
+         self.storage_dir.mkdir()
+
+         return self
+
+     def __exit__(self, exc_type, exc_value, traceback):
+         self.results_gzip_handle.close()
+         self.results_gzip_handle = None
+         self.csv_writer = None
+
+     def report(self, platform, status, name, note, *files):
+         with self.lock:
+             self.csv_writer.writerow((platform, status, name, note, *files))
+
+     def for_platform(self, platform_string):
+         """
+         Return a ResultAggregator instance that writes results into this
+         CSVAggregator instance.
+         """
+         def report(result_line):
+             file_names = []
+             if "testout" in result_line:
+                 file_names.append(result_line["testout"])
+             if "files" in result_line:
+                 file_names += (f["name"] for f in result_line["files"])
+             self.report(
+                 platform_string, result_line["status"], result_line["name"],
+                 result_line.get("note", ""), *file_names,
+             )
+         platform_dir = self.storage_dir / platform_string
+         platform_dir.mkdir(exist_ok=True)
+         return ResultAggregator(report, platform_dir)
+
+
+ class ResultAggregator:
+     """
+     Collects reported results (in a format specified by RESULTS.md) for
+     a specific platform, storing them persistently.
+     """
+
+     def __init__(self, callback, storage_dir):
+         """
+         'callback' is a function to call to record a result, with the
+         result dict passed as an argument.
+
+         'storage_dir' is a directory for storing uploaded files.
+         """
+         self.report = callback
+         self.storage_dir = storage_dir
+
+     @contextlib.contextmanager
+     def open_tmpfile(self, open_mode=os.O_WRONLY):
+         """
+         Open an anonymous (name-less) file for writing and yield its file
+         descriptor (int) as context, closing it when the context is exited.
+         """
+         flags = open_mode | os.O_TMPFILE
+         fd = os.open(self.storage_dir, flags, 0o644)
+         try:
+             yield fd
+         finally:
+             os.close(fd)
+
+     def link_tmpfile_to(self, result_name, file_name, fd):
+         """
+         Store a file named 'file_name' in a directory relevant to 'result_name'
+         whose 'fd' (a file descriptor) was created by .open_tmpfile().
+
+         This function can be called multiple times with the same 'fd', and
+         does not close or otherwise alter the descriptor.
+         """
+         # /path/to/all/logs / some/test/name / path/to/file.log
+         file_path = self.storage_dir / result_name.lstrip("/") / _normalize_path(file_name)
+         file_path.parent.mkdir(parents=True, exist_ok=True)
+         linkat(fd, b"", AT_FDCWD, bytes(file_path), AT_EMPTY_PATH)
@@ -1,10 +1,11 @@
- import importlib
- import pkgutil
+ import importlib as _importlib
+ import pkgutil as _pkgutil
+ import threading as _threading

- from .. import util
+ from .. import connection as _connection


- class Provisioner(util.LockableClass):
+ class Provisioner:
      """
      A resource (machine/system) provider.

@@ -67,47 +68,88 @@ class Provisioner(util.LockableClass):
          Initialize the provisioner instance.
          If extending __init__, always call 'super().__init__()' at the top.
          """
-         super().__init__()
-
-     def reserve(self):
-         """
-         Send a reservation request for a resource and wait for it to be
-         reserved.
-         """
-         raise NotImplementedError(f"'reserve' not implemented for {self.__class__.__name__}")
+         self.lock = _threading.RLock()
+
+     # def reserve(self):
+     #     """
+     #     Send a reservation request for a resource and wait for it to be
+     #     reserved.
+     #     """
+     #     raise NotImplementedError(f"'reserve' not implemented for {self.__class__.__name__}")
+     #
+     # def connection(self):
+     #     """
+     #     Return an atex.ssh.SSHConn instance configured for connection to
+     #     the reserved resource, but not yet connected.
+     #     """
+     #     raise NotImplementedError(f"'connection' not implemented for {self.__class__.__name__}")
+     #
+     # def release(self):
+     #     """
+     #     Release a reserved resource, or cancel a reservation-in-progress.
+     #     """
+     #     raise NotImplementedError(f"'release' not implemented for {self.__class__.__name__}")
+     #
+     # def alive(self):
+     #     """
+     #     Return True if the resource is still reserved, False otherwise.
+     #     """
+     #     raise NotImplementedError(f"'alive' not implemented for {self.__class__.__name__}")
+
+
+ class Remote(_connection.Connection):
+     """
+     Representation of a provisioned (reserved) remote system, providing
+     a Connection-like API in addition to system management helpers.
+
+     An instance of Remote is typically prepared by a Provisioner and given
+     away for further use, to be .release()d by the user. It is not meant
+     for repeated reserve/release cycles, hence the lack of .reserve().
+
+     Also note that Remote can be used via a Context Manager, but does not
+     do automatic .release(); the manager only handles the built-in Connection.
+     The intention is for a Provisioner to run via its own Context Manager and
+     release all Remotes upon exit.
+     If you need automatic release of one Remote, use a contextlib.ExitStack
+     with a callback, or a try/finally block.
+     """

-     def connection(self):
-         """
-         Return an atex.ssh.SSHConn instance configured for connection to
-         the reserved resource, but not yet connected.
-         """
-         raise NotImplementedError(f"'connection' not implemented for {self.__class__.__name__}")
+     # TODO: pass platform as arg ?
+     # def __init__(self, platform, *args, **kwargs):
+     #     """
+     #     Initialize a new Remote instance based on a Connection instance.
+     #     If extending __init__, always call 'super().__init__(conn)' at the top.
+     #     """
+     #     self.lock = _threading.RLock()
+     #     self.platform = platform

      def release(self):
          """
-         Release a reserved resource, or cancel a reservation-in-progress.
+         Release (de-provision) the remote resource, freeing resources.
          """
          raise NotImplementedError(f"'release' not implemented for {self.__class__.__name__}")

      def alive(self):
          """
-         Return True if the resource is still reserved, False otherwise.
+         Return True if the remote resource is still valid and reserved.
          """
          raise NotImplementedError(f"'alive' not implemented for {self.__class__.__name__}")


- def find_provisioners():
-     provisioners = []
-     for info in pkgutil.iter_modules(__spec__.submodule_search_locations):
-         mod = importlib.import_module(f'.{info.name}', __name__)
-         # look for Provisioner-derived classes in the module
-         for attr in dir(mod):
-             if attr.startswith('_'):
-                 continue
-             value = getattr(mod, attr)
-             try:
-                 if issubclass(value, Provisioner):
-                     provisioners.append(attr)
-             except TypeError:
-                 pass
-     return provisioners
+ _submodules = [
+     info.name for info in _pkgutil.iter_modules(__spec__.submodule_search_locations)
+ ]
+
+ __all__ = [*_submodules, Provisioner.__name__, Remote.__name__]  # noqa: PLE0604
+
+
+ def __dir__():
+     return __all__
+
+
+ # lazily import submodules
+ def __getattr__(attr):
+     if attr in _submodules:
+         return _importlib.import_module(f".{attr}", __name__)
+     else:
+         raise AttributeError(f"module '{__name__}' has no attribute '{attr}'")
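
The rewritten __init__.py trades the eager find_provisioners() scan for lazy submodule loading via module-level __getattr__ and __dir__ (PEP 562): each submodule is imported only on first attribute access. A hedged usage sketch; the 'atex.provision' package path and the 'libvirt' submodule name are assumptions inferred from other files in this diff:

    from atex import provision

    print(dir(provision))      # _submodules plus 'Provisioner' and 'Remote'
    mod = provision.libvirt    # first access triggers import_module(".libvirt", ...)
    # provision.no_such_module   # would raise AttributeError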
@@ -49,3 +49,11 @@ FULLY CUSTOM INSTALLS:
  - basically virt-install creating a new domain (ignoring any pre-defined ones)
  - probably shouldn't be used by automation, only for one-VM-at-a-time on user request
  - (no free memory/disk checking, no libvirt locking, etc.)
+
+
+
+ # ssh via ProxyJump allowing ssh keys specification
+ ssh \
+     -o ProxyCommand='ssh -i /tmp/proxy_sshkey root@3.21.232.206 -W %h:%p' \
+     -i /tmp/destination_sshkey \
+     root@192.168.123.218
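
The ProxyCommand form (rather than plain -J) is what lets the jump hop use its own identity file. A minimal Python sketch of driving the same invocation, assuming the OpenSSH client is installed; the helper name is hypothetical and the hosts/key paths are just the example values above:

    import subprocess

    def ssh_via_jump(jump, jump_key, dest, dest_key, *cmd):
        # the inner ssh forwards stdio to the destination via -W
        proxy = f"ssh -i {jump_key} {jump} -W %h:%p"
        return subprocess.run(
            ["ssh", "-o", f"ProxyCommand={proxy}", "-i", dest_key, dest, *cmd],
            check=True,
        )

    # ssh_via_jump("root@3.21.232.206", "/tmp/proxy_sshkey",
    #              "root@192.168.123.218", "/tmp/destination_sshkey", "uptime")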
@@ -1,8 +1,8 @@
- from .. import Provisioner as _Provisioner
+ from .. import base
  from ... import util, ssh


- class LibvirtProvisioner(_Provisioner):
+ class LibvirtProvisioner(base.Provisioner):
      number = 123

      def reserve(self):
@@ -12,9 +12,9 @@ class LibvirtProvisioner(_Provisioner):
      # can be overridden by a getter function if you need to keep track of
      # how many times it was accessed
      def connection(self):
-         #return {'Hostname': '1.2.3.4', 'User': 'root', 'IdentityFile': ...}
+         #return {"Hostname": "1.2.3.4", "User": "root", "IdentityFile": ...}
          util.debug(f"returning ssh for {self.number}")
-         return ssh.SSHConn({'Hostname': '1.2.3.4', 'User': 'root'})
+         return ssh.SSHConn({"Hostname": "1.2.3.4", "User": "root"})

      def release(self):
          util.debug(f"releasing {self.number}")
@@ -0,0 +1,74 @@
+ The idea is to use systemd-nspawn containers on the host, binding
+ /dev/kvm to each, thus avoiding the need for nested virt: our first layer
+ of Contest tests will run in the containers (installing libvirtd, etc.)
+ and the second layer (VMs created by tests) will run in virtual machines
+ via non-nested HVM.
+
+ systemd-nspawn containers can have CPU core limits, memory limits, etc.
+ applied via cgroups, so we can provide some level of isolation/safety.
+
+
+ systemd-nspawn can create its own veth via --network-veth=... and put it into
+ a bridge automatically via --network-bridge=...
+
+ We can then use NetworkManager + firewalld to pre-create a bridge with built-in
+ DHCP and NAT to the outside, via something like
+
+     nmcli connection add type bridge ifname br0 con-name br0 ipv4.method shared ipv6.method ignore
+
+ According to https://fedoramagazine.org/internet-connection-sharing-networkmanager/
+ ipv4.method=shared:
+
+     enables IP forwarding for the interface;
+     adds firewall rules and enables masquerading;
+     starts dnsmasq as a DHCP and DNS server.
+
+ Specifically, it should add MASQUERADE on packets *outgoing* from the bridge subnet,
+ so it shouldn't need any modification of the upstream eth0 device or any fw rules tied to it.
+
+ There also seems to be ipv4.addresses 192.168.42.1/24 to modify the subnet?
+
+ If that doesn't work, firewalld has an External zone that has <masquerade/>
+ by default, so
+
+     nmcli connection modify br0 connection.zone external
+
+ should work.
+
+
+ --------
+
+ TODO: We need some way to get DHCP leases for started containers (so we can connect
+ to the containerized sshd); see the sketch after these notes.
+
+ If there is no command for it via nmcli, it should be possible to just
+ extract it from wherever NetworkManager pointed dnsmasq to store its leases file.
+
+ We can then probably correlate the --network-veth=... device from systemd-nspawn
+ (named after the --machine=... name, prefixed with ve-*, or vb-* if --network-bridge=* is used)
+ to the leased IP address.
+
+     ls -l /var/lib/NetworkManager/dnsmasq-*.leases
+
+ Or perhaps parse it out of 'ip neigh' to make sure the guest is *really* up.
+ - 'ip neigh' gives us MAC-to-IP, but the device is always br0
+ - 'ip link show dev vb-contname' should give us the MAC for 'ip neigh'
+ - if the container veth endpoint uses a different MAC, we can query the bridge forwarding DB
+   via 'bridge fdb' to get all MACs that appeared on the veth
+
+ --------
+
+ Containers can be installed via e.g.
+
+     dnf --releasever=41 --installroot=/var/lib/machines/f41 --use-host-config \
+         --setopt=install_weak_deps=False \
+         install \
+         passwd dnf fedora-release vim-minimal util-linux systemd NetworkManager
+
+ where --use-host-config re-uses the host repositories.
+
+ Maybe consider 'machinectl'-managed containers (start/terminate/kill/reboot/etc.),
+ which are just repackaged systemd-nspawn@ services.
+ - Especially since there is no concept of a "throw-away disk snapshot on container exit",
+   we always need some copy/clone of the --installroot for each instance of the container,
+   so using e.g. 'machinectl clone ...' would provide a nice interface for it.
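
A minimal sketch of the lease correlation described in the TODO above, assuming dnsmasq's standard five-field leases format (expiry, MAC, IP, hostname, client-id), the vb-* veth naming, and that the container's MAC shows up in the bridge forwarding DB:

    import subprocess
    from pathlib import Path

    def macs_on_veth(ifname):
        # MACs the bridge forwarding DB has seen on the container's veth endpoint
        out = subprocess.run(["bridge", "fdb", "show", "dev", ifname],
                             capture_output=True, text=True, check=True).stdout
        return {line.split()[0] for line in out.splitlines() if line.strip()}

    def leased_ip(machine_name):
        macs = macs_on_veth(f"vb-{machine_name}")  # vb-* with --network-bridge=
        for leases in Path("/var/lib/NetworkManager").glob("dnsmasq-*.leases"):
            for line in leases.read_text().splitlines():
                fields = line.split()
                if len(fields) >= 3 and fields[1] in macs:
                    return fields[2]
        return None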
@@ -0,0 +1,59 @@
+
+ making a podman image from the currently installed OS:
+
+ 1) dnf install into a separate installroot
+
+     dnf \
+         --installroot=$INSTALLROOT \
+         --setopt=install_weak_deps=False \
+         --setopt=tsflags=nodocs \
+         -y groupinstall minimal-environment
+
+    as root (doesn't work well with unshare; maybe could work via bwrap (bubblewrap))
+
+    maybe the unprivileged solution is pulling an image from a hub + installing
+    @minimal-environment into it (perhaps via podman build)
+
+
+ 2) post-process it
+
+     echo -n > "$INSTALLROOT/etc/machine-id"
+     echo container > "$INSTALLROOT/etc/hostname"
+
+     rm -rf "$INSTALLROOT"/etc/yum.repos.d/*
+     cp -f /etc/yum.repos.d/* "$INSTALLROOT/etc/yum.repos.d/."
+     cp -f /etc/pki/rpm-gpg/* "$INSTALLROOT/etc/pki/rpm-gpg/."
+
+     echo install_weak_deps=False >> "$INSTALLROOT/etc/dnf/dnf.conf"
+     echo tsflags=nodocs >> "$INSTALLROOT/etc/dnf/dnf.conf"
+
+     ln -sf \
+         /usr/lib/systemd/system/multi-user.target \
+         "$INSTALLROOT/etc/systemd/system/default.target"
+
+     # disable auditd
+     # disable other services
+     # set root password
+
+     dnf clean all --installroot="$INSTALLROOT"
+
+
+ 3) pack it
+
+     tar --xattrs -C "$INSTALLROOT" -cvf tarball.tar .
+
+     rm -rf "$INSTALLROOT"
+
+
+ 4) import it to podman
+
+     podman import --change 'CMD ["/sbin/init"]' tarball.tar my-image-name
+
+
+ 5) run it
+
+     podman {run,create} --systemd=always --cgroups=split ...
+
+
+
+ ------------------------------
@@ -0,0 +1,74 @@
+ #!/bin/bash
+
+ if [[ $# -lt 1 ]]; then
+     echo "usage: $0 <podman-image-name>" >&2
+     exit 1
+ fi
+ image_name="$1"
+
+ set -e -x
+
+ tmpdir=$(mktemp -d -p /var/tmp)
+ trap "rm -rf '$tmpdir'" EXIT
+
+ installroot="$tmpdir/root"
+
+ dnf \
+     --installroot="$installroot" \
+     --setopt=install_weak_deps=False \
+     --setopt=tsflags=nodocs \
+     -q -y groupinstall minimal-environment
+
+ echo -n > "$installroot/etc/machine-id"
+ #echo container > "$installroot/etc/hostname"
+
+ cp -f /etc/yum.repos.d/* "$installroot/etc/yum.repos.d/."
+ cp -f /etc/pki/rpm-gpg/* "$installroot/etc/pki/rpm-gpg/."
+
+ echo install_weak_deps=False >> "$installroot/etc/dnf/dnf.conf"
+ echo tsflags=nodocs >> "$installroot/etc/dnf/dnf.conf"
+
+ ln -sf \
+     /usr/lib/systemd/system/multi-user.target \
+     "$installroot/etc/systemd/system/default.target"
+
+ systemctl --root="$installroot" disable \
+     auditd.service crond.service rhsmcertd.service sshd.service
+
+ #encrypted=$(openssl passwd -6 somepass)
+ #usermod --root="$installroot" --password "$encrypted" root
+
+ dnf clean packages --installroot="$installroot"
+
+ tar --xattrs -C "$installroot" -cf "$tmpdir/packed.tar" .
+
+ rm -rf "$installroot"
+
+ podman import \
+     --change 'CMD ["/sbin/init"]' \
+     "$tmpdir/packed.tar" "$image_name"
+
+ # start as
+ #   podman {run,create} --systemd=always --cgroups=split --device /dev/kvm ...
+ #
+ #   podman run -t -i \
+ #       --systemd=always --cgroups=split \
+ #       --device /dev/kvm \
+ #       --network=bridge \
+ #       --cap-add NET_ADMIN --cap-add NET_RAW --cap-add SYS_MODULE \
+ #       --mount type=bind,src=/lib/modules,dst=/lib/modules,ro \
+ #       --mount type=bind,src=/proc/sys/net,dst=/proc/sys/net,rw \
+ #       my_container
+ #
+ # as unprivileged user:
+ #   podman run -t -i \
+ #       --systemd=always --cgroups=split --network=bridge --privileged \
+ #       my_container
+ #
+ # container setup:
+ #   dnf -y install libvirt-daemon qemu-kvm libvirt-client libvirt-daemon-driver-qemu virt-install libvirt-daemon-driver-storage libvirt-daemon-config-network
+ #   echo $'user = "root"\ngroup = "root"\nremember_owner = 0' >> /etc/libvirt/qemu.conf
+ #   systemctl start virtqemud.socket virtstoraged.socket virtnetworkd.socket
+ #   virsh net-start default
+ #   virt-install --install fedora40 --disk /var/lib/libvirt/images/foo.qcow2,size=20 --console pty --check disk_size=off --unattended --graphics none
+
@@ -0,0 +1,29 @@
+ #from ... import connection
+
+ from .. import Provisioner, Remote
+
+ #from . import api
+
+
+ class TestingFarmRemote(Remote):
+     def __init__(self, connection, request):
+         """
+         'connection' is an instance of the Connection class.
+
+         'request' is an instance of the Testing Farm Request class.
+         """
+         super().__init__(connection)
+         self.request = request
+         self.valid = True
+
+     def release(self):
+         self.disconnect()
+         self.request.cancel()
+         self.valid = False
+
+     def alive(self):
+         return self.valid
+
+
+ class TestingFarmProvisioner(Provisioner):
+     pass
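
Since Remote's context manager deliberately does not release (per the Remote docstring earlier in this diff), per-remote cleanup falls to the caller. A hedged sketch of the contextlib.ExitStack pattern that docstring recommends; how a TestingFarmRemote is actually obtained is not shown in this diff, so the remote is assumed to be handed out by some provisioner:

    import contextlib

    def use_remote(remote):
        # 'remote' is assumed to be a TestingFarmRemote from a provisioner
        with contextlib.ExitStack() as stack:
            stack.callback(remote.release)  # guarantees .release() on any exit
            with remote:                    # handles only the built-in Connection
                ...                         # drive the Connection-like API here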