atex-0.8-py3-none-any.whl → atex-0.10-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- atex/aggregator/__init__.py +60 -0
- atex/aggregator/json.py +96 -0
- atex/cli/__init__.py +11 -1
- atex/cli/fmf.py +73 -23
- atex/cli/libvirt.py +128 -0
- atex/cli/testingfarm.py +60 -3
- atex/connection/__init__.py +13 -11
- atex/connection/podman.py +61 -0
- atex/connection/ssh.py +38 -47
- atex/executor/executor.py +144 -119
- atex/executor/reporter.py +66 -71
- atex/executor/scripts.py +13 -5
- atex/executor/testcontrol.py +43 -30
- atex/fmf.py +94 -74
- atex/orchestrator/__init__.py +76 -2
- atex/orchestrator/adhoc.py +465 -0
- atex/{provision → provisioner}/__init__.py +54 -42
- atex/provisioner/libvirt/__init__.py +2 -0
- atex/provisioner/libvirt/libvirt.py +472 -0
- atex/provisioner/libvirt/locking.py +170 -0
- atex/{provision → provisioner}/libvirt/setup-libvirt.sh +21 -1
- atex/provisioner/podman/__init__.py +2 -0
- atex/provisioner/podman/podman.py +169 -0
- atex/{provision → provisioner}/testingfarm/api.py +121 -69
- atex/{provision → provisioner}/testingfarm/testingfarm.py +44 -52
- atex/util/libvirt.py +18 -0
- atex/util/log.py +53 -43
- atex/util/named_mapping.py +158 -0
- atex/util/subprocess.py +46 -12
- atex/util/threads.py +71 -20
- atex-0.10.dist-info/METADATA +86 -0
- atex-0.10.dist-info/RECORD +44 -0
- atex/orchestrator/aggregator.py +0 -106
- atex/orchestrator/orchestrator.py +0 -324
- atex/provision/libvirt/__init__.py +0 -24
- atex/provision/podman/README +0 -59
- atex/provision/podman/host_container.sh +0 -74
- atex-0.8.dist-info/METADATA +0 -197
- atex-0.8.dist-info/RECORD +0 -37
- atex/{provision → provisioner}/libvirt/VM_PROVISION +0 -0
- atex/{provision → provisioner}/testingfarm/__init__.py +0 -0
- {atex-0.8.dist-info → atex-0.10.dist-info}/WHEEL +0 -0
- {atex-0.8.dist-info → atex-0.10.dist-info}/entry_points.txt +0 -0
- {atex-0.8.dist-info → atex-0.10.dist-info}/licenses/COPYING.txt +0 -0
atex/{provision → provisioner}/testingfarm/testingfarm.py
CHANGED

@@ -8,73 +8,64 @@ from .. import Provisioner, Remote
 from . import api
 
 
-class TestingFarmRemote(Remote, connection.ssh.
+class TestingFarmRemote(Remote, connection.ssh.ManagedSSHConnection):
     """
     Built on the official Remote API, pulling in the Connection API
-    as implemented by
+    as implemented by ManagedSSHConnection.
     """
 
-    def __init__(self, ssh_options, *, release_hook
+    def __init__(self, request_id, ssh_options, *, release_hook):
         """
-        '
+        'request_id' is a string with Testing Farm request UUID (for printouts).
+
+        'ssh_options' are a dict, passed to ManagedSSHConnection __init__().
 
         'release_hook' is a callable called on .release() in addition
         to disconnecting the connection.
         """
-        #
+        # NOTE: self.lock inherited from ManagedSSHConnection
         super().__init__(options=ssh_options)
+        self.request_id = request_id
         self.release_hook = release_hook
-        self.provisioner = provisioner
-        self.lock = threading.RLock()
         self.release_called = False
 
     def release(self):
         with self.lock:
-            if
-                self.release_called = True
-            else:
+            if self.release_called:
                 return
-
+            else:
+                self.release_called = True
             self.disconnect()
+            self.release_hook(self)
 
     # not /technically/ a valid repr(), but meh
     def __repr__(self):
         class_name = self.__class__.__name__
-
-
-
-
-
-        # return self.valid
-
-        # TODO: def __str__(self): as root@1.2.3.4 and arch, ranch, etc.
+        ssh_user = self.options.get("User", "unknown")
+        ssh_host = self.options.get("Hostname", "unknown")
+        ssh_port = self.options.get("Port", "unknown")
+        ssh_key = self.options.get("IdentityFile", "unknown")
+        return f"{class_name}({ssh_user}@{ssh_host}:{ssh_port}@{ssh_key}, {self.request_id})"
 
 
 class TestingFarmProvisioner(Provisioner):
-
-    # starts at default and scales up/down as needed
+    absolute_max_remotes = 100
 
-    def __init__(self, compose, arch="x86_64", *,
+    def __init__(self, compose, arch="x86_64", *, max_retries=10, **reserve_kwargs):
         """
         'compose' is a Testing Farm compose to prepare.
 
         'arch' is an architecture associated with the compose.
 
-        'max_systems' is an int of how many systems to reserve (and keep
-        reserved) in an internal pool.
-
-        'timeout' is the maximum Testing Farm pipeline timeout (waiting for
-        a system + OS installation + reservation time).
-
         'max_retries' is a maximum number of provisioning (Testing Farm) errors
         that will be reprovisioned before giving up.
         """
-
-        self.compose = compose
+        self.lock = threading.RLock()
+        self.compose = compose
         self.arch = arch
-        self.
-        self.timeout = timeout
+        self.reserve_kwargs = reserve_kwargs
         self.retries = max_retries
+
         self._tmpdir = None
         self.ssh_key = self.ssh_pubkey = None
         self.queue = util.ThreadQueue(daemon=True)

@@ -104,7 +95,9 @@ class TestingFarmProvisioner(Provisioner):
             "Hostname": machine.host,
             "User": machine.user,
             "Port": machine.port,
-            "IdentityFile": machine.ssh_key,
+            "IdentityFile": machine.ssh_key.absolute(),
+            "ConnectionAttempts": "1000",
+            "Compression": "yes",
         }
 
         def release_hook(remote):

@@ -118,9 +111,9 @@ class TestingFarmProvisioner(Provisioner):
             tf_reserve.release()
 
         remote = TestingFarmRemote(
+            tf_reserve.request.id,
             ssh_options,
             release_hook=release_hook,
-            provisioner=self,
         )
         remote.connect()
 

@@ -139,9 +132,9 @@ class TestingFarmProvisioner(Provisioner):
         tf_reserve = api.Reserve(
             compose=self.compose,
             arch=self.arch,
-            timeout=self.timeout,
             ssh_key=self.ssh_key,
             api=self.tf_api,
+            **self.reserve_kwargs,
         )
 
         # add it to self.reserving even before we schedule a provision,

@@ -152,29 +145,24 @@ class TestingFarmProvisioner(Provisioner):
         # start a background wait
         self.queue.start_thread(
             target=self._wait_for_reservation,
-
+            target_args=(tf_reserve, initial_delay),
         )
 
     def start(self):
         with self.lock:
             self._tmpdir = tempfile.TemporaryDirectory()
             self.ssh_key, self.ssh_pubkey = util.ssh_keygen(self._tmpdir.name)
-            # start up all initial reservations
-            for i in range(self.max_systems):
-                delay = (api.API_QUERY_DELAY / self.max_systems) * i
-                #self.queue.start_thread(target=self._schedule_one_reservation, args=(delay,))
-                self._schedule_one_reservation(delay)
 
     def stop(self):
         with self.lock:
             # abort reservations in progress
-
-
-
+            while self.reserving:
+                # testingfarm api.Reserve instances
+                self.reserving.pop().release()
             # cancel/release all Remotes ever created by us
-
-
-
+            while self.remotes:
+                # TestingFarmRemote instances
+                self.remotes.pop().release()
             # explicitly remove the tmpdir rather than relying on destructor
             self._tmpdir.cleanup()
             self._tmpdir = None

@@ -190,14 +178,18 @@ class TestingFarmProvisioner(Provisioner):
             self._tmpdir = None
         return callables
 
-    def
-        # fill .release()d remotes back up with reservations
+    def provision(self, count=1):
         with self.lock:
-
-
-
+            reservations = len(self.remotes) + len(self.reserving)
+            # clamp count to absolute_max_remotes
+            if count + reservations > self.absolute_max_remotes:
+                count = self.absolute_max_remotes - reservations
+            # spread out the request submissions
+            for i in range(count):
+                delay = (api.Request.api_query_limit / count) * i
                 self._schedule_one_reservation(delay)
 
+    def get_remote(self, block=True):
         while True:
             # otherwise wait on a queue of Remotes being provisioned
             try:
atex/util/libvirt.py
ADDED

@@ -0,0 +1,18 @@
+import importlib
+
+
+def import_libvirt():
+    try:
+        libvirt = importlib.import_module("libvirt")
+    except ModuleNotFoundError:
+        raise ModuleNotFoundError(
+            "No module named 'libvirt', you need to install it from the package"
+            " manager of your distro, ie. 'dnf install python3-libvirt' as it"
+            " requires distro-wide headers to compile. It won't work from PyPI."
+            " If using venv, create it with '--system-site-packages'.",
+        ) from None
+
+    # suppress console error printing, behave like a good python module
+    libvirt.registerErrorHandler(lambda _ctx, _err: None, None)
+
+    return libvirt
atex/util/log.py
CHANGED

@@ -1,61 +1,71 @@
-import
+import os
 import logging
-
+import inspect
 
 _logger = logging.getLogger("atex")
 
+# which functions to skip when determining the logger function caller;
+# typically, these are wrappers and we want to see their caller in the trace
+# instead of them
+#
+# ( file basename , qualname )
+# where qualname is '<module>' or 'funcname' or 'Classname.funcname'
+skip_levels = {
+    ("subprocess.py", "subprocess_run"),
+    ("subprocess.py", "subprocess_output"),
+    ("subprocess.py", "subprocess_Popen"),
+    ("subprocess.py", "subprocess_stream"),
+    ("subprocess.py", "subprocess_log"),
 
-
-    """
-    Return True if the root logger is using the DEBUG (or more verbose) level.
-    """
-    root_level = logging.getLogger().level
-    return root_level > 0 and root_level <= logging.DEBUG
+    ("podman.py", "PodmanConnection.cmd"),
+    ("podman.py", "PodmanConnection.rsync"),
 
+    ("ssh.py", "StatelessSSHConnection.cmd"),
+    ("ssh.py", "StatelessSSHConnection.rsync"),
+    ("ssh.py", "ManagedSSHConnection.forward"),
+    ("ssh.py", "ManagedSSHConnection.cmd"),
+    ("ssh.py", "ManagedSSHConnection.rsync"),
+}
 
-def _format_msg(msg, *, skip_frames=0):
-    stack = inspect.stack()
-    if len(stack)-1 <= skip_frames:
-        raise SyntaxError("skip_frames exceeds call stack (frame count)")
-    stack = stack[skip_frames+1:]
 
-
-
-
+def _log_msg(logger_func, *args, stacklevel=1, **kwargs):
+    # inspect.stack() is MUCH slower
+    caller = inspect.currentframe().f_back.f_back
+    extra_levels = 2  # skip this func and the debug/info/warning parent
+    while caller.f_back:
+        code = caller.f_code
+        # pathlib is much slower
+        basename = os.path.basename(code.co_filename)  # noqa: PTH119
+        qualname = code.co_qualname
+        if (basename, qualname) in skip_levels:
+            extra_levels += 1
+            caller = caller.f_back
+        else:
            break
-
+    return logger_func(*args, stacklevel=stacklevel+extra_levels, **kwargs)
+
 
-
-
-    function = parent.function
+def warning(*args, **kwargs):
+    return _log_msg(_logger.warning, *args, **kwargs)
 
-    # if the function has 'self' and it looks like a class instance,
-    # prepend it to the function name
-    p_locals = parent.frame.f_locals
-    if "self" in p_locals:
-        self = p_locals["self"]
-        if hasattr(self, "__class__") and inspect.isclass(self.__class__):
-            function = f"{self.__class__.__name__}.{function}"
 
-
-
-        parent_modname = parent.frame.f_globals["__name__"]
-        # avoid everything having the package name prefixed
-        parent_modname = parent_modname.partition(".")[2] or parent_modname
-        return f"{parent_modname}.{function}:{parent.lineno}: {msg}"
-    elif parent.function != "<module>":
-        return f"{function}:{parent.lineno}: {msg}"
-    else:
-        return f"{Path(parent.filename).name}:{parent.lineno}: {msg}"
+def info(*args, **kwargs):
+    return _log_msg(_logger.info, *args, **kwargs)
 
 
-def debug(
-    _logger.debug
+def debug(*args, **kwargs):
+    return _log_msg(_logger.debug, *args, **kwargs)
 
 
-
-
+# add a log level more verbose than logging.DEBUG, for verbose command
+# outputs, big JSON / XML printouts, and other outputs unsuitable for
+# large parallel runs; to be used in targeted debugging
+#
+# logging.DEBUG is 10, and programs tend to add TRACE as 5, so be somewhere
+# in between
+EXTRADEBUG = 8
+logging.addLevelName(EXTRADEBUG, "EXTRADEBUG")
 
 
-def
-    _logger.
+def extradebug(*args, **kwargs):
+    return _log_msg(_logger.log, EXTRADEBUG, *args, **kwargs)
atex/util/named_mapping.py
ADDED

@@ -0,0 +1,158 @@
+"""
+Provides a namedtuple-inspired frozen mapping-backed data structure.
+
+    class MyMap(NamedMapping):
+        pass
+
+    m = MyMap(a=123, b=456)
+
+    m["a"]      # 123
+    m.a         # 123
+    m["a"] = 9  # KeyError (is read-only)
+    m.a = 9     # AttributeError (is read-only)
+
+Like namedtuple, you can specify required keys that always need to be given
+to the constructor:
+
+    class MyMap(NamedMapping, required=("key1", "key2")):
+        pass
+
+    m = MyMap(a=123, b=456, key1=999)  # KeyError (key2 not specified)
+
+Similarly, you can specify defaults (for required or non-required keys),
+as a dict, that are used if omitted from the constructor:
+
+    class MyMap(NamedMapping, defaults={"key": 678}):
+        pass
+
+    m = MyMap()  # will have m.key == 678
+
+A class instance can unpack via ** with the entirety of its mapping contents:
+
+    m = MyMap(key2=456)
+    both = {'key1': 123, **m}  # contains both keys
+
+You can also chain (append to) required / default values through inheritance:
+
+    class MyMap(NamedMapping, required=("key1",), defaults={"key2": 234}):
+        pass
+
+    class AnotherMap(MyMap, required=("key3",))
+        pass
+
+    m = AnotherMap()  # KeyError (key1 and key3 are required)
+
+    isinstance(m, MyMap)  # would be True
+
+When instantiating, it is also possible to copy just the required keys from
+another dict-like object (does not have to be a parent of the class):
+
+    class SmallMap(NamedMapping, required=("key1", "key2")):
+        pass
+
+    class BigMap(SmallMap, required=("key3", "key4")):
+        pass
+
+    b = BigMap(key1=123, key2=456, key3=789, key4=0)
+
+    s = SmallMap._from(b)             # will copy just key1 and key2
+    s = SmallMap._from(b, extra=555)  # can pass extra **kwargs to __init__
+    s = SmallMap(**b)                 # will copy all keys
+
+Note that this is a fairly basic implementation without __hash__, etc.
+"""
+
+import abc
+import collections
+
+
+class _NamedMappingMeta(abc.ABCMeta):
+    def __new__(
+        metacls, name, bases, namespace, *, required=None, default=None, **kwargs,  # noqa: N804
+    ):
+        new_required = []
+        for base in bases:
+            new_required.extend(getattr(base, "_required", ()))
+        if required:
+            new_required.extend(required)
+        namespace["_required"] = tuple(set(new_required))
+
+        new_default = {}
+        for base in bases:
+            new_default.update(getattr(base, "_default", {}))
+        if default:
+            new_default.update(default)
+        namespace["_default"] = new_default
+
+        return super().__new__(metacls, name, bases, namespace, **kwargs)
+
+
+class NamedMapping(collections.abc.Mapping, metaclass=_NamedMappingMeta):
+    __slots__ = ("_data",)
+
+    def __init__(self, **keys):
+        data = {}
+        if hasattr(self, "_default"):
+            data.update(self._default)
+        data.update(keys)
+        if hasattr(self, "_required"):
+            for key in self._required:
+                if key not in data:
+                    raise KeyError(f"'{self.__class__.__name__}' requires key '{key}'")
+        object.__setattr__(self, "_data", data)
+
+    @classmethod
+    def _from(cls, foreign, **keys):
+        """
+        (keys is like for __init__)
+        """
+        foreign_data = {}
+        if hasattr(cls, "_required"):
+            for key in cls._required:
+                if key in foreign:
+                    foreign_data[key] = foreign[key]
+        foreign_data.update(keys)
+        return cls(**foreign_data)
+
+    def __getattr__(self, item):
+        if item in ("_data", "_required", "_default"):
+            return super().__getattr__(item)
+        try:
+            return self._data[item]
+        except KeyError:
+            raise AttributeError(
+                f"'{self.__class__.__name__}' object has no attribute '{item}'",
+                name=item,
+            ) from None
+
+    def __setattr__(self, name, value):
+        raise AttributeError(
+            f"'{self}' is read-only, cannot set '{name}'",
+            name=name,
+            obj=value,
+        )
+
+    def __getitem__(self, key):
+        return self._data[key]
+
+    def __setitem__(self, key, value):
+        raise ValueError(f"'{self}' is read-only, cannot set '{key}'")
+
+    def __delitem__(self, key):
+        raise ValueError(f"'{self}' is read-only, cannot delete '{key}'")
+
+    def __contains__(self, key):
+        return key in self._data
+
+    def __iter__(self):
+        return iter(self._data)
+
+    def __len__(self):
+        return len(self._data)
+
+    def __repr__(self):
+        return (
+            f"{self.__class__.__name__}("
+            + ", ".join((f"{k}={repr(v)}" for k,v in self._data.items()))
+            + ")"
+        )
atex/util/subprocess.py
CHANGED

@@ -1,52 +1,86 @@
 import subprocess
 
-from .log import debug
+from .log import debug, extradebug
 
 
-def subprocess_run(cmd,
+def subprocess_run(cmd, **kwargs):
     """
     A simple wrapper for the real subprocess.run() that logs the command used.
     """
     # when logging, skip current stack frame - report the place we were called
     # from, not util.subprocess_run itself
-    debug(f"running: {cmd}
+    debug(f"running: '{cmd}' with {kwargs=}")
     return subprocess.run(cmd, **kwargs)
 
 
-def subprocess_output(cmd, *,
+def subprocess_output(cmd, *, check=True, text=True, **kwargs):
     """
     A wrapper simulating subprocess.check_output() via a modern .run() API.
     """
-    debug(f"running: {cmd}
+    debug(f"running: '{cmd}' with {check=}, {text=} and {kwargs=}")
     proc = subprocess.run(cmd, check=check, text=text, stdout=subprocess.PIPE, **kwargs)
     return proc.stdout.rstrip("\n") if text else proc.stdout
 
 
-def subprocess_Popen(cmd,
+def subprocess_Popen(cmd, **kwargs):  # noqa: N802
     """
     A simple wrapper for the real subprocess.Popen() that logs the command used.
     """
-    debug(f"running: {cmd}
+    debug(f"running: '{cmd}' with {kwargs=}")
     return subprocess.Popen(cmd, **kwargs)
 
 
-def subprocess_stream(cmd, *, check=False,
+def subprocess_stream(cmd, *, stream="stdout", check=False, input=None, **kwargs):
     """
     Run 'cmd' via subprocess.Popen() and return an iterator over any lines
     the command outputs on stdout, in text mode.
 
+    The 'stream' is a subprocess.Popen attribute (either 'stdout' or 'stderr')
+    to read from.
+    To capture both stdout and stderr as yielded lines, use 'stream="stdout"'
+    and pass an additional 'stderr=subprocess.STDOUT'.
+
     With 'check' set to True, raise a CalledProcessError if the 'cmd' failed.
 
-
+    Similarly, 'input' simulates the 'input' arg of subprocess.run().
+    Note that the input is written to stdin of the process *before* any outputs
+    are streamed, so it should be sufficiently small and/or not cause a deadlock
+    with the process waiting for outputs to be read before consuming more input.
+    Use 'stdin=subprocess.PIPE' and write to it manually if you need more.
     """
-
-
+    all_kwargs = {
+        "text": True,
+        stream: subprocess.PIPE,
+    }
+    if input is not None:
+        all_kwargs["stdin"] = subprocess.PIPE
+    all_kwargs |= kwargs
+
+    debug(f"running: '{cmd}' with {all_kwargs=}")
+    proc = subprocess.Popen(cmd, **all_kwargs)
 
     def generate_lines():
-
+        if input is not None:
+            proc.stdin.write(input)
+            proc.stdin.close()
+        line_stream = getattr(proc, stream)
+        for line in line_stream:
             yield line.rstrip("\n")
         code = proc.wait()
         if code > 0 and check:
             raise subprocess.CalledProcessError(cmd=cmd, returncode=code)
 
     return (proc, generate_lines())
+
+
+def subprocess_log(cmd, **kwargs):
+    """
+    A wrapper to stream every (text) line output from the process to the
+    logging module.
+
+    Uses subprocess_stream() to gather the lines.
+    """
+    debug(f"running: '{cmd}' with {kwargs=}")
+    _, lines = subprocess_stream(cmd, **kwargs)
+    for line in lines:
+        extradebug(line)