atex 0.7__py3-none-any.whl → 0.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. atex/cli/fmf.py +143 -0
  2. atex/cli/libvirt.py +127 -0
  3. atex/cli/testingfarm.py +35 -13
  4. atex/connection/__init__.py +13 -19
  5. atex/connection/podman.py +63 -0
  6. atex/connection/ssh.py +34 -52
  7. atex/executor/__init__.py +2 -0
  8. atex/executor/duration.py +60 -0
  9. atex/executor/executor.py +402 -0
  10. atex/executor/reporter.py +101 -0
  11. atex/{minitmt → executor}/scripts.py +37 -25
  12. atex/{minitmt → executor}/testcontrol.py +54 -42
  13. atex/fmf.py +237 -0
  14. atex/orchestrator/__init__.py +3 -59
  15. atex/orchestrator/aggregator.py +82 -134
  16. atex/orchestrator/orchestrator.py +385 -0
  17. atex/provision/__init__.py +74 -105
  18. atex/provision/libvirt/__init__.py +2 -24
  19. atex/provision/libvirt/libvirt.py +465 -0
  20. atex/provision/libvirt/locking.py +168 -0
  21. atex/provision/libvirt/setup-libvirt.sh +21 -1
  22. atex/provision/podman/__init__.py +1 -0
  23. atex/provision/podman/podman.py +274 -0
  24. atex/provision/testingfarm/__init__.py +2 -29
  25. atex/provision/testingfarm/api.py +123 -65
  26. atex/provision/testingfarm/testingfarm.py +234 -0
  27. atex/util/__init__.py +1 -6
  28. atex/util/libvirt.py +18 -0
  29. atex/util/log.py +31 -8
  30. atex/util/named_mapping.py +158 -0
  31. atex/util/path.py +16 -0
  32. atex/util/ssh_keygen.py +14 -0
  33. atex/util/threads.py +99 -0
  34. atex-0.9.dist-info/METADATA +178 -0
  35. atex-0.9.dist-info/RECORD +43 -0
  36. atex/cli/minitmt.py +0 -175
  37. atex/minitmt/__init__.py +0 -23
  38. atex/minitmt/executor.py +0 -348
  39. atex/minitmt/fmf.py +0 -202
  40. atex/provision/nspawn/README +0 -74
  41. atex/provision/podman/README +0 -59
  42. atex/provision/podman/host_container.sh +0 -74
  43. atex/provision/testingfarm/foo.py +0 -1
  44. atex-0.7.dist-info/METADATA +0 -102
  45. atex-0.7.dist-info/RECORD +0 -32
  46. {atex-0.7.dist-info → atex-0.9.dist-info}/WHEEL +0 -0
  47. {atex-0.7.dist-info → atex-0.9.dist-info}/entry_points.txt +0 -0
  48. {atex-0.7.dist-info → atex-0.9.dist-info}/licenses/COPYING.txt +0 -0
@@ -0,0 +1,234 @@
1
+ import time
2
+ import tempfile
3
+ import threading
4
+
5
+ from ... import connection, util
6
+ from .. import Provisioner, Remote
7
+
8
+ from . import api
9
+
10
+
11
class TestingFarmRemote(Remote, connection.ssh.ManagedSSHConn):
    """
    Built on the official Remote API, pulling in the Connection API
    as implemented by ManagedSSHConn.
    """

    def __init__(self, request_id, ssh_options, *, release_hook):
        """
        'request_id' is a string with Testing Farm request UUID (for printouts).

        'ssh_options' are a dict, passed to ManagedSSHConn __init__().

        'release_hook' is a callable called on .release() in addition
        to disconnecting the connection.
        """
        # NOTE: self.lock inherited from ManagedSSHConn
        super().__init__(options=ssh_options)
        self.request_id = request_id
        self.release_hook = release_hook
        # one-shot flag making repeated .release() calls no-ops;
        # checked and set under self.lock in release()
        self.release_called = False

    def release(self):
        """
        Release the remote exactly once: run the user-supplied hook and
        drop the SSH connection. Any subsequent calls are no-ops.
        """
        with self.lock:
            if self.release_called:
                return
            else:
                self.release_called = True
            # NOTE(review): hook + disconnect appear to run while still
            # holding self.lock — confirm against callers that no path
            # re-enters release() from the hook (RLock makes that safe)
            self.release_hook(self)
            self.disconnect()

    # not /technically/ a valid repr(), but meh
    def __repr__(self):
        class_name = self.__class__.__name__
        # fall back to "unknown" for any ssh option not explicitly set
        ssh_user = self.options.get("User", "unknown")
        ssh_host = self.options.get("Hostname", "unknown")
        ssh_port = self.options.get("Port", "unknown")
        ssh_key = self.options.get("IdentityFile", "unknown")
        return f"{class_name}({ssh_user}@{ssh_host}:{ssh_port}@{ssh_key}, {self.request_id})"
49
+
50
+
51
class TestingFarmProvisioner(Provisioner):
    """
    Provisioner implementation backed by the Testing Farm reservation API,
    keeping a pool of (up to) 'max_systems' reserved machines and handing
    them out as connected TestingFarmRemote instances.
    """

    # TODO: have max_systems as (min,default,max) tuple; have an algorithm that
    # starts at default and scales up/down as needed

    def __init__(self, compose, arch="x86_64", *, max_systems=1, max_retries=10, **reserve_kwargs):
        """
        'compose' is a Testing Farm compose to prepare.

        'arch' is an architecture associated with the compose.

        'max_systems' is an int of how many systems to reserve (and keep
        reserved) in an internal pool.

        'max_retries' is a maximum number of provisioning (Testing Farm) errors
        that will be reprovisioned before giving up.

        Any extra 'reserve_kwargs' are passed through to api.Reserve().
        """
        # guards all mutable pool state below (reserving, remotes, retries, ...)
        self.lock = threading.RLock()
        self.compose = compose
        self.arch = arch
        self.max_systems = max_systems
        self.reserve_kwargs = reserve_kwargs
        self.retries = max_retries

        # created in start(); holds the generated throwaway ssh keypair
        self._tmpdir = None
        self.ssh_key = self.ssh_pubkey = None
        # daemon=True so in-flight reservation waits don't block interpreter exit
        self.queue = util.ThreadQueue(daemon=True)
        self.tf_api = api.TestingFarmAPI()

        # TF Reserve instances (not Remotes) actively being provisioned,
        # in case we need to call their .release() on abort
        self.reserving = []

        # active TestingFarmRemote instances, ready to be handed over to the user,
        # or already in use by the user
        self.remotes = []

    def _wait_for_reservation(self, tf_reserve, initial_delay):
        """
        Wait for 'tf_reserve' to be fulfilled and return a connected
        TestingFarmRemote for it; runs inside a ThreadQueue worker thread,
        so its return value / exception surfaces via self.queue.get().
        """
        # assuming this function will be called many times, attempt to
        # distribute load on TF servers
        # (we can sleep here as this code is running in a separate thread)
        if initial_delay:
            util.debug(f"delaying for {initial_delay}s to distribute load")
            time.sleep(initial_delay)

        # 'machine' is api.Reserve.ReservedMachine namedtuple
        machine = tf_reserve.reserve()

        # connect our Remote to the machine via its class Connection API
        ssh_options = {
            "Hostname": machine.host,
            "User": machine.user,
            "Port": machine.port,
            "IdentityFile": machine.ssh_key,
            "ConnectionAttempts": "1000",
            "Compression": "yes",
        }

        def release_hook(remote):
            # remove from the list of remotes inside this Provisioner
            with self.lock:
                try:
                    self.remotes.remove(remote)
                except ValueError:
                    # already removed (e.g. by stop()), nothing to do
                    pass
            # call TF API, cancel the request, etc.
            tf_reserve.release()

        remote = TestingFarmRemote(
            tf_reserve.request.id,
            ssh_options,
            release_hook=release_hook,
        )
        remote.connect()

        # since the system is fully ready, stop tracking its reservation
        # and return the finished Remote instance
        with self.lock:
            self.remotes.append(remote)
            self.reserving.remove(tf_reserve)

        return remote

    def _schedule_one_reservation(self, initial_delay=None):
        """
        Create one api.Reserve and start a background thread waiting on it;
        the resulting TestingFarmRemote eventually arrives on self.queue.
        """
        # instantiate a class Reserve from the Testing Farm api module
        # (which typically provides context manager, but we use its .reserve()
        # and .release() functions directly)
        tf_reserve = api.Reserve(
            compose=self.compose,
            arch=self.arch,
            ssh_key=self.ssh_key,
            api=self.tf_api,
            **self.reserve_kwargs,
        )

        # add it to self.reserving even before we schedule a provision,
        # to avoid races on sudden abort
        with self.lock:
            self.reserving.append(tf_reserve)

        # start a background wait
        self.queue.start_thread(
            target=self._wait_for_reservation,
            target_args=(tf_reserve, initial_delay),
        )

    def start(self):
        """
        Generate a temporary ssh keypair and schedule the initial batch
        of 'max_systems' reservations, spread out in time.
        """
        with self.lock:
            self._tmpdir = tempfile.TemporaryDirectory()
            self.ssh_key, self.ssh_pubkey = util.ssh_keygen(self._tmpdir.name)
            # start up all initial reservations
            for i in range(self.max_systems):
                delay = (api.API_QUERY_DELAY / self.max_systems) * i
                #self.queue.start_thread(target=self._schedule_one_reservation, args=(delay,))
                self._schedule_one_reservation(delay)

    def stop(self):
        """
        Synchronously abort in-flight reservations, release all Remotes and
        remove the temporary ssh key directory.
        """
        with self.lock:
            # abort reservations in progress
            while self.reserving:
                # testingfarm api.Reserve instances
                self.reserving.pop().release()
            # cancel/release all Remotes ever created by us
            while self.remotes:
                # TestingFarmRemote instances
                self.remotes.pop().release()
            # explicitly remove the tmpdir rather than relying on destructor
            self._tmpdir.cleanup()
            self._tmpdir = None

    def stop_defer(self):
        """
        Like stop(), but instead of doing the cleanup, return a list of
        zero-argument callables the caller can invoke (possibly in
        parallel) to perform it.
        """
        callables = []
        with self.lock:
            callables += (f.release for f in self.reserving)
            self.reserving = []
            callables += (r.release for r in self.remotes)
            self.remotes = []  # just in case
            callables.append(self._tmpdir.cleanup)
            self._tmpdir = None
        return callables

    def get_remote(self, block=True):
        """
        Return the next ready TestingFarmRemote (blocking if 'block' is
        True), or None when non-blocking and nothing is available yet.
        Refills the pool for released remotes and retries failed
        provisioning attempts up to 'max_retries' times before re-raising.
        """
        # fill .release()d remotes back up with reservations
        with self.lock:
            deficit = self.max_systems - len(self.remotes) - len(self.reserving)
            for i in range(deficit):
                delay = (api.API_QUERY_DELAY / deficit) * i
                self._schedule_one_reservation(delay)

        while True:
            # otherwise wait on a queue of Remotes being provisioned
            try:
                return self.queue.get(block=block)  # thread-safe
            except util.ThreadQueue.Empty:
                # always non-blocking
                return None
            except (api.TestingFarmError, connection.ssh.SSHError) as e:
                # a reservation failed; schedule a fresh one if retries remain
                with self.lock:
                    if self.retries > 0:
                        util.warning(
                            f"caught while reserving a TF system: {repr(e)}, "
                            f"retrying ({self.retries} left)",
                        )
                        self.retries -= 1
                        self._schedule_one_reservation()
                        if block:
                            continue
                        else:
                            return None
                    else:
                        util.warning(
                            f"caught while reserving a TF system: {repr(e)}, "
                            "exhausted all retries, giving up",
                        )
                        raise

    # not /technically/ a valid repr(), but meh
    def __repr__(self):
        class_name = self.__class__.__name__
        # NOTE(review): pool sizes read without self.lock — racy, but only
        # used for informal printouts
        reserving = len(self.reserving)
        remotes = len(self.remotes)
        return (
            f"{class_name}({self.compose} @ {self.arch}, {reserving} reserving, "
            f"{remotes} remotes, {hex(id(self))})"
        )
atex/util/__init__.py CHANGED
@@ -1,7 +1,3 @@
1
- """
2
- TODO some description about utilities
3
- """
4
-
5
1
  import importlib as _importlib
6
2
  import pkgutil as _pkgutil
7
3
  import inspect as _inspect
@@ -39,8 +35,7 @@ def _import_submodules():
39
35
  if _inspect.ismodule(attr):
40
36
  continue
41
37
  # do not override already processed objects (avoid duplicates)
42
- if key in __all__:
43
- raise AssertionError(f"tried to override already-imported '{key}'")
38
+ assert key not in __all__, f"tried to override already-imported '{key}'"
44
39
 
45
40
  globals()[key] = attr
46
41
  __all__.append(key)
atex/util/libvirt.py ADDED
@@ -0,0 +1,18 @@
1
+ import importlib
2
+
3
+
4
def import_libvirt():
    """
    Import and return the 'libvirt' python module.

    Raises ModuleNotFoundError with installation hints when the binding is
    not available (it must come from the distro package manager, not PyPI).
    """
    install_hint = (
        "No module named 'libvirt', you need to install it from the package"
        " manager of your distro, ie. 'dnf install python3-libvirt' as it"
        " requires distro-wide headers to compile. It won't work from PyPI."
        " If using venv, create it with '--system-site-packages'."
    )
    try:
        libvirt = importlib.import_module("libvirt")
    except ModuleNotFoundError:
        raise ModuleNotFoundError(install_hint) from None
    # stop libvirt from printing every error to the console on its own,
    # behave like a good python module instead
    libvirt.registerErrorHandler(lambda _ctx, _err: None, None)
    return libvirt
atex/util/log.py CHANGED
@@ -5,6 +5,15 @@ from pathlib import Path
5
5
  _logger = logging.getLogger("atex")
6
6
 
7
7
 
8
def in_debug_mode():
    """
    Return True if the root logger is using the DEBUG (or more verbose) level.
    """
    # TODO: use _logger.isEnabledFor() ?
    # NOTSET (0) means "defer to parent", so treat it as not-debug here
    return 0 < logging.getLogger().level <= logging.DEBUG
15
+
16
+
8
17
  def _format_msg(msg, *, skip_frames=0):
9
18
  stack = inspect.stack()
10
19
  if len(stack)-1 <= skip_frames:
@@ -23,11 +32,16 @@ def _format_msg(msg, *, skip_frames=0):
23
32
 
24
33
  # if the function has 'self' and it looks like a class instance,
25
34
  # prepend it to the function name
26
- p_locals = parent.frame.f_locals
27
- if "self" in p_locals:
28
- self = p_locals["self"]
29
- if hasattr(self, "__class__") and inspect.isclass(self.__class__):
30
- function = f"{self.__class__.__name__}.{function}"
35
+ argvals = inspect.getargvalues(parent.frame)
36
+ if argvals.args:
37
+ if argvals.args[0] == "self":
38
+ self = argvals.locals["self"]
39
+ if hasattr(self, "__class__") and inspect.isclass(self.__class__):
40
+ function = f"{self.__class__.__name__}.{function}"
41
+ elif argvals.args[0] == "cls":
42
+ cls = argvals.locals["cls"]
43
+ if inspect.isclass(cls):
44
+ function = f"{cls.__name__}.{function}"
31
45
 
32
46
  # don't report module name of a function if it's the same as running module
33
47
  if parent.filename != module.filename:
@@ -42,12 +56,21 @@ def _format_msg(msg, *, skip_frames=0):
42
56
 
43
57
 
44
58
def debug(msg, *, skip_frames=0):
    """
    Log 'msg' at DEBUG level via the 'atex' logger.

    When running at DEBUG verbosity, the message is first decorated by
    _format_msg() with caller context, skipping 'skip_frames' extra stack
    frames; otherwise it is logged as-is (presumably to avoid the
    inspect-based formatting cost outside debug runs).
    """
    if not in_debug_mode():
        _logger.debug(msg)
        return
    _logger.debug(_format_msg(msg, skip_frames=skip_frames+1))
46
63
 
47
64
 
48
65
def info(msg, *, skip_frames=0):
    """
    Log 'msg' at INFO level via the 'atex' logger.

    When running at DEBUG verbosity, the message is first decorated by
    _format_msg() with caller context, skipping 'skip_frames' extra stack
    frames; otherwise it is logged as-is.
    """
    if not in_debug_mode():
        _logger.info(msg)
        return
    _logger.info(_format_msg(msg, skip_frames=skip_frames+1))
50
70
 
51
71
 
52
72
def warning(msg, *, skip_frames=0):
    """
    Log 'msg' at WARNING level via the 'atex' logger.

    When running at DEBUG verbosity, the message is first decorated by
    _format_msg() with caller context, skipping 'skip_frames' extra stack
    frames; otherwise it is logged as-is.
    """
    if not in_debug_mode():
        _logger.warning(msg)
        return
    _logger.warning(_format_msg(msg, skip_frames=skip_frames+1))
@@ -0,0 +1,158 @@
1
+ """
2
+ Provides a namedtuple-inspired frozen mapping-backed data structure.
3
+
4
+ class MyMap(NamedMapping):
5
+ pass
6
+
7
+ m = MyMap(a=123, b=456)
8
+
9
+ m["a"] # 123
10
+ m.a # 123
11
+ m["a"] = 9 # KeyError (is read-only)
12
+ m.a = 9 # AttributeError (is read-only)
13
+
14
+ Like namedtuple, you can specify required keys that always need to be given
15
+ to the constructor:
16
+
17
+ class MyMap(NamedMapping, required=("key1", "key2")):
18
+ pass
19
+
20
+ m = MyMap(a=123, b=456, key1=999) # KeyError (key2 not specified)
21
+
22
+ Similarly, you can specify defaults (for required or non-required keys),
23
+ as a dict, that are used if omitted from the constructor:
24
+
25
+ class MyMap(NamedMapping, default={"key": 678}):
26
+ pass
27
+
28
+ m = MyMap() # will have m.key == 678
29
+
30
+ A class instance can unpack via ** with the entirety of its mapping contents:
31
+
32
+ m = MyMap(key2=456)
33
+ both = {'key1': 123, **m} # contains both keys
34
+
35
+ You can also chain (append to) required / default values through inheritance:
36
+
37
+ class MyMap(NamedMapping, required=("key1",), default={"key2": 234}):
38
+ pass
39
+
40
+ class AnotherMap(MyMap, required=("key3",)):
41
+ pass
42
+
43
+ m = AnotherMap() # KeyError (key1 and key3 are required)
44
+
45
+ isinstance(m, MyMap) # would be True
46
+
47
+ When instantiating, it is also possible to copy just the required keys from
48
+ another dict-like object (does not have to be a parent of the class):
49
+
50
+ class SmallMap(NamedMapping, required=("key1", "key2")):
51
+ pass
52
+
53
+ class BigMap(SmallMap, required=("key3", "key4")):
54
+ pass
55
+
56
+ b = BigMap(key1=123, key2=456, key3=789, key4=0)
57
+
58
+ s = SmallMap._from(b) # will copy just key1 and key2
59
+ s = SmallMap._from(b, extra=555) # can pass extra **kwargs to __init__
60
+ s = SmallMap(**b) # will copy all keys
61
+
62
+ Note that this is a fairly basic implementation without __hash__, etc.
63
+ """
64
+
65
+ import abc
66
+ import collections
67
+
68
+
69
+ class _NamedMappingMeta(abc.ABCMeta):
70
+ def __new__(
71
+ metacls, name, bases, namespace, *, required=None, default=None, **kwargs, # noqa: N804
72
+ ):
73
+ new_required = []
74
+ for base in bases:
75
+ new_required.extend(getattr(base, "_required", ()))
76
+ if required:
77
+ new_required.extend(required)
78
+ namespace["_required"] = tuple(set(new_required))
79
+
80
+ new_default = {}
81
+ for base in bases:
82
+ new_default.update(getattr(base, "_default", {}))
83
+ if default:
84
+ new_default.update(default)
85
+ namespace["_default"] = new_default
86
+
87
+ return super().__new__(metacls, name, bases, namespace, **kwargs)
88
+
89
+
90
+ class NamedMapping(collections.abc.Mapping, metaclass=_NamedMappingMeta):
91
+ __slots__ = ("_data",)
92
+
93
+ def __init__(self, **keys):
94
+ data = {}
95
+ if hasattr(self, "_default"):
96
+ data.update(self._default)
97
+ data.update(keys)
98
+ if hasattr(self, "_required"):
99
+ for key in self._required:
100
+ if key not in data:
101
+ raise KeyError(f"'{self.__class__.__name__}' requires key '{key}'")
102
+ object.__setattr__(self, "_data", data)
103
+
104
+ @classmethod
105
+ def _from(cls, foreign, **keys):
106
+ """
107
+ (keys is like for __init__)
108
+ """
109
+ foreign_data = {}
110
+ if hasattr(cls, "_required"):
111
+ for key in cls._required:
112
+ if key in foreign:
113
+ foreign_data[key] = foreign[key]
114
+ foreign_data.update(keys)
115
+ return cls(**foreign_data)
116
+
117
+ def __getattr__(self, item):
118
+ if item in ("_data", "_required", "_default"):
119
+ return super().__getattr__(item)
120
+ try:
121
+ return self._data[item]
122
+ except KeyError:
123
+ raise AttributeError(
124
+ f"'{self.__class__.__name__}' object has no attribute '{item}'",
125
+ name=item,
126
+ ) from None
127
+
128
+ def __setattr__(self, name, value):
129
+ raise AttributeError(
130
+ f"'{self}' is read-only, cannot set '{name}'",
131
+ name=name,
132
+ obj=value,
133
+ )
134
+
135
+ def __getitem__(self, key):
136
+ return self._data[key]
137
+
138
+ def __setitem__(self, key, value):
139
+ raise ValueError(f"'{self}' is read-only, cannot set '{key}'")
140
+
141
+ def __delitem__(self, key):
142
+ raise ValueError(f"'{self}' is read-only, cannot delete '{key}'")
143
+
144
+ def __contains__(self, key):
145
+ return key in self._data
146
+
147
+ def __iter__(self):
148
+ return iter(self._data)
149
+
150
+ def __len__(self):
151
+ return len(self._data)
152
+
153
+ def __repr__(self):
154
+ return (
155
+ f"{self.__class__.__name__}("
156
+ + ", ".join((f"{k}={repr(v)}" for k,v in self._data.items()))
157
+ + ")"
158
+ )
atex/util/path.py ADDED
@@ -0,0 +1,16 @@
1
+ import os
2
+
3
+
4
def normalize_path(path):
    """
    Transform a potentially dangerous path (leading slash, relative ../../../
    leading beyond parent, etc.) to a safe one.

    Always returns a relative path.
    """
    # anchor the path at the filesystem root so normpath() resolves any
    # '..' escapes against '/' (which cannot be escaped), then drop that
    # leading '/' again to yield a safe relative path
    anchored = os.path.normpath("/" + path.lstrip("/"))
    return anchored[1:]
@@ -0,0 +1,14 @@
1
+ import subprocess
2
+ from pathlib import Path
3
+
4
+ from .subprocess import subprocess_run
5
+
6
+
7
def ssh_keygen(dest_dir, key_type="rsa"):
    """
    Generate a new passphrase-less SSH keypair inside 'dest_dir'.

    'key_type' is passed to ssh-keygen's '-t' option (e.g. "rsa", "ed25519").

    Returns a (private_key, public_key) tuple of Path objects.
    """
    dest_dir = Path(dest_dir)
    private_key = dest_dir / f"key_{key_type}"
    subprocess_run(
        ("ssh-keygen", "-t", key_type, "-N", "", "-f", private_key),
        stdout=subprocess.DEVNULL,
        check=True,
    )
    # bugfix: return paths matching the requested key type — previously
    # this always returned "key_rsa"/"key_rsa.pub", which do not exist
    # for any non-rsa 'key_type'
    return (private_key, dest_dir / f"key_{key_type}.pub")
atex/util/threads.py ADDED
@@ -0,0 +1,99 @@
1
+ import queue
2
+ import threading
3
+
4
+ from .named_mapping import NamedMapping
5
+
6
+ # TODO: documentation; this is like concurrent.futures, but with daemon=True support
7
+
8
+
9
class ThreadQueue:
    """
    Start callables in dedicated threads and collect their return values
    (or raised exceptions) from a single shared queue.

    Similar in spirit to concurrent.futures, but supports daemon threads.
    """

    class ThreadReturn(NamedMapping, required=("thread", "returned", "exception")):
        # result record delivered on the queue: the 'thread' that ran the
        # callable, its 'returned' value (None on error) and the 'exception'
        # raised (None on success), plus any user_kwargs from start_thread()
        pass

    # re-exported so callers can catch ThreadQueue.Empty without
    # importing the queue module themselves
    Empty = queue.Empty

    def __init__(self, daemon=False):
        """
        'daemon' is passed to threading.Thread() for every started thread.
        """
        self.lock = threading.RLock()
        self.queue = queue.SimpleQueue()
        self.daemon = daemon
        # threads started but whose result was not yet retrieved
        self.threads = set()

    def _wrapper(self, func, func_args, func_kwargs, **user_kwargs):
        # runs inside the started thread: call 'func' and enqueue its
        # outcome as a ThreadReturn, never letting an exception escape
        # the thread silently
        current_thread = threading.current_thread()
        try:
            ret = func(*func_args, **func_kwargs)
            result = self.ThreadReturn(
                thread=current_thread,
                returned=ret,
                exception=None,
                **user_kwargs,
            )
        except Exception as e:
            result = self.ThreadReturn(
                thread=current_thread,
                returned=None,
                exception=e,
                **user_kwargs,
            )
        self.queue.put(result)

    def start_thread(self, target, target_args=None, target_kwargs=None, **user_kwargs):
        """
        Start a new thread and call 'target' as a callable inside it, passing it
        'target_args' as arguments and 'target_kwargs' as keyword arguments.

        Any additional 'user_kwargs' specified are NOT passed to the callable,
        but instead become part of the ThreadReturn namespace returned by the
        .get_raw() method.
        """
        t = threading.Thread(
            target=self._wrapper,
            args=(target, target_args or (), target_kwargs or {}),
            kwargs=user_kwargs,
            daemon=self.daemon,
        )
        # register before starting so get_raw()'s "would block forever"
        # check cannot miss a thread that is about to run
        with self.lock:
            self.threads.add(t)
        t.start()

    def get_raw(self, block=True, timeout=None):
        """
        Wait for and return the next available ThreadReturn instance on the
        queue, as enqueued by a finished callable started by the .start_thread()
        method.
        """
        with self.lock:
            # an indefinitely-blocking get with no running threads could
            # never be satisfied — fail loudly instead of deadlocking
            if block and timeout is None and not self.threads:
                raise AssertionError("no threads are running, would block forever")
        treturn = self.queue.get(block=block, timeout=timeout)
        with self.lock:
            # the thread delivered its result, stop tracking it
            self.threads.remove(treturn.thread)
        return treturn

    # get one return value from any thread's function, like .as_completed()
    # or concurrent.futures.FIRST_COMPLETED
    def get(self, block=True, timeout=None):
        """
        Wait for and return the next available return value of a callable
        enqueued via the .start_thread() method.

        If the callable raised an exception, the exception is re-raised here.
        """
        treturn = self.get_raw(block, timeout)
        if treturn.exception is not None:
            raise treturn.exception
        else:
            return treturn.returned

    # wait for all threads to finish (ignoring queue contents)
    def join(self):
        """
        Wait for all threads to finish, ignoring the state of the queue.
        """
        while True:
            with self.lock:
                try:
                    thread = self.threads.pop()
                except KeyError:
                    # no tracked threads left
                    break
            # join outside the lock so running threads aren't blocked
            thread.join()