atex 0.8-py3-none-any.whl → 0.10-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. atex/aggregator/__init__.py +60 -0
  2. atex/aggregator/json.py +96 -0
  3. atex/cli/__init__.py +11 -1
  4. atex/cli/fmf.py +73 -23
  5. atex/cli/libvirt.py +128 -0
  6. atex/cli/testingfarm.py +60 -3
  7. atex/connection/__init__.py +13 -11
  8. atex/connection/podman.py +61 -0
  9. atex/connection/ssh.py +38 -47
  10. atex/executor/executor.py +144 -119
  11. atex/executor/reporter.py +66 -71
  12. atex/executor/scripts.py +13 -5
  13. atex/executor/testcontrol.py +43 -30
  14. atex/fmf.py +94 -74
  15. atex/orchestrator/__init__.py +76 -2
  16. atex/orchestrator/adhoc.py +465 -0
  17. atex/{provision → provisioner}/__init__.py +54 -42
  18. atex/provisioner/libvirt/__init__.py +2 -0
  19. atex/provisioner/libvirt/libvirt.py +472 -0
  20. atex/provisioner/libvirt/locking.py +170 -0
  21. atex/{provision → provisioner}/libvirt/setup-libvirt.sh +21 -1
  22. atex/provisioner/podman/__init__.py +2 -0
  23. atex/provisioner/podman/podman.py +169 -0
  24. atex/{provision → provisioner}/testingfarm/api.py +121 -69
  25. atex/{provision → provisioner}/testingfarm/testingfarm.py +44 -52
  26. atex/util/libvirt.py +18 -0
  27. atex/util/log.py +53 -43
  28. atex/util/named_mapping.py +158 -0
  29. atex/util/subprocess.py +46 -12
  30. atex/util/threads.py +71 -20
  31. atex-0.10.dist-info/METADATA +86 -0
  32. atex-0.10.dist-info/RECORD +44 -0
  33. atex/orchestrator/aggregator.py +0 -106
  34. atex/orchestrator/orchestrator.py +0 -324
  35. atex/provision/libvirt/__init__.py +0 -24
  36. atex/provision/podman/README +0 -59
  37. atex/provision/podman/host_container.sh +0 -74
  38. atex-0.8.dist-info/METADATA +0 -197
  39. atex-0.8.dist-info/RECORD +0 -37
  40. /atex/{provision → provisioner}/libvirt/VM_PROVISION +0 -0
  41. /atex/{provision → provisioner}/testingfarm/__init__.py +0 -0
  42. {atex-0.8.dist-info → atex-0.10.dist-info}/WHEEL +0 -0
  43. {atex-0.8.dist-info → atex-0.10.dist-info}/entry_points.txt +0 -0
  44. {atex-0.8.dist-info → atex-0.10.dist-info}/licenses/COPYING.txt +0 -0
atex/orchestrator/adhoc.py
@@ -0,0 +1,465 @@
+ import tempfile
+ import concurrent
+ import collections
+ from pathlib import Path
+
+ from .. import util, executor
+ from . import Orchestrator, OrchestratorError
+
+
+ class FailedSetupError(OrchestratorError):
+     pass
+
+
+ class AdHocOrchestrator(Orchestrator):
+     """
+     TODO: document function specific to this reference, ie. run_setup(), etc.
+     """
+
+     class SetupInfo(
+         util.NamedMapping,
+         required=(
+             # class Provisioner instance this machine is provided by
+             # (for logging purposes)
+             "provisioner",
+             # class Remote instance returned by the Provisioner
+             "remote",
+             # class Executor instance uploading tests / running setup or tests
+             "executor",
+         ),
+     ):
+         pass
+
+     class RunningInfo(
+         SetupInfo,
+         required=(
+             # string with /test/name
+             "test_name",
+             # class tempfile.TemporaryDirectory instance passed to Executor
+             "tmp_dir",
+         ),
+     ):
+         pass
+
+     class FinishedInfo(
+         RunningInfo,
+         required=(
+             # integer with exit code of the test
+             # (None if exception happened)
+             "exit_code",
+             # exception class instance if running the test failed
+             # (None if no exception happened (exit_code is defined))
+             "exception",
+             # Path of a 'results' JSON file with test-reported results
+             "results",
+             # Path of a 'files' directory with test-uploaded files
+             "files",
+         ),
+     ):
+         pass
+
+     def __init__(
+         self, platform, fmf_tests, provisioners, aggregator, tmp_dir, *,
+         max_remotes=1, max_spares=0, max_reruns=2, max_failed_setups=10, env=None,
+     ):
+         """
+         'platform' is a string with platform name.
+
+         'fmf_tests' is a class FMFTests instance of the tests to run.
+
+         'provisioners' is an iterable of class Provisioner instances.
+
+         'aggregator' is a class CSVAggregator instance.
+
+         'tmp_dir' is a string/Path to a temporary directory, to be used for
+         storing per-test results and uploaded files before being ingested
+         by the aggregator. Can be safely shared by Orchestrator instances.
+
+         'max_remotes' is how many Remotes to hold reserved at any given time,
+         eg. how many tests to run in parallel. Clamped to the number of
+         to-be-run tests given as 'fmf_tests'.
+
+         'max_spares' is how many set-up Remotes to hold reserved and unused,
+         ready to replace a Remote destroyed by test. Values above 0 greatly
+         speed up test reruns as Remote reservation happens asynchronously
+         to test execution. Spares are reserved on top of 'max_remotes'.
+
+         'max_reruns' is an integer of how many times to re-try running a failed
+         test (which exited with non-0 or caused an Executor exception).
+
+         'max_failed_setups' is an integer of how many times an Executor's
+         plan setup (uploading tests, running prepare scripts, etc.) may fail
+         before FailedSetupError is raised.
+
+         'env' is a dict of extra environment variables to pass to Executor.
+         """
+         self.platform = platform
+         self.fmf_tests = fmf_tests
+         self.provisioners = tuple(provisioners)
+         self.aggregator = aggregator
+         self.tmp_dir = tmp_dir
+         self.failed_setups_left = max_failed_setups
+         self.max_remotes = max_remotes
+         self.max_spares = max_spares
+         # indexed by test name, value being integer of how many times
+         self.reruns = collections.defaultdict(lambda: max_reruns)
+         self.env = env
+         # tests still waiting to be run
+         self.to_run = set(fmf_tests.tests)
+         # running tests as a dict, indexed by test name, with RunningInfo values
+         self.running_tests = {}
+         # thread queue for actively running tests
+         self.test_queue = util.ThreadQueue(daemon=False)
+         # thread queue for remotes being set up (uploading tests, etc.)
+         self.setup_queue = util.ThreadQueue(daemon=True)
+         # thread queue for remotes being released
+         self.release_queue = util.ThreadQueue(daemon=True)
+
+     def _run_new_test(self, info):
+         """
+         'info' can be either
+         - SetupInfo instance with Remote/Executor to run the new test.
+         - FinishedInfo instance of a previously executed test
+           (reusing Remote/Executor for a new test).
+         """
+         next_test_name = self.next_test(self.to_run, self.fmf_tests.tests, info)
+         assert next_test_name in self.to_run, "next_test() returned valid test name"
+
+         util.info(f"starting '{next_test_name}' on {info.remote}")
+
+         self.to_run.remove(next_test_name)
+
+         rinfo = self.RunningInfo._from(
+             info,
+             test_name=next_test_name,
+             tmp_dir=tempfile.TemporaryDirectory(
+                 prefix=next_test_name.strip("/").replace("/","-") + "-",
+                 dir=self.tmp_dir,
+                 delete=False,
+             ),
+         )
+
+         tmp_dir_path = Path(rinfo.tmp_dir.name)
+         self.test_queue.start_thread(
+             target=info.executor.run_test,
+             target_args=(
+                 next_test_name,
+                 tmp_dir_path,
+             ),
+             rinfo=rinfo,
+         )
+
+         self.running_tests[next_test_name] = rinfo
+
+     def _process_finished_test(self, finfo):
+         """
+         'finfo' is a FinishedInfo instance.
+         """
+         test_data = self.fmf_tests.tests[finfo.test_name]
+
+         # TODO: somehow move logging from was_successful and should_be_rerun here,
+         # probably print just some generic info from those functions that doesn't
+         # imply any outcome, ie.
+         #   {remote_with_test} threw {exception}
+         #   {remote_with_test} exited with {code}
+         #   {remote_with_test} has {N} reruns left
+         #   {remote_with_test} has 0 reruns left
+         # and then log the decision separately, here below, such as
+         #   {remote_with_test} failed, re-running
+         #   {remote_with_test} completed, ingesting result
+         #   {remote_with_test} was destructive, releasing remote
+         #   {remote_with_test} ...., running next test
+         # That allows the user to override the functions, while keeping critical
+         # flow reliably logged here.
+
+         remote_with_test = f"{finfo.remote}: '{finfo.test_name}'"
+
+         if not self.was_successful(finfo, test_data) and self.should_be_rerun(finfo, test_data):
+             # re-run the test
+             self.to_run.add(finfo.test_name)
+         else:
+             # ingest the result
+             #
+             # a condition just in case Executor code itself threw an exception
+             # and didn't even report the fallback 'infra' result
+             if finfo.results is not None and finfo.files is not None:
+                 self.aggregator.ingest(
+                     self.platform,
+                     finfo.test_name,
+                     finfo.results,
+                     finfo.files,
+                 )
+                 # also delete the tmpdir housing these
+                 finfo.tmp_dir.cleanup()
+                 # ingesting destroyed these
+                 finfo = self.FinishedInfo._from(
+                     finfo,
+                     results=None,
+                     files=None,
+                     tmp_dir=None,
+                 )
+
+         # if destroyed, release the remote and request a replacement
+         # (Executor exception is always considered destructive)
+         if finfo.exception or self.destructive(finfo, test_data):
+             util.debug(f"{remote_with_test} was destructive, releasing remote")
+             self.release_queue.start_thread(
+                 finfo.remote.release,
+                 remote=finfo.remote,
+             )
+             finfo.provisioner.provision(1)
+
+         # if still not destroyed, run another test on it
+         # (without running plan setup, re-using already set up remote)
+         elif self.to_run:
+             util.debug(f"{remote_with_test} was non-destructive, running next test")
+             self._run_new_test(finfo)
+
+     def serve_once(self):
+         """
+         Run the orchestration logic, processing any outstanding requests
+         (for provisioning, new test execution, etc.) and returning once these
+         are taken care of.
+
+         Returns True to indicate that it should be called again by the user
+         (more work to be done), False once all testing is concluded.
+         """
+         # all done
+         if not self.to_run and not self.running_tests and self.release_queue.qsize() == 0:
+             return False
+
+         # process all finished tests, potentially reusing remotes for executing
+         # further tests
+         while True:
+             try:
+                 treturn = self.test_queue.get_raw(block=False)
+             except util.ThreadQueue.Empty:
+                 break
+
+             rinfo = treturn.rinfo
+             del self.running_tests[rinfo.test_name]
+
+             tmp_dir_path = Path(rinfo.tmp_dir.name)
+             results_path = tmp_dir_path / "results"
+             files_path = tmp_dir_path / "files"
+
+             finfo = self.FinishedInfo(
+                 **rinfo,
+                 exit_code=treturn.returned,
+                 exception=treturn.exception,
+                 results=results_path if results_path.exists() else None,
+                 files=files_path if files_path.exists() else None,
+             )
+             self._process_finished_test(finfo)
+
+         # process any remotes with finished plan setup (uploaded tests,
+         # plan-defined pkgs / prepare scripts), start executing tests on them
+         while self.to_run:
+             try:
+                 treturn = self.setup_queue.get_raw(block=False)
+             except util.ThreadQueue.Empty:
+                 break
+
+             sinfo = treturn.sinfo
+
+             if treturn.exception:
+                 msg = f"{sinfo.remote}: setup failed with {repr(treturn.exception)}"
+                 self.release_queue.start_thread(
+                     sinfo.remote.release,
+                     remote=sinfo.remote,
+                 )
+                 if (reruns_left := self.failed_setups_left) > 0:
+                     util.warning(f"{msg}, re-trying ({reruns_left} setup retries left)")
+                     self.failed_setups_left -= 1
+                     sinfo.provisioner.provision(1)
+                 else:
+                     util.warning(f"{msg}, setup retries exceeded, giving up")
+                     raise FailedSetupError("setup retries limit exceeded, broken infra?")
+             else:
+                 self._run_new_test(sinfo)
+
+         # release any extra Remotes being held as set-up when we know we won't
+         # use them for any tests (because to_run is empty)
+         else:
+             while self.setup_queue.qsize() > self.max_spares:
+                 try:
+                     treturn = self.setup_queue.get_raw(block=False)
+                 except util.ThreadQueue.Empty:
+                     break
+                 self.release_queue.start_thread(
+                     treturn.sinfo.remote.release,
+                     remote=treturn.sinfo.remote,
+                 )
+
+         # try to get new remotes from Provisioners - if we get some, start
+         # running setup on them
+         for provisioner in self.provisioners:
+             while (remote := provisioner.get_remote(block=False)) is not None:
+                 ex = executor.Executor(self.fmf_tests, remote, env=self.env)
+                 sinfo = self.SetupInfo(
+                     provisioner=provisioner,
+                     remote=remote,
+                     executor=ex,
+                 )
+                 self.setup_queue.start_thread(
+                     target=self.run_setup,
+                     target_args=(sinfo,),
+                     sinfo=sinfo,
+                 )
+                 util.info(f"{provisioner}: running setup on new {remote}")
+
+         # gather returns from Remote.release() functions - check for exceptions
+         # thrown, re-report them as warnings as they are not typically critical
+         # for operation
+         try:
+             treturn = self.release_queue.get_raw(block=False)
+         except util.ThreadQueue.Empty:
+             pass
+         else:
+             if treturn.exception:
+                 util.warning(f"{treturn.remote} release failed: {repr(treturn.exception)}")
+             else:
+                 util.debug(f"{treturn.remote}: completed .release()")
+
+         return True
+
+     def start(self):
+         # start all provisioners
+         for prov in self.provisioners:
+             prov.start()
+
+         # start up initial reservations, balanced evenly across all available
+         # provisioner instances
+         count = min(self.max_remotes, len(self.fmf_tests.tests)) + self.max_spares
+         provisioners = self.provisioners[:count]
+         for idx, prov in enumerate(provisioners):
+             if count % len(provisioners) > idx:
+                 prov.provision((count // len(provisioners)) + 1)
+             else:
+                 prov.provision(count // len(provisioners))
+
+     def stop(self):
+         # cancel all running tests and wait for them to clean up (up to 0.1sec)
+         for rinfo in self.running_tests.values():
+             rinfo.executor.cancel()
+         self.test_queue.join() # also ignore any exceptions raised
+
+         # stop all provisioners, also releasing all remotes
+         # TODO: don't parallelize here, remove .stop_defer() and parallelize in provisioners
+         if self.provisioners:
+             workers = min(len(self.provisioners), 20)
+             with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as ex:
+                 for provisioner in self.provisioners:
+                     for func in provisioner.stop_defer():
+                         ex.submit(func)
+
+     @staticmethod
+     def run_setup(sinfo):
+         """
+         Set up a newly acquired class Remote instance for test execution.
+
+         'sinfo' is a SetupInfo instance with the (fully connected) remote.
+         """
+         sinfo.executor.start()
+         sinfo.executor.upload_tests()
+         sinfo.executor.plan_prepare()
+         # NOTE: we never run executor.plan_finish() or even executor.stop()
+         # anywhere - instead, we assume the remote (and its connection)
+         # was invalidated by the test, so we just rely on remote.release()
+         # destroying the system
+
+     @staticmethod
+     def next_test(to_run, all_tests, previous): # noqa: ARG004
+         """
+         Return a test name (string) to be executed next.
+
+         'to_run' is a set of test names to pick from. The returned test name
+         must be chosen from this set.
+
+         'tests' is a dict indexed by test name (string), with values being
+         fully resolved fmf test metadata (dicts) of all possible tests.
+
+         'previous' can be either
+         - Orchestrator.SetupInfo instance (first test to be run)
+         - Orchestrator.FinishedInfo instance (previous executed test)
+
+         This method must not modify any of its arguments, it must treat them
+         as read-only, eg. don't remove the returned test name from 'to_run'.
+         """
+         # default to simply picking any available test
+         return next(iter(to_run))
+
+     @staticmethod
+     def destructive(info, test_data): # noqa: ARG004
+         """
+         Return a boolean result whether a finished test was destructive
+         to a class Remote instance, indicating that the Remote instance
+         should not be used for further test execution.
+
+         'info' is Orchestrator.FinishedInfo namedtuple of the test.
+
+         'test_data' is a dict of fully resolved fmf test metadata of that test.
+         """
+         # if Executor ended with an exception (ie. duration exceeded),
+         # consider the test destructive
+         if info.exception:
+             return True
+         # if the test returned non-0 exit code, it could have thrown
+         # a python exception of its own, or (if bash) aborted abruptly
+         # due to 'set -e', don't trust the remote, consider it destroyed
+         if info.exit_code != 0:
+             return True
+         # otherwise we good
+         return False
+
+     @staticmethod
+     def was_successful(info, test_data): # noqa: ARG004
+         """
+         Return a boolean result whether a finished test was successful.
+         Returning False might cause it to be re-run (per should_be_rerun()).
+
+         'info' is Orchestrator.FinishedInfo namedtuple of the test.
+
+         'test_data' is a dict of fully resolved fmf test metadata of that test.
+         """
+         remote_with_test = f"{info.remote}: '{info.test_name}'"
+
+         # executor (or test) threw exception
+         if info.exception:
+             util.info(f"{remote_with_test} threw {repr(info.exception)} during test runtime")
+             return False
+
+         # the test exited as non-0
+         if info.exit_code != 0:
+             util.info(f"{remote_with_test} exited with non-zero: {info.exit_code}")
+             return False
+
+         # otherwise we good
+         return True
+
+     # TODO: @staticmethod and remove ARG002
+     #@staticmethod
+     def should_be_rerun(self, info, test_data): # noqa: ARG004, ARG002
+         """
+         Return a boolean result whether a finished test failed in a way
+         that another execution attempt might succeed, due to race conditions
+         in the test or other non-deterministic factors.
+
+         'info' is Orchestrator.FinishedInfo namedtuple of the test.
+
+         'test_data' is a dict of fully resolved fmf test metadata of that test.
+         """
+         remote_with_test = f"{info.remote}: '{info.test_name}'"
+
+         # TODO: remove self.reruns and the whole X-reruns logic from AdHocOrchestrator,
+         # leave it up to the user to wrap should_be_rerun() with an external dict
+         # of tests, counting reruns for each
+         # - allows the user to adjust counts per-test (ie. test_data metadata)
+         # - allows this template to be @staticmethod
+         if (reruns_left := self.reruns[info.test_name]) > 0:
+             util.info(f"{remote_with_test}: re-running ({reruns_left} reruns left)")
+             self.reruns[info.test_name] -= 1
+             return True
+         else:
+             util.info(f"{remote_with_test}: reruns exceeded, giving up")
+             return False
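
For orientation, the new AdHocOrchestrator above is driven through start(), repeated serve_once() calls, and stop(), per its docstrings. The sketch below is a hypothetical driving loop, not code from this release; the platform string, tmp_dir path and the fmf_tests/provisioner/aggregator objects are placeholders the caller would construct via the other modules in this diff:

    import time

    orch = AdHocOrchestrator(
        platform="example-platform",   # placeholder platform name
        fmf_tests=fmf_tests,           # FMFTests instance (see atex/fmf.py)
        provisioners=[provisioner],    # one or more Provisioner instances
        aggregator=aggregator,         # aggregator instance (see atex/aggregator/)
        tmp_dir="/tmp/atex-work",      # placeholder temporary directory
        max_remotes=2,
    )
    orch.start()
    try:
        # serve_once() returns True while more work remains, False once all testing is done
        while orch.serve_once():
            time.sleep(1)              # polling interval is an arbitrary choice
    finally:
        orch.stop()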
atex/{provision → provisioner}/__init__.py
@@ -1,48 +1,81 @@
  import importlib as _importlib
  import pkgutil as _pkgutil
- import threading as _threading

  from .. import connection as _connection


+ class Remote(_connection.Connection):
+     """
+     Representation of a provisioned (reserved) remote system, providing
+     a Connection-like API in addition to system management helpers.
+
+     An instance of Remote is typically prepared by a Provisioner and returned
+     to the caller for use and an eventual .release().
+
+     Also note that Remote can be used via Context Manager, but does not
+     do automatic .release(), the manager only handles the built-in Connection.
+     The intention is for a Provisioner to run via its own Contest Manager and
+     release all Remotes upon exit.
+     If you need automatic release of one Remote, use a try/finally block.
+     """
+
+     def release(self):
+         """
+         Release (de-provision) the remote resource.
+         """
+         raise NotImplementedError(f"'release' not implemented for {self.__class__.__name__}")
+
+
  class Provisioner:
      """
      A remote resource (machine/system) provider.

-     The main interface is .get_remote() that returns a connected class Remote
-     instance for use by the user, to be .release()d when not needed anymore,
-     with Provisioner automatically getting a replacement for it, to be returned
-     via .get_remote() later.
+     The idea is to request machines (a.k.a. Remotes, or class Remote instances)
+     to be reserved via a non-blocking .provision() and for them to be retrieved
+     through blocking / non-blocking .get_remote() when they become available.
+
+     Each Remote has its own .release() for freeing (de-provisioning) it once
+     the user doesn't need it anymore. The Provisioner does this automatically
+     to all Remotes during .stop() or context manager exit.

          p = Provisioner()
          p.start()
+         p.provision(count=1)
          remote = p.get_remote()
          remote.cmd(["ls", "/"])
          remote.release()
          p.stop()

          with Provisioner() as p:
-             remote = p.get_remote()
+             p.provision(count=2)
+             remote1 = p.get_remote()
+             remote2 = p.get_remote()
              ...
-             remote.release()

+     Note that .provision() is a hint expressed by the caller, not a guarantee
+     that .get_remote() will ever return a Remote. Ie. the caller can call
+     .provision(count=math.inf) to receive as many remotes as the Provisioner
+     can possibly supply.
+
+     TODO: remove .defer_stop() (or stop_defer) and mention this below:
      Note that .stop() or .defer_stop() may be called from a different
      thread, asynchronously to any other functions.
      """

-     def __init__(self):
+     def provision(self, count=1):
          """
-         Initialize the provisioner instance.
-         If extending __init__, always call 'super().__init__()' at the top.
+         Request that 'count' machines be provisioned (reserved) for use,
+         to be returned at a later point by .get_remote().
          """
-         self.lock = _threading.RLock()
+         raise NotImplementedError(f"'provision' not implemented for {self.__class__.__name__}")

      def get_remote(self, block=True):
          """
-         Get a connected class Remote instance.
+         Return a connected class Remote instance of a previously .provision()ed
+         remote system.

-         If 'block' is True, wait for the remote to be available and connected,
-         otherwise return None if there is no Remote available yet.
+         If 'block' is True, wait for the Remote to be available and connected,
+         otherwise return None if there is none available yet.
          """
          raise NotImplementedError(f"'get_remote' not implemented for {self.__class__.__name__}")

@@ -70,41 +103,20 @@ class Provisioner:
          Ie. a list of 200 .release() functions, to be called in a thread pool
          by the user, speeding up cleanup.
          """
-         return self.stop
+         return (self.stop,)

      def __enter__(self):
-         self.start()
-         return self
+         try:
+             self.start()
+             return self
+         except Exception:
+             self.stop()
+             raise

      def __exit__(self, exc_type, exc_value, traceback):
          self.stop()


- class Remote(_connection.Connection):
-     """
-     Representation of a provisioned (reserved) remote system, providing
-     a Connection-like API in addition to system management helpers.
-
-     An instance of Remote is typically prepared by a Provisioner and lent out
-     for further use, to be .release()d by the user (if destroyed).
-     It is not meant for repeated reserve/release cycles, hence the lack
-     of .reserve().
-
-     Also note that Remote can be used via Context Manager, but does not
-     do automatic .release(), the manager only handles the built-in Connection.
-     The intention is for a Provisioner to run via its own Contest Manager and
-     release all Remotes upon exit.
-     If you need automatic release of one Remote, use a contextlib.ExitStack
-     with a callback, or a try/finally block.
-     """
-
-     def release(self):
-         """
-         Release (de-provision) the remote resource.
-         """
-         raise NotImplementedError(f"'release' not implemented for {self.__class__.__name__}")
-
-
  _submodules = [
      info.name for info in _pkgutil.iter_modules(__spec__.submodule_search_locations)
  ]
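
The reworked Remote docstring above points out that its context manager does not call .release(); releasing a single Remote early is left to the caller, e.g. via try/finally. A minimal sketch, assuming a hypothetical concrete subclass named MyProvisioner (no such class is defined in this diff):

    with MyProvisioner() as p:         # hypothetical Provisioner subclass
        p.provision(count=1)           # non-blocking reservation request
        remote = p.get_remote()        # blocks until a Remote is connected
        try:
            remote.cmd(["ls", "/"])    # Connection-like API on the Remote
        finally:
            remote.release()           # explicit release, as the docstring advises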
atex/provisioner/libvirt/__init__.py
@@ -0,0 +1,2 @@
+ from . import locking # noqa: F401
+ from .libvirt import LibvirtCloningProvisioner, LibvirtCloningRemote # noqa: F401