atex 0.13__py3-none-any.whl → 0.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,3 +1,4 @@
1
+ import abc as _abc
1
2
  import importlib as _importlib
2
3
  import pkgutil as _pkgutil
3
4
  import time as _time
@@ -14,16 +15,16 @@ class Orchestrator:
14
15
  TODO: more description
15
16
  """
16
17
 
18
+ @_abc.abstractmethod
17
19
  def serve_once(self):
18
20
  """
19
21
  Run the orchestration logic, processing any outstanding requests
20
22
  (for provisioning, new test execution, etc.) and returning once these
21
23
  are taken care of.
22
24
 
23
- Returns True to indicate that it should be called again by the user
24
- (more work to be done), False once all testing is concluded.
25
+ Returns `True` to indicate that it should be called again by the user
26
+ (more work to be done), `False` once all testing is concluded.
25
27
  """
26
- raise NotImplementedError(f"'serve_once' not implemented for {self.__class__.__name__}")
27
28
 
28
29
  def serve_forever(self):
29
30
  """
@@ -32,18 +33,18 @@ class Orchestrator:
32
33
  while self.serve_once():
33
34
  _time.sleep(1)
34
35
 
36
+ @_abc.abstractmethod
35
37
  def start(self):
36
38
  """
37
39
  Start the Orchestrator instance, opening any files / allocating
38
40
  resources as necessary.
39
41
  """
40
- raise NotImplementedError(f"'start' not implemented for {self.__class__.__name__}")
41
42
 
43
+ @_abc.abstractmethod
42
44
  def stop(self):
43
45
  """
44
46
  Stop the Orchestrator instance, freeing all allocated resources.
45
47
  """
46
- raise NotImplementedError(f"'stop' not implemented for {self.__class__.__name__}")
47
48
 
48
49
  def __enter__(self):
49
50
  try:
@@ -1,3 +1,4 @@
1
+ import logging
1
2
  import tempfile
2
3
  import concurrent.futures
3
4
  from pathlib import Path
@@ -5,6 +6,8 @@ from pathlib import Path
5
6
  from .. import util, executor
6
7
  from . import Orchestrator, OrchestratorError
7
8
 
9
+ logger = logging.getLogger("atex.orchestrator.adhoc")
10
+
8
11
 
9
12
  class FailedSetupError(OrchestratorError):
10
13
  pass
@@ -62,32 +65,32 @@ class AdHocOrchestrator(Orchestrator):
62
65
  max_remotes=1, max_spares=0, max_failed_setups=10, env=None,
63
66
  ):
64
67
  """
65
- 'platform' is a string with platform name.
68
+ - `platform` is a string with platform name.
66
69
 
67
- 'fmf_tests' is a class FMFTests instance of the tests to run.
70
+ - `fmf_tests` is a class FMFTests instance of the tests to run.
68
71
 
69
- 'provisioners' is an iterable of class Provisioner instances.
72
+ - `provisioners` is an iterable of class Provisioner instances.
70
73
 
71
- 'aggregator' is a class CSVAggregator instance.
74
+ - `aggregator` is a class Aggregator instance.
72
75
 
73
- 'tmp_dir' is a string/Path to a temporary directory, to be used for
74
- storing per-test results and uploaded files before being ingested
75
- by the aggregator. Can be safely shared by Orchestrator instances.
76
+ - `tmp_dir` is a string/Path to a temporary directory, to be used for
77
+ storing per-test results and uploaded files before being ingested
78
+ by the aggregator. Can be safely shared by Orchestrator instances.
76
79
 
77
- 'max_remotes' is how many Remotes to hold reserved at any given time,
78
- eg. how many tests to run in parallel. Clamped to the number of
79
- to-be-run tests given as 'fmf_tests'.
80
+ - `max_remotes` is how many Remotes to hold reserved at any given time,
81
+ eg. how many tests to run in parallel. Clamped to the number of
82
+ to-be-run tests given as `fmf_tests`.
80
83
 
81
- 'max_spares' is how many set-up Remotes to hold reserved and unused,
82
- ready to replace a Remote destroyed by test. Values above 0 greatly
83
- speed up test reruns as Remote reservation happens asynchronously
84
- to test execution. Spares are reserved on top of 'max_remotes'.
84
+ - `max_spares` is how many set-up Remotes to hold reserved and unused,
85
+ ready to replace a Remote destroyed by test. Values above 0 greatly
86
+ speed up test reruns as Remote reservation happens asynchronously
87
+ to test execution. Spares are reserved on top of `max_remotes`.
85
88
 
86
- 'max_failed_setups' is an integer of how many times an Executor's
87
- plan setup (uploading tests, running prepare scripts, etc.) may fail
88
- before FailedSetupError is raised.
89
+ - `max_failed_setups` is an integer of how many times an Executor's
90
+ plan setup (uploading tests, running prepare scripts, etc.) may fail
91
+ before FailedSetupError is raised.
89
92
 
90
- 'env' is a dict of extra environment variables to pass to Executor.
93
+ - `env` is a dict of extra environment variables to pass to Executor.
91
94
  """
92
95
  if not fmf_tests.tests:
93
96
  raise ValueError("'fmf_tests' has no tests (bad discover params?)")
@@ -118,15 +121,17 @@ class AdHocOrchestrator(Orchestrator):
118
121
 
119
122
  def _run_new_test(self, info):
120
123
  """
121
- 'info' can be either
124
+ `info` can be either
125
+
122
126
  - SetupInfo instance with Remote/Executor to run the new test.
127
+
123
128
  - FinishedInfo instance of a previously executed test
124
129
  (reusing Remote/Executor for a new test).
125
130
  """
126
131
  next_test_name = self.next_test(self.to_run, self.fmf_tests.tests, info)
127
132
  assert next_test_name in self.to_run, "next_test() returned valid test name"
128
133
 
129
- util.info(f"{info.remote}: starting '{next_test_name}'")
134
+ logger.info(f"{info.remote}: starting '{next_test_name}'")
130
135
 
131
136
  self.to_run.remove(next_test_name)
132
137
 
@@ -155,14 +160,14 @@ class AdHocOrchestrator(Orchestrator):
155
160
 
156
161
  def _process_finished_test(self, finfo):
157
162
  """
158
- 'finfo' is a FinishedInfo instance.
163
+ `finfo` is a FinishedInfo instance.
159
164
  """
160
165
  test_data = self.fmf_tests.tests[finfo.test_name]
161
166
  remote_with_test = f"{finfo.remote}: '{finfo.test_name}'"
162
167
 
163
168
  if not self.was_successful(finfo, test_data) and self.should_be_rerun(finfo, test_data):
164
169
  # re-run the test
165
- util.info(f"{remote_with_test} failed, re-running")
170
+ logger.info(f"{remote_with_test} failed, re-running")
166
171
  self.to_run.add(finfo.test_name)
167
172
  else:
168
173
  # ingest the result
@@ -170,7 +175,7 @@ class AdHocOrchestrator(Orchestrator):
170
175
  # a condition just in case Executor code itself threw an exception
171
176
  # and didn't even report the fallback 'infra' result
172
177
  if finfo.results is not None and finfo.files is not None:
173
- util.info(f"{remote_with_test} completed, ingesting result")
178
+ logger.info(f"{remote_with_test} completed, ingesting result")
174
179
 
175
180
  def ingest_and_cleanup(ingest, args, cleanup):
176
181
  ingest(*args)
@@ -201,7 +206,7 @@ class AdHocOrchestrator(Orchestrator):
201
206
  # if there are still tests to be run and the last test was not
202
207
  # destructive, just run a new test on it
203
208
  if self.to_run and not (finfo.exception or self.destructive(finfo, test_data)):
204
- util.debug(f"{remote_with_test} was non-destructive, running next test")
209
+ logger.debug(f"{remote_with_test} was non-destructive, running next test")
205
210
  self._run_new_test(finfo)
206
211
  return
207
212
 
@@ -211,7 +216,7 @@ class AdHocOrchestrator(Orchestrator):
211
216
  if self.remotes_requested >= len(self.to_run):
212
217
  # we have enough remotes in the pipe to run every test,
213
218
  # we don't need a new one - just release the current one
214
- util.debug(f"{finfo.remote} no longer useful, releasing it")
219
+ logger.debug(f"{finfo.remote} no longer useful, releasing it")
215
220
  self.release_queue.start_thread(
216
221
  finfo.remote.release,
217
222
  remote=finfo.remote,
@@ -219,7 +224,7 @@ class AdHocOrchestrator(Orchestrator):
219
224
  else:
220
225
  # we need more remotes and the last test was destructive,
221
226
  # get a new one and let serve_once() run a test later
222
- util.debug(f"{remote_with_test} was destructive, getting a new Remote")
227
+ logger.debug(f"{remote_with_test} was destructive, getting a new Remote")
223
228
  self.release_queue.start_thread(
224
229
  finfo.remote.release,
225
230
  remote=finfo.remote,
@@ -227,14 +232,6 @@ class AdHocOrchestrator(Orchestrator):
227
232
  finfo.provisioner.provision(1)
228
233
 
229
234
  def serve_once(self):
230
- """
231
- Run the orchestration logic, processing any outstanding requests
232
- (for provisioning, new test execution, etc.) and returning once these
233
- are taken care of.
234
-
235
- Returns True to indicate that it should be called again by the user
236
- (more work to be done), False once all testing is concluded.
237
- """
238
235
  # all done
239
236
  if not self.to_run and not self.running_tests:
240
237
  return False
@@ -282,12 +279,12 @@ class AdHocOrchestrator(Orchestrator):
282
279
  remote=sinfo.remote,
283
280
  )
284
281
  if (retries_left := self.failed_setups_left) > 0:
285
- util.warning(f"{msg}, re-trying ({retries_left} setup retries left)")
282
+ logger.warning(f"{msg}, re-trying ({retries_left} setup retries left)")
286
283
  self.failed_setups_left -= 1
287
284
  sinfo.provisioner.provision(1)
288
285
  self.remotes_requested += 1
289
286
  else:
290
- util.warning(f"{msg}, setup retries exceeded, giving up")
287
+ logger.warning(f"{msg}, setup retries exceeded, giving up")
291
288
  raise FailedSetupError("setup retries limit exceeded, broken infra?")
292
289
  else:
293
290
  self._run_new_test(sinfo)
@@ -300,7 +297,7 @@ class AdHocOrchestrator(Orchestrator):
300
297
  treturn = self.setup_queue.get_raw(block=False)
301
298
  except util.ThreadQueue.Empty:
302
299
  break
303
- util.debug(f"releasing extraneous set-up {treturn.sinfo.remote}")
300
+ logger.debug(f"releasing extraneous set-up {treturn.sinfo.remote}")
304
301
  self.release_queue.start_thread(
305
302
  treturn.sinfo.remote.release,
306
303
  remote=treturn.sinfo.remote,
@@ -322,7 +319,7 @@ class AdHocOrchestrator(Orchestrator):
322
319
  target_args=(sinfo,),
323
320
  sinfo=sinfo,
324
321
  )
325
- util.info(f"{provisioner}: running setup on new {remote}")
322
+ logger.info(f"{provisioner}: running setup on new {remote}")
326
323
 
327
324
  # gather returns from Remote.release() functions - check for exceptions
328
325
  # thrown, re-report them as warnings as they are not typically critical
@@ -335,9 +332,9 @@ class AdHocOrchestrator(Orchestrator):
335
332
  else:
336
333
  if treturn.exception:
337
334
  exc_str = f"{type(treturn.exception).__name__}({treturn.exception})"
338
- util.warning(f"{treturn.remote} release failed: {exc_str}")
335
+ logger.warning(f"{treturn.remote} release failed: {exc_str}")
339
336
  else:
340
- util.debug(f"{treturn.remote} release completed")
337
+ logger.debug(f"{treturn.remote} release completed")
341
338
 
342
339
  # gather returns from Aggregator.ingest() calls - check for exceptions
343
340
  while True:
@@ -348,9 +345,9 @@ class AdHocOrchestrator(Orchestrator):
348
345
  else:
349
346
  if treturn.exception:
350
347
  exc_str = f"{type(treturn.exception).__name__}({treturn.exception})"
351
- util.warning(f"'{treturn.test_name}' ingesting failed: {exc_str}")
348
+ logger.warning(f"'{treturn.test_name}' ingesting failed: {exc_str}")
352
349
  else:
353
- util.debug(f"'{treturn.test_name}' ingesting completed")
350
+ logger.debug(f"'{treturn.test_name}' ingesting completed")
354
351
 
355
352
  return True
356
353
 
@@ -388,9 +385,9 @@ class AdHocOrchestrator(Orchestrator):
388
385
  else:
389
386
  if treturn.exception:
390
387
  exc_str = f"{type(treturn.exception).__name__}({treturn.exception})"
391
- util.warning(f"'{treturn.test_name}' ingesting failed: {exc_str}")
388
+ logger.warning(f"'{treturn.test_name}' ingesting failed: {exc_str}")
392
389
  else:
393
- util.debug(f"'{treturn.test_name}' ingesting completed")
390
+ logger.debug(f"'{treturn.test_name}' ingesting completed")
394
391
  self.ingest_queue.join()
395
392
 
396
393
  # stop all provisioners, also releasing all remotes
@@ -406,7 +403,7 @@ class AdHocOrchestrator(Orchestrator):
406
403
  """
407
404
  Set up a newly acquired class Remote instance for test execution.
408
405
 
409
- 'sinfo' is a SetupInfo instance with the (fully connected) remote.
406
+ - `sinfo` is a SetupInfo instance with the (fully connected) remote.
410
407
  """
411
408
  sinfo.executor.start()
412
409
  sinfo.executor.upload_tests()
@@ -421,18 +418,20 @@ class AdHocOrchestrator(Orchestrator):
421
418
  """
422
419
  Return a test name (string) to be executed next.
423
420
 
424
- 'to_run' is a set of test names to pick from. The returned test name
425
- must be chosen from this set.
421
+ - `to_run` is a set of test names to pick from. The returned test name
422
+ must be chosen from this set.
426
423
 
427
- 'tests' is a dict indexed by test name (string), with values being
428
- fully resolved fmf test metadata (dicts) of all possible tests.
424
+ - `tests` is a dict indexed by test name (string), with values being
425
+ fully resolved fmf test metadata (dicts) of all possible tests.
426
+
427
+ - `previous` can be either
429
428
 
430
- 'previous' can be either
431
429
  - Orchestrator.SetupInfo instance (first test to be run)
430
+
432
431
  - Orchestrator.FinishedInfo instance (previous executed test)
433
432
 
434
433
  This method must not modify any of its arguments, it must treat them
435
- as read-only, eg. don't remove the returned test name from 'to_run'.
434
+ as read-only, eg. don't remove the returned test name from `to_run`.
436
435
  """
437
436
  # default to simply picking any available test
438
437
  return next(iter(to_run))
@@ -444,9 +443,9 @@ class AdHocOrchestrator(Orchestrator):
444
443
  to a class Remote instance, indicating that the Remote instance
445
444
  should not be used for further test execution.
446
445
 
447
- 'info' is Orchestrator.FinishedInfo namedtuple of the test.
446
+ - `info` is Orchestrator.FinishedInfo namedtuple of the test.
448
447
 
449
- 'test_data' is a dict of fully resolved fmf test metadata of that test.
448
+ - `test_data` is a dict of fully resolved fmf test metadata of that test.
450
449
  """
451
450
  # if Executor ended with an exception (ie. duration exceeded),
452
451
  # consider the test destructive
@@ -464,21 +463,21 @@ class AdHocOrchestrator(Orchestrator):
464
463
  def was_successful(info, test_data): # noqa: ARG004
465
464
  """
466
465
  Return a boolean result whether a finished test was successful.
467
- Returning False might cause it to be re-run (per should_be_rerun()).
466
+ Returning `False` might cause it to be re-run (per `should_be_rerun()`).
468
467
 
469
- 'info' is Orchestrator.FinishedInfo namedtuple of the test.
468
+ - `info` is Orchestrator.FinishedInfo namedtuple of the test.
470
469
 
471
- 'test_data' is a dict of fully resolved fmf test metadata of that test.
470
+ - `test_data` is a dict of fully resolved fmf test metadata of that test.
472
471
  """
473
472
  remote_with_test = f"{info.remote}: '{info.test_name}'"
474
473
  # executor (or test) threw exception
475
474
  if info.exception:
476
475
  exc_str = f"{type(info.exception).__name__}({info.exception})"
477
- util.info(f"{remote_with_test} threw {exc_str} during test runtime")
476
+ logger.info(f"{remote_with_test} threw {exc_str} during test runtime")
478
477
  return False
479
478
  # the test exited as non-0
480
479
  if info.exit_code != 0:
481
- util.info(f"{remote_with_test} exited with non-zero: {info.exit_code}")
480
+ logger.info(f"{remote_with_test} exited with non-zero: {info.exit_code}")
482
481
  return False
483
482
  # otherwise we good
484
483
  return True
@@ -490,9 +489,9 @@ class AdHocOrchestrator(Orchestrator):
490
489
  that another execution attempt might succeed, due to race conditions
491
490
  in the test or other non-deterministic factors.
492
491
 
493
- 'info' is Orchestrator.FinishedInfo namedtuple of the test.
492
+ - `info` is Orchestrator.FinishedInfo namedtuple of the test.
494
493
 
495
- 'test_data' is a dict of fully resolved fmf test metadata of that test.
494
+ - `test_data` is a dict of fully resolved fmf test metadata of that test.
496
495
  """
497
496
  # never rerun by default
498
497
  return False
@@ -1,8 +1,11 @@
1
+ import logging
1
2
  import collections
2
3
 
3
4
  from .. import util
4
5
  from .adhoc import AdHocOrchestrator
5
6
 
7
+ logger = logging.getLogger("atex.provisioner.contest")
8
+
6
9
 
7
10
  # copy/pasted from the Contest repo, lib/virt.py
8
11
  def calculate_guest_tag(tags):
@@ -21,7 +24,7 @@ def calculate_guest_tag(tags):
21
24
  class ContestOrchestrator(AdHocOrchestrator):
22
25
  """
23
26
  Orchestrator for the Contest test suite:
24
- https://github.com/RHSecurityCompliance/contest
27
+ https://github.com/RHSecurityCompliance/contest
25
28
 
26
29
  Includes SCAP content upload via rsync and other Contest-specific
27
30
  optimizations (around VM snapshots and scheduling).
@@ -30,11 +33,12 @@ class ContestOrchestrator(AdHocOrchestrator):
30
33
 
31
34
  def __init__(self, *args, content_dir, max_reruns=1, **kwargs):
32
35
  """
33
- 'content_dir' is a filesystem path to ComplianceAsCode/content local
34
- directory, to be uploaded to the tested systems.
36
+ - `content_dir` is a filesystem path to ComplianceAsCode/content local
37
+ directory, to be uploaded to the tested systems.
35
38
 
36
- 'max_reruns' is an integer of how many times to re-try running a failed
37
- test (which exited with non-0 or caused an Executor exception).
39
+ - `max_reruns` is an integer of how many times to re-try running
40
+ a failed test (which exited with non-0 or caused an Executor
41
+ exception).
38
42
  """
39
43
  super().__init__(*args, **kwargs)
40
44
  self.content_dir = content_dir
@@ -59,9 +63,9 @@ class ContestOrchestrator(AdHocOrchestrator):
59
63
  if type(previous) is AdHocOrchestrator.SetupInfo:
60
64
  for next_name in to_run:
61
65
  next_tags = all_tests[next_name].get("tag", ())
62
- util.debug(f"considering next_test for destructivity: {next_name}")
66
+ logger.debug(f"considering next_test for destructivity: {next_name}")
63
67
  if "destructive" in next_tags:
64
- util.debug(f"chosen next_test: {next_name}")
68
+ logger.debug(f"chosen next_test: {next_name}")
65
69
  return next_name
66
70
 
67
71
  # previous test was run and finished non-destructively,
@@ -69,15 +73,15 @@ class ContestOrchestrator(AdHocOrchestrator):
69
73
  # as the previous one, allowing snapshot reuse by Contest
70
74
  elif type(previous) is AdHocOrchestrator.FinishedInfo:
71
75
  finished_tags = all_tests[previous.test_name].get("tag", ())
72
- util.debug(f"previous finished test on {previous.remote}: {previous.test_name}")
76
+ logger.debug(f"previous finished test on {previous.remote}: {previous.test_name}")
73
77
  # if Guest tag is None, don't bother searching
74
78
  if finished_guest_tag := calculate_guest_tag(finished_tags):
75
79
  for next_name in to_run:
76
- util.debug(f"considering next_test with tags {finished_tags}: {next_name}")
80
+ logger.debug(f"considering next_test with tags {finished_tags}: {next_name}")
77
81
  next_tags = all_tests[next_name].get("tag", ())
78
82
  next_guest_tag = calculate_guest_tag(next_tags)
79
83
  if next_guest_tag and finished_guest_tag == next_guest_tag:
80
- util.debug(f"chosen next_test: {next_name}")
84
+ logger.debug(f"chosen next_test: {next_name}")
81
85
  return next_name
82
86
 
83
87
  # fallback to the default next_test()
@@ -108,7 +112,7 @@ class ContestOrchestrator(AdHocOrchestrator):
108
112
  remote_with_test = f"{info.remote}: '{info.test_name}'"
109
113
 
110
114
  reruns_left = self.reruns[info.test_name]
111
- util.info(f"{remote_with_test}: {reruns_left} reruns left")
115
+ logger.info(f"{remote_with_test}: {reruns_left} reruns left")
112
116
  if reruns_left > 0:
113
117
  self.reruns[info.test_name] -= 1
114
118
  return True
@@ -1,3 +1,4 @@
1
+ import abc as _abc
1
2
  import importlib as _importlib
2
3
  import pkgutil as _pkgutil
3
4
 
@@ -10,20 +11,27 @@ class Remote(_connection.Connection):
10
11
  a Connection-like API in addition to system management helpers.
11
12
 
12
13
  An instance of Remote is typically prepared by a Provisioner and returned
13
- to the caller for use and an eventual .release().
14
+ to the caller for use and an eventual `.release()`.
14
15
 
15
16
  Also note that Remote can be used via Context Manager, but does not
16
- do automatic .release(), the manager only handles the built-in Connection.
17
+ do automatic `.release()`, the manager only handles the built-in Connection.
17
18
  The intention is for a Provisioner to run via its own Context Manager and
18
19
  release all Remotes upon exit.
19
- If you need automatic release of one Remote, use a try/finally block.
20
+
21
+ If you need automatic release of one Remote, use a try/finally block, ie.
22
+
23
+ try:
24
+ remote.cmd(...)
25
+ ...
26
+ finally:
27
+ remote.release()
20
28
  """
21
29
 
30
+ @_abc.abstractmethod
22
31
  def release(self):
23
32
  """
24
33
  Release (de-provision) the remote resource.
25
34
  """
26
- raise NotImplementedError(f"'release' not implemented for {self.__class__.__name__}")
27
35
 
28
36
 
29
37
  class Provisioner:
@@ -31,12 +39,13 @@ class Provisioner:
31
39
  A remote resource (machine/system) provider.
32
40
 
33
41
  The idea is to request machines (a.k.a. Remotes, or class Remote instances)
34
- to be reserved via a non-blocking .provision() and for them to be retrieved
35
- through blocking / non-blocking .get_remote() when they become available.
42
+ to be reserved via a non-blocking `.provision()` and for them to be
43
+ retrieved through blocking / non-blocking `.get_remote()` when they
44
+ become available.
36
45
 
37
- Each Remote has its own .release() for freeing (de-provisioning) it once
46
+ Each Remote has its own `.release()` for freeing (de-provisioning) it once
38
47
  the user doesn't need it anymore. The Provisioner does this automatically
39
- to all Remotes during .stop() or context manager exit.
48
+ to all Remotes during `.stop()` or Context Manager exit.
40
49
 
41
50
  p = Provisioner()
42
51
  p.start()
@@ -52,42 +61,42 @@ class Provisioner:
52
61
  remote2 = p.get_remote()
53
62
  ...
54
63
 
55
- Note that .provision() is a hint expressed by the caller, not a guarantee
56
- that .get_remote() will ever return a Remote. Ie. the caller can call
57
- .provision(count=math.inf) to receive as many remotes as the Provisioner
64
+ Note that `.provision()` is a hint expressed by the caller, not a guarantee
65
+ that `.get_remote()` will ever return a Remote. Ie. the caller can call
66
+ `.provision(count=math.inf)` to receive as many remotes as the Provisioner
58
67
  can possibly supply.
59
68
  """
60
69
 
70
+ @_abc.abstractmethod
61
71
  def provision(self, count=1):
62
72
  """
63
- Request that 'count' machines be provisioned (reserved) for use,
64
- to be returned at a later point by .get_remote().
73
+ Request that `count` machines be provisioned (reserved) for use,
74
+ to be returned at a later point by `.get_remote()`.
65
75
  """
66
- raise NotImplementedError(f"'provision' not implemented for {self.__class__.__name__}")
67
76
 
77
+ @_abc.abstractmethod
68
78
  def get_remote(self, block=True):
69
79
  """
70
- Return a connected class Remote instance of a previously .provision()ed
71
- remote system.
80
+ Return a connected class Remote instance of a previously
81
+ `.provision()`ed remote system.
72
82
 
73
- If 'block' is True, wait for the Remote to be available and connected,
74
- otherwise return None if there is none available yet.
83
+ - If `block` is True, wait for the Remote to be available and connected,
84
+ otherwise return None if there is none available yet.
75
85
  """
76
- raise NotImplementedError(f"'get_remote' not implemented for {self.__class__.__name__}")
77
86
 
87
+ @_abc.abstractmethod
78
88
  def start(self):
79
89
  """
80
90
  Start the Provisioner instance, start any provisioning-related
81
91
  processes that lead to systems being reserved.
82
92
  """
83
- raise NotImplementedError(f"'start' not implemented for {self.__class__.__name__}")
84
93
 
94
+ @_abc.abstractmethod
85
95
  def stop(self):
86
96
  """
87
97
  Stop the Provisioner instance, freeing all reserved resources,
88
- calling .release() on all Remote instances that were created.
98
+ calling `.release()` on all Remote instances that were created.
89
99
  """
90
- raise NotImplementedError(f"'stop' not implemented for {self.__class__.__name__}")
91
100
 
92
101
  def __enter__(self):
93
102
  try: