atex-0.9-py3-none-any.whl → atex-0.10-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. atex/aggregator/__init__.py +60 -0
  2. atex/{orchestrator/aggregator.py → aggregator/json.py} +6 -21
  3. atex/cli/__init__.py +11 -1
  4. atex/cli/libvirt.py +3 -2
  5. atex/cli/testingfarm.py +48 -3
  6. atex/connection/podman.py +2 -4
  7. atex/connection/ssh.py +7 -14
  8. atex/executor/executor.py +18 -17
  9. atex/executor/scripts.py +5 -3
  10. atex/executor/testcontrol.py +1 -1
  11. atex/orchestrator/__init__.py +76 -3
  12. atex/orchestrator/{orchestrator.py → adhoc.py} +183 -103
  13. atex/{provision → provisioner}/__init__.py +49 -37
  14. atex/{provision → provisioner}/libvirt/libvirt.py +21 -14
  15. atex/{provision → provisioner}/libvirt/locking.py +3 -1
  16. atex/provisioner/podman/__init__.py +2 -0
  17. atex/provisioner/podman/podman.py +169 -0
  18. atex/{provision → provisioner}/testingfarm/api.py +53 -44
  19. atex/{provision → provisioner}/testingfarm/testingfarm.py +17 -23
  20. atex/util/log.py +62 -67
  21. atex/util/subprocess.py +46 -12
  22. atex/util/threads.py +7 -0
  23. atex-0.10.dist-info/METADATA +86 -0
  24. atex-0.10.dist-info/RECORD +44 -0
  25. atex/provision/podman/__init__.py +0 -1
  26. atex/provision/podman/podman.py +0 -274
  27. atex-0.9.dist-info/METADATA +0 -178
  28. atex-0.9.dist-info/RECORD +0 -43
  29. /atex/{provision → provisioner}/libvirt/VM_PROVISION +0 -0
  30. /atex/{provision → provisioner}/libvirt/__init__.py +0 -0
  31. /atex/{provision → provisioner}/libvirt/setup-libvirt.sh +0 -0
  32. /atex/{provision → provisioner}/testingfarm/__init__.py +0 -0
  33. {atex-0.9.dist-info → atex-0.10.dist-info}/WHEEL +0 -0
  34. {atex-0.9.dist-info → atex-0.10.dist-info}/entry_points.txt +0 -0
  35. {atex-0.9.dist-info → atex-0.10.dist-info}/licenses/COPYING.txt +0 -0
atex/aggregator/__init__.py ADDED
@@ -0,0 +1,60 @@
+ import importlib as _importlib
+ import pkgutil as _pkgutil
+
+
+ class Aggregator:
+     """
+     TODO: generic description, not JSON-specific
+     """
+
+     def ingest(self, platform, test_name, results_file, files_dir):
+         """
+         Process 'results_file' (string/Path) for reported results and append
+         them to the overall aggregated line-JSON file, recursively copying over
+         the dir structure under 'files_dir' (string/Path) under the respective
+         platform and test name in the aggregated storage dir.
+         """
+         raise NotImplementedError(f"'ingest' not implemented for {self.__class__.__name__}")
+
+     def start(self):
+         """
+         Start the Aggregator instance, opening any files / allocating resources
+         as necessary.
+         """
+         raise NotImplementedError(f"'start' not implemented for {self.__class__.__name__}")
+
+     def stop(self):
+         """
+         Stop the Aggregator instance, freeing all allocated resources.
+         """
+         raise NotImplementedError(f"'stop' not implemented for {self.__class__.__name__}")
+
+     def __enter__(self):
+         try:
+             self.start()
+             return self
+         except Exception:
+             self.close()
+             raise
+
+     def __exit__(self, exc_type, exc_value, traceback):
+         self.stop()
+
+
+ _submodules = [
+     info.name for info in _pkgutil.iter_modules(__spec__.submodule_search_locations)
+ ]
+
+ __all__ = [*_submodules, Aggregator.__name__]  # noqa: PLE0604
+
+
+ def __dir__():
+     return __all__
+
+
+ # lazily import submodules
+ def __getattr__(attr):
+     if attr in _submodules:
+         return _importlib.import_module(f".{attr}", __name__)
+     else:
+         raise AttributeError(f"module '{__name__}' has no attribute '{attr}'")
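
The new atex/aggregator/__init__.py defines the abstract Aggregator interface plus PEP 562 module-level __getattr__/__dir__ hooks, so submodules such as atex.aggregator.json are only imported on first attribute access. Below is a minimal sketch of how a downstream implementation might use the interface and the inherited context-manager protocol; the FlatFileAggregator class and its storage layout are hypothetical, not part of atex:

import shutil
from pathlib import Path

from atex.aggregator import Aggregator  # Aggregator lives in __init__; submodules load lazily


class FlatFileAggregator(Aggregator):
    # hypothetical subclass: just copies per-test files under one directory
    def __init__(self, storage_dir):
        self.storage_dir = Path(storage_dir)

    def start(self):
        self.storage_dir.mkdir(parents=True, exist_ok=False)

    def stop(self):
        pass  # nothing to free

    def ingest(self, platform, test_name, results_file, files_dir):
        dest = self.storage_dir / platform / test_name.lstrip("/")
        shutil.copytree(files_dir, dest)


# __enter__/__exit__ come from the base class and call start()/stop()
with FlatFileAggregator("/tmp/aggregated") as agg:
    agg.ingest("x86_64", "/some/test", "results.json", "uploaded_files")  # illustrative call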
atex/{orchestrator/aggregator.py → aggregator/json.py} RENAMED
@@ -4,8 +4,10 @@ import shutil
  import threading
  from pathlib import Path

+ from . import Aggregator

- class JSONAggregator:
+
+ class JSONAggregator(Aggregator):
      """
      Collects reported results as a GZIP-ed line-JSON and files (logs) from
      multiple test runs under a shared directory.
@@ -37,7 +39,7 @@ class JSONAggregator:
          self.json_file = Path(json_file)
          self.json_gzip_fobj = None

-     def open(self):
+     def start(self):
          if self.json_file.exists():
              raise FileExistsError(f"{self.json_file} already exists")
          self.json_gzip_fobj = gzip.open(self.json_file, "wt", newline="\n")
@@ -46,29 +48,12 @@ class JSONAggregator:
              raise FileExistsError(f"{self.storage_dir} already exists")
          self.storage_dir.mkdir()

-     def close(self):
+     def stop(self):
          if self.json_gzip_fobj:
              self.json_gzip_fobj.close()
              self.json_gzip_fobj = None

-     def __enter__(self):
-         try:
-             self.open()
-             return self
-         except Exception:
-             self.close()
-             raise
-
-     def __exit__(self, exc_type, exc_value, traceback):
-         self.close()
-
      def ingest(self, platform, test_name, results_file, files_dir):
-         """
-         Process 'results_file' (string/Path) for reported results and append
-         them to the overall aggregated line-JSON file, recursively copying over
-         the dir structure under 'files_dir' (string/Path) under the respective
-         platform and test name in the aggregated storage dir.
-         """
          platform_dir = self.storage_dir / platform
          test_dir = platform_dir / test_name.lstrip("/")
          if test_dir.exists():
@@ -92,7 +77,7 @@ class JSONAggregator:
                  platform,
                  result_line["status"],
                  test_name,
-                 result_line.get("name"),
+                 result_line.get("name"),  # subtest
                  file_names,
                  result_line.get("note"),
              )
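
With the context-manager dunders now inherited from Aggregator, JSONAggregator is driven the same way as in 0.9; only the explicit method names changed from open()/close() to start()/stop(). A hedged usage sketch, assuming the constructor still takes the aggregated JSON path and the storage directory (argument order and values here are illustrative):

from atex.aggregator.json import JSONAggregator

agg = JSONAggregator("results.json.gz", "aggregated_files")  # illustrative arguments
with agg:  # __enter__ now calls start() instead of open()
    agg.ingest("x86_64", "/some/test", "test_results", "test_files")
# __exit__ calls stop(), closing the gzip stream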
atex/cli/__init__.py CHANGED
@@ -27,12 +27,18 @@ import pkgutil
  import argparse
  import logging

+ from .. import util
+

  def setup_logging(level):
+     if level <= util.EXTRADEBUG:
+         fmt = "%(asctime)s %(name)s: %(filename)s:%(lineno)s: %(funcName)s(): %(message)s"
+     else:
+         fmt = "%(asctime)s %(name)s: %(message)s"
      logging.basicConfig(
          level=level,
          stream=sys.stderr,
-         format="%(asctime)s %(name)s: %(message)s",
+         format=fmt,
          datefmt="%Y-%m-%d %H:%M:%S",
      )

@@ -53,6 +59,10 @@ def main():
          "--debug", "-d", action="store_const", dest="loglevel", const=logging.DEBUG,
          help="enable extra debugging (logging.DEBUG)",
      )
+     log_grp.add_argument(
+         "--extra-debug", "-D", action="store_const", dest="loglevel", const=util.EXTRADEBUG,
+         help="enable extra debugging (atex.util.EXTRADEBUG)",
+     )
      log_grp.add_argument(
          "--quiet", "-q", action="store_const", dest="loglevel", const=logging.WARNING,
          help="be quiet during normal operation (logging.WARNING)",
atex/cli/libvirt.py CHANGED
@@ -1,9 +1,10 @@
  import sys
  import re

- import libvirt
+ from .. import util
+ from ..provisioner.libvirt import locking

- from ..provision.libvirt import locking
+ libvirt = util.import_libvirt()


  def _libvirt_open(url=None):
atex/cli/testingfarm.py CHANGED
@@ -1,9 +1,10 @@
  import sys
  import json
  import pprint
+ import collections

  from .. import util
- from ..provision.testingfarm import api as tf
+ from ..provisioner.testingfarm import api as tf


  def _get_api(args):
@@ -36,7 +37,6 @@ def composes(args):
  def get_request(args):
      api = _get_api(args)
      request = tf.Request(args.request_id, api=api)
-     request.update()
      print(str(request))


@@ -79,6 +79,44 @@ def search_requests(args):
          print(f"{created} {req_id} : {envs_str}")


+ def stats(args):
+     api = _get_api(args)
+
+     def top_users_repos(requests):
+         tokens = collections.defaultdict(int)
+         repos = collections.defaultdict(int)
+         for req in requests:
+             tokens[req["token_id"]] += 1
+             if "fmf" in req["test"] and req["test"]["fmf"]:
+                 repos[req["test"]["fmf"]["url"]] += 1
+             elif "tmt" in req["test"] and req["test"]["tmt"]:
+                 repos[req["test"]["tmt"]["url"]] += 1
+
+         print("Top 10 token IDs:")
+         for token_id in sorted(tokens, key=lambda x: tokens[x], reverse=True)[:10]:
+             count = tokens[token_id]
+             print(f"{count:>5} {token_id}")
+
+         print("Top 10 repo URLs:")
+         for repo_url in sorted(repos, key=lambda x: repos[x], reverse=True)[:10]:
+             count = repos[repo_url]
+             print(f"{count:>5} {repo_url}")
+
+     def chain_without_none(*iterables):
+         for itr in iterables:
+             if itr is None:
+                 continue
+             for item in itr:
+                 if item is not None:
+                     yield item
+
+     queued_and_running = chain_without_none(
+         api.search_requests(state="queued", ranch=args.ranch, mine=False),
+         api.search_requests(state="running", ranch=args.ranch, mine=False),
+     )
+     top_users_repos(queued_and_running)
+
+
  def reserve(args):
      util.info(f"Reserving {args.compose} on {args.arch} for {args.timeout} minutes")

@@ -106,7 +144,6 @@ def reserve(args):
          util.info(f"Got machine: {m}")
          while True:
              try:
-                 res.request.update()
                  res.request.assert_alive()
              except tf.GoneAwayError as e:
                  print(e)
@@ -198,6 +235,12 @@ def parse_args(parser):
      cmd.add_argument("--after", help="only requests created after ISO8601")
      cmd.add_argument("--json", help="full details, one request per line", action="store_true")

+     cmd = cmds.add_parser(
+         "stats",
+         help="print out TF usage statistics",
+     )
+     cmd.add_argument("ranch", help="Testing Farm ranch name")
+
      cmd = cmds.add_parser(
          "reserve",
          help="reserve a system and ssh into it",
@@ -233,6 +276,8 @@ def main(args):
          cancel(args)
      elif args._cmd in ("search-requests", "sr"):
          search_requests(args)
+     elif args._cmd == "stats":
+         stats(args)
      elif args._cmd == "reserve":
          reserve(args)
      elif args._cmd in ("watch-pipeline", "wp"):
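
The new stats subcommand is a plain frequency count over queued and running requests. The same top-N pattern can be reproduced standalone with collections.Counter, which is equivalent to the defaultdict(int) plus sorted() combination used above; the request payloads below are made-up examples, not real Testing Farm data:

import collections

requests = [  # made-up request payloads
    {"token_id": "alice", "test": {"fmf": {"url": "https://example.com/repo-a"}}},
    {"token_id": "alice", "test": {"tmt": {"url": "https://example.com/repo-b"}}},
    {"token_id": "bob", "test": {"fmf": {"url": "https://example.com/repo-a"}}},
]

tokens = collections.Counter(req["token_id"] for req in requests)
repos = collections.Counter(
    (req["test"].get("fmf") or req["test"].get("tmt"))["url"] for req in requests
)

# most_common(10) gives the same ordering as sorted(..., reverse=True)[:10]
for token_id, count in tokens.most_common(10):
    print(f"{count:>5} {token_id}")
for repo_url, count in repos.most_common(10):
    print(f"{count:>5} {repo_url}")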
atex/connection/podman.py CHANGED
@@ -8,11 +8,11 @@ from .. import util
  from . import Connection


- class PodmanConnError(ConnectionError):
+ class PodmanConnectionError(ConnectionError):
      pass


- class PodmanConn(Connection):
+ class PodmanConnection(Connection):
      """
      Implements the Connection API via 'podman container exec' on an
      already-running container, it does not handle any image pulling,
@@ -42,7 +42,6 @@ class PodmanConn(Connection):
      def cmd(self, command, *, func=util.subprocess_run, **func_args):
          return func(
              ("podman", "container", "exec", "-i", self.container, *command),
-             skip_frames=1,
              **func_args,
          )

@@ -56,7 +55,6 @@ class PodmanConn(Connection):
                  "-e", f"/bin/bash -c 'exec podman container exec -i {self.container} \"$@\"'",
                  *args,
              ),
-             skip_frames=1,
              check=True,
              stdin=subprocess.DEVNULL,
              **func_args,
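
Dropping skip_frames=1 simplifies the call sites while Connection.cmd() keeps its pluggable func parameter: it just forwards the assembled argv plus any keyword arguments to whatever callable was supplied. A standalone sketch of that injection pattern; the LocalConnection class and the command are illustrative, not the atex implementation:

import subprocess

class LocalConnection:
    # illustrative: runs commands on the local host instead of in a container
    def cmd(self, command, *, func=subprocess.run, **func_args):
        # prepend the transport-specific argv, pass everything else through
        return func(("env", *command), **func_args)

conn = LocalConnection()
proc = conn.cmd(("echo", "hello"), check=True, capture_output=True, text=True)
print(proc.stdout.strip())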
atex/connection/ssh.py CHANGED
@@ -133,16 +133,16 @@ def _rsync_host_cmd(*args, options, password=None, sudo=None):
      )


- class StatelessSSHConn(Connection):
+ class StatelessSSHConnection(Connection):
      """
      Implements the Connection API using a ssh(1) client using "standalone"
      (stateless) logic - connect() and disconnect() are no-op, .cmd() simply
      executes the ssh client and .rsync() executes 'rsync -e ssh'.

-     Compared to ManagedSSHConn, this may be slow for many .cmd() calls,
+     Compared to ManagedSSHConnection, this may be slow for many .cmd() calls,
      but every call is stateless, there is no persistent connection.

-     If you need only one .cmd(), this will be faster than ManagedSSHConn.
+     If you need only one .cmd(), this will be faster than ManagedSSHConnection.
      """

      def __init__(self, options, *, password=None, sudo=None):
@@ -182,7 +182,6 @@ class StatelessSSHConn(Connection):
          unified_options["RemoteCommand"] = _shell_cmd(command, sudo=self.sudo)
          return func(
              _options_to_ssh(unified_options, password=self.password),
-             skip_frames=1,
              **func_args,
          )

@@ -197,7 +196,6 @@ class StatelessSSHConn(Connection):
                  password=self.password,
                  sudo=self.sudo,
              ),
-             skip_frames=1,
              check=True,
              stdin=subprocess.DEVNULL,
              **func_args,
@@ -216,17 +214,15 @@ class StatelessSSHConn(Connection):
  # checks .assert_master() and manually signals the running clients
  # when it gets DisconnectedError from it.

- class ManagedSSHConn(Connection):
+ class ManagedSSHConnection(Connection):
      """
      Implements the Connection API using one persistently-running ssh(1) client
      started in a 'ControlMaster' mode, with additional ssh clients using that
      session to execute remote commands. Similarly, .rsync() uses it too.

-     This is much faster than StatelessSSHConn when executing multiple commands,
-     but contains a complex internal state (what if ControlMaster disconnects?).
-
-     Hence why this implementation provides extra non-standard-Connection methods
-     to manage this complexity.
+     This is much faster than StatelessSSHConnection when executing multiple
+     commands, but contains a complex internal state (what if ControlMaster
+     disconnects?).
      """

      # TODO: thread safety and locking via self.lock ?
@@ -351,7 +347,6 @@ class ManagedSSHConn(Connection):
          action = "forward" if not cancel else "cancel"
          util.subprocess_run(
              _options_to_ssh(options, extra_cli_flags=("-O", action)),
-             skip_frames=1,
              check=True,
          )

@@ -365,7 +360,6 @@ class ManagedSSHConn(Connection):
          unified_options["ControlPath"] = self._tmpdir / "control.sock"
          return func(
              _options_to_ssh(unified_options),
-             skip_frames=1,
              **func_args,
          )

@@ -381,7 +375,6 @@ class ManagedSSHConn(Connection):
                  options=unified_options,
                  sudo=self.sudo,
              ),
-             skip_frames=1,
              check=True,
              stdin=subprocess.DEVNULL,
              **func_args,
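
Only the class names changed here (…Conn → …Connection); the Connection API itself is untouched. A hedged usage sketch of choosing between the two, assuming ssh_config-style keys in the options dict (the exact keys accepted by atex are not shown in this diff):

from atex.connection.ssh import StatelessSSHConnection, ManagedSSHConnection

options = {"Hostname": "test.example.com", "User": "root"}  # assumed key names

# one-off command: stateless is simpler, connect()/disconnect() are no-ops
StatelessSSHConnection(options).cmd(("uname", "-r"), check=True)

# many commands: ManagedSSHConnection reuses one ControlMaster session
managed = ManagedSSHConnection(options)
managed.connect()  # Connection API method per the docstrings above
for _ in range(10):
    managed.cmd(("true",), check=True)
managed.disconnect()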
atex/executor/executor.py CHANGED
@@ -26,9 +26,9 @@ class Executor:
      and uploaded files by those tests.

          tests_repo = "path/to/cloned/tests"
-         tests_data = atex.fmf.FMFTests(tests_repo, "/plans/default")
+         fmf_tests = atex.fmf.FMFTests(tests_repo, "/plans/default")

-         with Executor(tests_data, conn) as e:
+         with Executor(fmf_tests, conn) as e:
              e.upload_tests()
              e.plan_prepare()
              Path("output_here").mkdir()
@@ -42,12 +42,12 @@ class Executor:

          conn.cmd(["mkdir", "-p", "/shared"])

-         with Executor(tests_data, conn, state_dir="/shared") as e:
+         with Executor(fmf_tests, conn, state_dir="/shared") as e:
              e.upload_tests()
              e.plan_prepare()

          # in parallel (ie. threading or multiprocessing)
-         with Executor(tests_data, unique_conn, state_dir="/shared") as e:
+         with Executor(fmf_tests, unique_conn, state_dir="/shared") as e:
              e.run_test(...)
      """
@@ -74,7 +74,7 @@ class Executor:
          self.plan_env_file = None
          self.cancelled = False

-     def setup(self):
+     def start(self):
          with self.lock:
              state_dir = self.state_dir

@@ -107,7 +107,7 @@ class Executor:
          # create / truncate the TMT_PLAN_ENVIRONMENT_FILE
          self.conn.cmd(("truncate", "-s", "0", self.plan_env_file), check=True)

-     def cleanup(self):
+     def stop(self):
          with self.lock:
              work_dir = self.work_dir

@@ -121,14 +121,14 @@ class Executor:

      def __enter__(self):
          try:
-             self.setup()
+             self.start()
              return self
          except Exception:
-             self.cleanup()
+             self.stop()
              raise

      def __exit__(self, exc_type, exc_value, traceback):
-         self.cleanup()
+         self.stop()

      def cancel(self):
          with self.lock:
@@ -140,10 +140,10 @@ class Executor:
          __init__() inside 'fmf_tests', to the remote host.
          """
          self.conn.rsync(
-             "-rv" if util.in_debug_mode() else "-rq",
-             "--delete", "--exclude=.git/",
+             "-r", "--delete", "--exclude=.git/",
              f"{self.fmf_tests.root}/",
              f"remote:{self.tests_dir}",
+             func=util.subprocess_log,
          )

      def _run_prepare_scripts(self, scripts):
@@ -158,11 +158,10 @@ class Executor:
          for script in scripts:
              self.conn.cmd(
                  ("env", *env_args, "bash"),
+                 func=util.subprocess_log,
+                 stderr=subprocess.STDOUT,
                  input=script,
-                 text=True,
                  check=True,
-                 stdout=None if util.in_debug_mode() else subprocess.DEVNULL,
-                 stderr=subprocess.STDOUT,
              )

      def plan_prepare(self):
@@ -180,9 +179,9 @@ class Executor:
                  "dnf", "-y", "--setopt=install_weak_deps=False",
                  "install", *self.fmf_tests.prepare_pkgs,
              ),
-             check=True,
-             stdout=None if util.in_debug_mode() else subprocess.DEVNULL,
+             func=util.subprocess_log,
              stderr=subprocess.STDOUT,
+             check=True,
          )

          # run 'prepare' scripts from the plan
@@ -336,7 +335,9 @@ class Executor:
                          reconnects += 1
                          state = self.State.STARTING_TEST
                      except BlockingIOError:
-                         pass
+                         # avoid 100% CPU spinning if the connection it too slow
+                         # to come up (ie. ssh ControlMaster socket file not created)
+                         time.sleep(0.5)
                      except ConnectionError:
                          # can happen when ie. ssh is connecting over a LocalForward port,
                          # causing 'read: Connection reset by peer' instead of timeout
atex/executor/scripts.py CHANGED
@@ -1,7 +1,9 @@
+ import os
  import collections
- import yaml
  from pathlib import Path

+ import yaml
+
  from .. import util, fmf

  # name: fmf path to the test as string, ie. /some/test
@@ -50,7 +52,7 @@ def test_wrapper(*, test, tests_dir, test_exec):
      # doing it here avoids unnecessary traffic (reading stdin) via ssh,
      # even if it is fed from subprocess.DEVNULL on the runner

-     if util.in_debug_mode():
+     if os.environ.get("ATEX_DEBUG_TEST") == "1":
          out += "set -x\n"

      # use a subshell to limit the scope of the CWD change
@@ -122,7 +124,7 @@ def test_setup(*, test, wrapper_exec, test_exec, test_yaml, **kwargs):
      """
      out = "#!/bin/bash\n"

-     if util.in_debug_mode():
+     if os.environ.get("ATEX_DEBUG_TEST") == "1":
          out += "set -xe\n"
      else:
          out += "exec 1>/dev/null\n"
atex/executor/testcontrol.py CHANGED
@@ -156,7 +156,7 @@ class TestControl:
          except BufferFullError as e:
              raise BadControlError(str(e)) from None

-         util.debug(f"got control line: {line} // eof: {self.stream.eof}")
+         util.extradebug(f"control line: {line} // eof: {self.stream.eof}")

          if self.stream.eof:
              self.eof = True
atex/orchestrator/__init__.py CHANGED
@@ -1,3 +1,76 @@
- #from .aggregator import CSVAggregator, JSONAggregator  # noqa: F401
- from .aggregator import JSONAggregator  # noqa: F401
- from .orchestrator import Orchestrator, OrchestratorError, FailedSetupError  # noqa: F401
+ import importlib as _importlib
+ import pkgutil as _pkgutil
+ import time as _time
+
+
+ class OrchestratorError(Exception):
+     pass
+
+
+ class Orchestrator:
+     """
+     A scheduler for parallel execution on multiple resources (machines/systems).
+
+     TODO: more description
+     """
+
+     def serve_once(self):
+         """
+         Run the orchestration logic, processing any outstanding requests
+         (for provisioning, new test execution, etc.) and returning once these
+         are taken care of.
+
+         Returns True to indicate that it should be called again by the user
+         (more work to be done), False once all testing is concluded.
+         """
+         raise NotImplementedError(f"'serve_once' not implemented for {self.__class__.__name__}")
+
+     def serve_forever(self):
+         """
+         Run the orchestration logic, blocking until all testing is concluded.
+         """
+         while self.serve_once():
+             _time.sleep(1)
+
+     def start(self):
+         """
+         Start the Orchestrator instance, opening any files / allocating
+         resources as necessary.
+         """
+         raise NotImplementedError(f"'start' not implemented for {self.__class__.__name__}")
+
+     def stop(self):
+         """
+         Stop the Orchestrator instance, freeing all allocated resources.
+         """
+         raise NotImplementedError(f"'stop' not implemented for {self.__class__.__name__}")
+
+     def __enter__(self):
+         try:
+             self.start()
+             return self
+         except Exception:
+             self.stop()
+             raise
+
+     def __exit__(self, exc_type, exc_value, traceback):
+         self.stop()
+
+
+ _submodules = [
+     info.name for info in _pkgutil.iter_modules(__spec__.submodule_search_locations)
+ ]
+
+ __all__ = [*_submodules, Orchestrator.__name__]  # noqa: PLE0604
+
+
+ def __dir__():
+     return __all__
+
+
+ # lazily import submodules
+ def __getattr__(attr):
+     if attr in _submodules:
+         return _importlib.import_module(f".{attr}", __name__)
+     else:
+         raise AttributeError(f"module '{__name__}' has no attribute '{attr}'")
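
As with the aggregator package, the orchestrator package root now holds only the abstract base plus lazy submodule loading; the concrete implementation moved to atex.orchestrator.adhoc (file 12 above). A hedged sketch of the intended driving loop; the AdhocOrchestrator name and its constructor arguments are guesses based on the rename, not verified against the module:

from atex import orchestrator

# lazy: atex.orchestrator.adhoc is imported on first attribute access via __getattr__
adhoc = orchestrator.adhoc

with adhoc.AdhocOrchestrator(...) as orch:  # hypothetical class / arguments
    # either block until all testing is concluded ...
    orch.serve_forever()

    # ... or drive the loop manually, doing other work between iterations:
    # while orch.serve_once():
    #     do_other_work()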