atex 0.7__py3-none-any.whl → 0.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
atex/cli/fmf.py ADDED
@@ -0,0 +1,93 @@
1
+ import sys
2
+ import pprint
3
+
4
+ from .. import fmf
5
+
6
+
7
+ def _fatal(msg):
8
+ print(msg, file=sys.stderr)
9
+ sys.exit(1)
10
+
11
+
12
+ def _get_context(args):
13
+ context = {}
14
+ if args.context:
15
+ for c in args.context:
16
+ key, value = c.split("=", 1)
17
+ context[key] = value
18
+ return context or None
19
+
20
+
21
+ def discover(args):
22
+ result = fmf.FMFTests(args.root, args.plan, context=_get_context(args))
23
+ for name in result.tests:
24
+ print(name)
25
+
26
+
27
+ def show(args):
28
+ result = fmf.FMFTests(args.root, args.plan, context=_get_context(args))
29
+ if tests := list(result.match(args.test)):
30
+ for test in tests:
31
+ print(f"\n--- {test.name} ---")
32
+ pprint.pprint(test.data)
33
+ else:
34
+ _fatal(f"Not reachable via {args.plan} discovery: {args.test}")
35
+
36
+
37
+ def prepare(args):
38
+ result = fmf.FMFTests(args.root, args.plan, context=_get_context(args))
39
+ print("--- fmf root ---")
40
+ print(str(result.root))
41
+ print("--- prepare packages ---")
42
+ print("\n".join(result.prepare_pkgs))
43
+ print("--- plan environment ---")
44
+ print("\n".join(f"{k}={v}" for k, v in result.plan_env.items()))
45
+ for script in result.prepare_scripts:
46
+ print("--- prepare script ---")
47
+ print(script)
48
+ print("----------------------")
49
+
50
+
51
+ def parse_args(parser):
52
+ parser.add_argument("--root", help="path to directory with fmf tests", default=".")
53
+ parser.add_argument("--context", "-c", help="tmt style key=value context", action="append")
54
+ cmds = parser.add_subparsers(
55
+ dest="_cmd", help="executor feature", metavar="<cmd>", required=True,
56
+ )
57
+
58
+ cmd = cmds.add_parser(
59
+ "discover", aliases=("di",),
60
+ help="list tests, post-processed by tmt plans",
61
+ )
62
+ cmd.add_argument("plan", help="tmt plan to use for discovery")
63
+
64
+ cmd = cmds.add_parser(
65
+ "show",
66
+ help="show fmf data of a test",
67
+ )
68
+ cmd.add_argument("plan", help="tmt plan to use for discovery")
69
+ cmd.add_argument("test", help="fmf style test regex")
70
+
71
+ cmd = cmds.add_parser(
72
+ "prepare",
73
+ help="show prepare-related FMFTests details",
74
+ )
75
+ cmd.add_argument("plan", help="tmt plan to parse")
76
+
77
+
78
+ def main(args):
79
+ if args._cmd in ("discover", "di"):
80
+ discover(args)
81
+ elif args._cmd == "show":
82
+ show(args)
83
+ elif args._cmd == "prepare":
84
+ prepare(args)
85
+ else:
86
+ raise RuntimeError(f"unknown args: {args}")
87
+
88
+
89
+ CLI_SPEC = {
90
+ "help": "simple CLI interface to atex.fmf",
91
+ "args": parse_args,
92
+ "main": main,
93
+ }
atex/cli/testingfarm.py CHANGED
@@ -1,4 +1,5 @@
1
1
  import sys
2
+ import json
2
3
  import pprint
3
4
 
4
5
  from .. import util
@@ -49,6 +50,8 @@ def search_requests(args):
49
50
  reply = api.search_requests(
50
51
  state=args.state,
51
52
  mine=not args.all,
53
+ user_id=args.user_id,
54
+ token_id=args.token_id,
52
55
  ranch=args.ranch,
53
56
  created_before=args.before,
54
57
  created_after=args.after,
@@ -56,20 +59,24 @@ def search_requests(args):
56
59
  if not reply:
57
60
  return
58
61
 
59
- for req in sorted(reply, key=lambda x: x["created"]):
60
- req_id = req["id"]
61
- created = req["created"].partition(".")[0]
62
+ if args.json:
63
+ for req in sorted(reply, key=lambda x: x["created"]):
64
+ print(json.dumps(req))
65
+ else:
66
+ for req in sorted(reply, key=lambda x: x["created"]):
67
+ req_id = req["id"]
68
+ created = req["created"].partition(".")[0]
62
69
 
63
- envs = []
64
- for env in req["environments_requested"]:
65
- if "os" in env and env["os"] and "compose" in env["os"]:
66
- compose = env["os"]["compose"]
67
- arch = env["arch"]
68
- if compose and arch:
69
- envs.append(f"{compose}@{arch}")
70
- envs_str = ", ".join(envs)
70
+ envs = []
71
+ for env in req["environments_requested"]:
72
+ if "os" in env and env["os"] and "compose" in env["os"]:
73
+ compose = env["os"]["compose"]
74
+ arch = env["arch"]
75
+ if compose and arch:
76
+ envs.append(f"{compose}@{arch}")
77
+ envs_str = ", ".join(envs)
71
78
 
72
- print(f"{created} {req_id} : {envs_str}")
79
+ print(f"{created} {req_id} : {envs_str}")
73
80
 
74
81
 
75
82
  def reserve(args):
@@ -177,9 +184,12 @@ def parse_args(parser):
177
184
  )
178
185
  cmd.add_argument("--state", help="request state (running, etc.)", required=True)
179
186
  cmd.add_argument("--all", help="all requests, not just owned by token", action="store_true")
180
- cmd.add_argument("--ranch", help="Testing Farm ranch")
187
+ cmd.add_argument("--ranch", help="Testing Farm ranch (detected from token)")
188
+ cmd.add_argument("--user-id", help="'user_id' request field (detected from token)")
189
+ cmd.add_argument("--token-id", help="'token_id' request field (detected from token)")
181
190
  cmd.add_argument("--before", help="only requests created before ISO8601")
182
191
  cmd.add_argument("--after", help="only requests created after ISO8601")
192
+ cmd.add_argument("--json", help="full details, one request per line", action="store_true")
183
193
 
184
194
  cmd = cmds.add_parser(
185
195
  "reserve",
@@ -65,14 +65,6 @@ class Connection:
65
65
  """
66
66
  raise NotImplementedError(f"'disconnect' not implemented for {self.__class__.__name__}")
67
67
 
68
- # TODO: is this needed? .. we probably want Remote.alive() instead
69
- #def alive(self):
70
- # """
71
- # Return True if the connection was established and is active,
72
- # False otherwise.
73
- # """
74
- # raise NotImplementedError(f"'alive' not implemented for {self.__class__.__name__}")
75
-
76
68
  def cmd(self, command, func=_util.subprocess_run, **func_args):
77
69
  """
78
70
  Execute a single command on the remote, using subprocess-like semantics.
atex/connection/ssh.py CHANGED
@@ -68,7 +68,7 @@ def _shell_cmd(command, sudo=None):
68
68
  """
69
69
  Make a command line for running 'command' on the target system.
70
70
  """
71
- quoted_args = (shlex.quote(arg) for arg in command)
71
+ quoted_args = (shlex.quote(str(arg)) for arg in command)
72
72
  if sudo:
73
73
  return " ".join((
74
74
  "exec", "sudo", "--no-update", "--non-interactive", "--user", sudo, "--", *quoted_args,
@@ -167,15 +167,12 @@ class StatelessSSHConn(Connection):
167
167
  Optional, .cmd() and .rsync() work without it, but it is provided here
168
168
  for compatibility with the Connection API.
169
169
  """
170
- # TODO: just wait until .cmd(['true']) starts responding
170
+ # TODO: just wait until .cmd(['true']) starts responding ?
171
171
  pass
172
172
 
173
173
  def disconnect(self):
174
174
  pass
175
175
 
176
- # def alive(self):
177
- # return True
178
-
179
176
  # have options as kwarg to be compatible with other functions here
180
177
  def cmd(self, command, options=None, func=util.subprocess_run, **func_args):
181
178
  unified_options = self.options.copy()
@@ -231,7 +228,7 @@ class ManagedSSHConn(Connection):
231
228
  to manage this complexity.
232
229
  """
233
230
 
234
- # TODO: thread safety and locking via self.lock
231
+ # TODO: thread safety and locking via self.lock ?
235
232
 
236
233
  def __init__(self, options, *, password=None, sudo=None):
237
234
  """
@@ -251,12 +248,6 @@ class ManagedSSHConn(Connection):
251
248
  self._tmpdir = None
252
249
  self._master_proc = None
253
250
 
254
- # def __copy__(self):
255
- # return type(self)(self.options, password=self.password)
256
- #
257
- # def copy(self):
258
- # return self.__copy__()
259
-
260
251
  def assert_master(self):
261
252
  proc = self._master_proc
262
253
  if not proc:
@@ -272,13 +263,6 @@ class ManagedSSHConn(Connection):
272
263
  f"SSH ControlMaster on {self._tmpdir} exited with {code}{out}",
273
264
  )
274
265
 
275
- # def alive(self):
276
- # try:
277
- # self.assert_master()
278
- # return True
279
- # except (NotConnectedError, DisconnectedError):
280
- # return False
281
-
282
266
  def disconnect(self):
283
267
  proc = self._master_proc
284
268
  if not proc:
@@ -0,0 +1,2 @@
1
+ from . import testcontrol # noqa: F401
2
+ from .executor import Executor # noqa: F401
@@ -0,0 +1,60 @@
1
+ import re
2
+ import time
3
+
4
+
5
+ class Duration:
6
+ """
7
+ A helper for parsing, keeping and manipulating test run time based on
8
+ FMF-defined 'duration' attribute.
9
+ """
10
+
11
+ def __init__(self, fmf_duration):
12
+ """
13
+ 'fmf_duration' is the string specified as 'duration' in FMF metadata.
14
+ """
15
+ duration = self._fmf_to_seconds(fmf_duration)
16
+ self.end = time.monotonic() + duration
17
+ # keep track of only the first 'save' and the last 'restore',
18
+ # ignore any nested ones (as tracked by '_count')
19
+ self.saved = None
20
+ self.saved_count = 0
21
+
22
+ @staticmethod
23
+ def _fmf_to_seconds(string):
24
+ match = re.fullmatch(r"([0-9]+)([a-z]*)", string)
25
+ if not match:
26
+ raise RuntimeError(f"'duration' has invalid format: {string}")
27
+ length, unit = match.groups()
28
+ if unit == "m":
29
+ return int(length)*60
30
+ elif unit == "h":
31
+ return int(length)*60*60
32
+ elif unit == "d":
33
+ return int(length)*60*60*24
34
+ else:
35
+ return int(length)
36
+
37
+ def set(self, to):
38
+ self.end = time.monotonic() + self._fmf_to_seconds(to)
39
+
40
+ def increment(self, by):
41
+ self.end += self._fmf_to_seconds(by)
42
+
43
+ def decrement(self, by):
44
+ self.end -= self._fmf_to_seconds(by)
45
+
46
+ def save(self):
47
+ if self.saved_count == 0:
48
+ self.saved = self.end - time.monotonic()
49
+ self.saved_count += 1
50
+
51
+ def restore(self):
52
+ if self.saved_count > 1:
53
+ self.saved_count -= 1
54
+ elif self.saved_count == 1:
55
+ self.end = time.monotonic() + self.saved
56
+ self.saved_count = 0
57
+ self.saved = None
58
+
59
+ def out_of_time(self):
60
+ return time.monotonic() > self.end
@@ -0,0 +1,378 @@
1
+ import os
2
+ import select
3
+ import threading
4
+ import contextlib
5
+ import subprocess
6
+ from pathlib import Path
7
+
8
+ from .. import util, fmf
9
+ from . import testcontrol, scripts
10
+ from .duration import Duration
11
+ from .reporter import Reporter
12
+
13
+
14
+ class TestAbortedError(Exception):
15
+ """
16
+ Raised when an infrastructure-related issue happened while running a test.
17
+ """
18
+ pass
19
+
20
+
21
+ class Executor:
22
+ """
23
+ Logic for running tests on a remote system and processing results
24
+ and uploaded files by those tests.
25
+
26
+ tests_repo = "path/to/cloned/tests"
27
+ tests_data = atex.fmf.FMFTests(tests_repo, "/plans/default")
28
+
29
+ with Executor(tests_data, conn) as e:
30
+ e.upload_tests(tests_repo)
31
+ e.setup_plan()
32
+ e.run_test("/some/test", "results/here.json", "uploaded/files/here")
33
+ e.run_test(...)
34
+
35
+ One Executor instance may be used to run multiple tests sequentially.
36
+ In addition, multiple Executor instances can run in parallel on the same
37
+ host, provided each receives a unique class Connection instance to it.
38
+
39
+ conn.cmd(["mkdir", "-p", "/shared"])
40
+
41
+ with Executor(tests_data, conn, state_dir="/shared") as e:
42
+ e.upload_tests(tests_repo)
43
+ e.setup_plan()
44
+
45
+ # in parallel (ie. threading or multiprocessing)
46
+ with Executor(tests_data, unique_conn, state_dir="/shared") as e:
47
+ e.run_test(...)
48
+ """
49
+
50
+ def __init__(self, fmf_tests, connection, *, state_dir=None):
51
+ """
52
+ 'fmf_tests' is a class FMFTests instance with (discovered) tests.
53
+
54
+ 'connection' is a class Connection instance, already fully connected.
55
+
56
+ 'state_dir' is a string or Path specifying path on the remote system for
57
+ storing additional data, such as tests, execution wrappers, temporary
58
+ plan-exported variables, etc. If left as None, a tmpdir is used.
59
+ """
60
+ self.lock = threading.RLock()
61
+ self.conn = connection
62
+ self.fmf_tests = fmf_tests
63
+ self.state_dir = state_dir
64
+ self.work_dir = None
65
+ self.tests_dir = None
66
+ self.plan_env_file = None
67
+ self.cancelled = False
68
+
69
+ def setup(self):
70
+ with self.lock:
71
+ state_dir = self.state_dir
72
+
73
+ # if user defined a state dir, have shared tests, but use per-instance
74
+ # work_dir for test wrappers, etc., identified by this instance's id(),
75
+ # which should be unique as long as this instance exists
76
+ if state_dir:
77
+ state_dir = Path(state_dir)
78
+ work_dir = state_dir / f"atex-{id(self)}"
79
+ self.conn.cmd(("mkdir", work_dir), check=True)
80
+ with self.lock:
81
+ self.tests_dir = state_dir / "tests"
82
+ self.plan_env_file = state_dir / "plan_env"
83
+ self.work_dir = work_dir
84
+
85
+ # else just create a tmpdir
86
+ else:
87
+ tmp_dir = self.conn.cmd(
88
+ # /var is not cleaned up by bootc, /var/tmp is
89
+ ("mktemp", "-d", "-p", "/var", "atex-XXXXXXXXXX"),
90
+ func=util.subprocess_output,
91
+ )
92
+ tmp_dir = Path(tmp_dir)
93
+ with self.lock:
94
+ self.tests_dir = tmp_dir / "tests"
95
+ self.plan_env_file = tmp_dir / "plan_env"
96
+ # use the tmpdir as work_dir, avoid extra mkdir over conn
97
+ self.work_dir = tmp_dir
98
+
99
+ def cleanup(self):
100
+ with self.lock:
101
+ work_dir = self.work_dir
102
+
103
+ if work_dir:
104
+ self.conn.cmd(("rm", "-rf", work_dir), check=True)
105
+
106
+ with self.lock:
107
+ self.work_dir = None
108
+ self.tests_dir = None
109
+ self.plan_env_file = None
110
+
111
+ def __enter__(self):
112
+ self.setup()
113
+ return self
114
+
115
+ def __exit__(self, exc_type, exc_value, traceback):
116
+ self.cleanup()
117
+
118
+ def cancel(self):
119
+ with self.lock:
120
+ self.cancelled = True
121
+
122
+ def upload_tests(self):
123
+ """
124
+ Upload a directory of all tests, the location of which was provided to
125
+ __init__() inside 'fmf_tests', to the remote host.
126
+ """
127
+ self.conn.rsync(
128
+ "-rv" if util.in_debug_mode() else "-rq",
129
+ "--delete", "--exclude=.git/",
130
+ f"{self.fmf_tests.root}/",
131
+ f"remote:{self.tests_dir}",
132
+ )
133
+
134
+ def setup_plan(self):
135
+ """
136
+ Install packages and run scripts extracted from a TMT plan by a FMFTests
137
+ instance given during class initialization.
138
+
139
+ Also prepare additional environment for tests, ie. create and export
140
+ a path to TMT_PLAN_ENVIRONMENT_FILE.
141
+ """
142
+ # install packages from the plan
143
+ if self.fmf_tests.prepare_pkgs:
144
+ self.conn.cmd(
145
+ (
146
+ "dnf", "-y", "--setopt=install_weak_deps=False",
147
+ "install", *self.fmf_tests.prepare_pkgs,
148
+ ),
149
+ check=True,
150
+ stdout=None if util.in_debug_mode() else subprocess.DEVNULL,
151
+ stderr=subprocess.STDOUT,
152
+ )
153
+
154
+ # make environment for 'prepare' scripts
155
+ self.conn.cmd(("truncate", "-s", "0", self.plan_env_file), check=True)
156
+ env = self.fmf_tests.plan_env.copy()
157
+ env["TMT_PLAN_ENVIRONMENT_FILE"] = self.plan_env_file
158
+ env_args = (f"{k}={v}" for k, v in env.items())
159
+
160
+ # run the prepare scripts
161
+ for script in self.fmf_tests.prepare_scripts:
162
+ self.conn.cmd(
163
+ ("env", *env_args, "bash"),
164
+ input=script,
165
+ text=True,
166
+ check=True,
167
+ stdout=None if util.in_debug_mode() else subprocess.DEVNULL,
168
+ stderr=subprocess.STDOUT,
169
+ )
170
+
171
+ def run_test(self, test_name, json_file, files_dir, *, env=None):
172
+ """
173
+ Run one test on the remote system.
174
+
175
+ 'test_name' is a string with test name.
176
+
177
+ 'json_file' is a destination file (string or Path) for results.
178
+
179
+ 'files_dir' is a destination dir (string or Path) for uploaded files.
180
+
181
+ 'env' is a dict of extra environment variables to pass to the test.
182
+
183
+ Returns an integer exit code of the test script.
184
+ """
185
+ test_data = self.fmf_tests.tests[test_name]
186
+
187
+ # start with fmf-plan-defined environment
188
+ env_vars = self.fmf_tests.plan_env.copy()
189
+ # append fmf-test-defined environment into it
190
+ for item in fmf.listlike(test_data, "environment"):
191
+ env_vars.update(item)
192
+ # append additional variables typically exported by tmt
193
+ env_vars["TMT_PLAN_ENVIRONMENT_FILE"] = self.plan_env_file
194
+ env_vars["TMT_TEST_NAME"] = test_name
195
+ env_vars["ATEX_TEST_NAME"] = test_name
196
+ # append variables given to this function call
197
+ if env:
198
+ env_vars.update(env)
199
+
200
+ # run a setup script, preparing wrapper + test scripts
201
+ setup_script = scripts.test_setup(
202
+ test=scripts.Test(test_name, test_data, self.fmf_tests.test_dirs[test_name]),
203
+ tests_dir=self.tests_dir,
204
+ wrapper_exec=f"{self.work_dir}/wrapper.sh",
205
+ test_exec=f"{self.work_dir}/test.sh",
206
+ )
207
+ self.conn.cmd(("bash",), input=setup_script, text=True, check=True)
208
+
209
+ with contextlib.ExitStack() as stack:
210
+ reporter = stack.enter_context(Reporter(json_file, files_dir))
211
+ testout_fd = stack.enter_context(reporter.open_tmpfile())
212
+ duration = Duration(test_data.get("duration", "5m"))
213
+
214
+ test_proc = None
215
+ control_fd = None
216
+ stack.callback(lambda: os.close(control_fd) if control_fd else None)
217
+
218
+ reconnects = 0
219
+
220
+ def abort(msg):
221
+ if test_proc:
222
+ test_proc.kill()
223
+ test_proc.wait()
224
+ raise TestAbortedError(msg) from None
225
+
226
+ try:
227
+ # TODO: probably enum
228
+ state = "starting_test"
229
+ while not duration.out_of_time():
230
+ with self.lock:
231
+ if self.cancelled:
232
+ abort("cancel requested")
233
+
234
+ if state == "starting_test":
235
+ control_fd, pipe_w = os.pipe()
236
+ os.set_blocking(control_fd, False)
237
+ control = testcontrol.TestControl(
238
+ control_fd=control_fd,
239
+ reporter=reporter,
240
+ duration=duration,
241
+ testout_fd=testout_fd,
242
+ )
243
+ # reconnect/reboot count (for compatibility)
244
+ env_vars["TMT_REBOOT_COUNT"] = str(reconnects)
245
+ env_vars["TMT_TEST_RESTART_COUNT"] = str(reconnects)
246
+ # run the test in the background, letting it log output directly to
247
+ # an opened file (we don't handle it, cmd client sends it to kernel)
248
+ env_args = (f"{k}={v}" for k, v in env_vars.items())
249
+ test_proc = self.conn.cmd(
250
+ ("env", *env_args, f"{self.work_dir}/wrapper.sh"),
251
+ stdout=pipe_w,
252
+ stderr=testout_fd,
253
+ func=util.subprocess_Popen,
254
+ )
255
+ os.close(pipe_w)
256
+ state = "reading_control"
257
+
258
+ elif state == "reading_control":
259
+ rlist, _, xlist = select.select((control_fd,), (), (control_fd,), 0.1)
260
+ if xlist:
261
+ abort(f"got exceptional condition on control_fd {control_fd}")
262
+ elif rlist:
263
+ control.process()
264
+ if control.eof:
265
+ os.close(control_fd)
266
+ control_fd = None
267
+ state = "waiting_for_exit"
268
+
269
+ elif state == "waiting_for_exit":
270
+ # control stream is EOF and it has nothing for us to read,
271
+ # we're now just waiting for proc to cleanly terminate
272
+ try:
273
+ code = test_proc.wait(0.1)
274
+ if code == 0:
275
+ # wrapper exited cleanly, testing is done
276
+ break
277
+ else:
278
+ # unexpected error happened (crash, disconnect, etc.)
279
+ self.conn.disconnect()
280
+ # if reconnect was requested, do so, otherwise abort
281
+ if control.reconnect:
282
+ state = "reconnecting"
283
+ if control.reconnect != "always":
284
+ control.reconnect = None
285
+ else:
286
+ abort(
287
+ f"test wrapper unexpectedly exited with {code} and "
288
+ "reconnect was not sent via test control",
289
+ )
290
+ test_proc = None
291
+ except subprocess.TimeoutExpired:
292
+ pass
293
+
294
+ elif state == "reconnecting":
295
+ try:
296
+ self.conn.connect(block=False)
297
+ reconnects += 1
298
+ state = "starting_test"
299
+ except BlockingIOError:
300
+ pass
301
+
302
+ else:
303
+ raise AssertionError("reached unexpected state")
304
+
305
+ else:
306
+ abort("test duration timeout reached")
307
+
308
+ # testing successful, do post-testing tasks
309
+
310
+ # test wrapper hasn't provided exitcode
311
+ if control.exit_code is None:
312
+ abort("exitcode not reported, wrapper bug?")
313
+
314
+ # partial results that were never reported
315
+ if control.partial_results:
316
+ for result in control.partial_results.values():
317
+ name = result.get("name")
318
+ if not name:
319
+ # partial result is also a result
320
+ control.nameless_result_seen = True
321
+ if testout := result.get("testout"):
322
+ try:
323
+ reporter.link_tmpfile_to(testout_fd, testout, name)
324
+ except FileExistsError:
325
+ raise testcontrol.BadReportJSONError(
326
+ f"file '{testout}' already exists",
327
+ ) from None
328
+ reporter.report(result)
329
+
330
+ # test hasn't reported a result for itself, add an automatic one
331
+ # as specified in RESULTS.md
332
+ # {"status": "pass", "testout": "output.txt"}
333
+ if not control.nameless_result_seen:
334
+ reporter.link_tmpfile_to(testout_fd, "output.txt")
335
+ reporter.report({
336
+ "status": "pass" if control.exit_code == 0 else "fail",
337
+ "testout": "output.txt",
338
+ })
339
+
340
+ return control.exit_code
341
+
342
+ except Exception:
343
+ # if the test hasn't reported a result for itself, but still
344
+ # managed to break something, provide at least the default log
345
+ # for manual investigation - otherwise test output disappears
346
+ if not control.nameless_result_seen:
347
+ try:
348
+ reporter.link_tmpfile_to(testout_fd, "output.txt")
349
+ reporter.report({
350
+ "status": "infra",
351
+ "testout": "output.txt",
352
+ })
353
+ # in case output.txt exists as a directory
354
+ except FileExistsError:
355
+ pass
356
+ raise
357
+
358
+
359
+ #__all__ = [
360
+ # info.name for info in _pkgutil.iter_modules(__spec__.submodule_search_locations)
361
+ #]
362
+ #
363
+ #
364
+ #import importlib as _importlib
365
+ #import pkgutil as _pkgutil
366
+ #
367
+ #
368
+ #def __dir__():
369
+ # return __all__
370
+ #
371
+ #
372
+ ## lazily import submodules
373
+ #def __getattr__(attr):
374
+ # # importing a module known to exist
375
+ # if attr in __all__:
376
+ # return _importlib.import_module(f".{attr}", __name__)
377
+ # else:
378
+ # raise AttributeError(f"module '{__name__}' has no attribute '{attr}'")