atex-0.9-py3-none-any.whl → atex-0.11-py3-none-any.whl

Files changed (38)
  1. atex/aggregator/__init__.py +62 -0
  2. atex/aggregator/json.py +279 -0
  3. atex/cli/__init__.py +14 -1
  4. atex/cli/fmf.py +7 -7
  5. atex/cli/libvirt.py +3 -2
  6. atex/cli/testingfarm.py +74 -3
  7. atex/connection/podman.py +2 -4
  8. atex/connection/ssh.py +7 -14
  9. atex/executor/executor.py +21 -20
  10. atex/executor/scripts.py +5 -3
  11. atex/executor/testcontrol.py +1 -1
  12. atex/orchestrator/__init__.py +76 -3
  13. atex/orchestrator/{orchestrator.py → adhoc.py} +246 -108
  14. atex/orchestrator/contest.py +94 -0
  15. atex/{provision → provisioner}/__init__.py +48 -52
  16. atex/{provision → provisioner}/libvirt/libvirt.py +34 -15
  17. atex/{provision → provisioner}/libvirt/locking.py +3 -1
  18. atex/provisioner/podman/__init__.py +2 -0
  19. atex/provisioner/podman/podman.py +169 -0
  20. atex/{provision → provisioner}/testingfarm/api.py +56 -48
  21. atex/{provision → provisioner}/testingfarm/testingfarm.py +43 -45
  22. atex/util/log.py +62 -67
  23. atex/util/subprocess.py +46 -12
  24. atex/util/threads.py +7 -0
  25. atex-0.11.dist-info/METADATA +86 -0
  26. atex-0.11.dist-info/RECORD +45 -0
  27. {atex-0.9.dist-info → atex-0.11.dist-info}/WHEEL +1 -1
  28. atex/orchestrator/aggregator.py +0 -111
  29. atex/provision/podman/__init__.py +0 -1
  30. atex/provision/podman/podman.py +0 -274
  31. atex-0.9.dist-info/METADATA +0 -178
  32. atex-0.9.dist-info/RECORD +0 -43
  33. /atex/{provision → provisioner}/libvirt/VM_PROVISION +0 -0
  34. /atex/{provision → provisioner}/libvirt/__init__.py +0 -0
  35. /atex/{provision → provisioner}/libvirt/setup-libvirt.sh +0 -0
  36. /atex/{provision → provisioner}/testingfarm/__init__.py +0 -0
  37. {atex-0.9.dist-info → atex-0.11.dist-info}/entry_points.txt +0 -0
  38. {atex-0.9.dist-info → atex-0.11.dist-info}/licenses/COPYING.txt +0 -0
atex/{provision → provisioner}/testingfarm/api.py CHANGED
@@ -16,12 +16,9 @@ import urllib3
 
 DEFAULT_API_URL = "https://api.testing-farm.io/v0.1"
 
-# how many seconds to sleep for during API polling
-API_QUERY_DELAY = 30
-
 DEFAULT_RESERVE_TEST = {
     "url": "https://github.com/RHSecurityCompliance/atex-reserve",
-    "ref": "v0.9",
+    "ref": "0.11",
     "path": ".",
     "name": "/plans/reserve",
 }
@@ -37,10 +34,10 @@ _http = urllib3.PoolManager(
     maxsize=10,
     block=True,
     retries=urllib3.Retry(
-        total=10,
+        total=24,
         # account for API restarts / short outages
-        backoff_factor=60,
-        backoff_max=600,
+        backoff_factor=10,
+        backoff_max=3600,
         # retry on API server errors too, not just connection issues
         status=10,
         status_forcelist={403,404,408,429,500,502,503,504},
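
Note on the retry tuning above: urllib3's Retry sleeps roughly backoff_factor * 2**(n-1) seconds before the n-th retry (no sleep before the first one), capped at backoff_max. A quick sketch of the schedule the new values produce, for illustration only:

    # sketch: approximate sleep schedule of urllib3.Retry(total=24,
    # backoff_factor=10, backoff_max=3600); not part of the package
    backoff_factor, backoff_max = 10, 3600
    for n in range(2, 12):
        sleep = min(backoff_factor * 2 ** (n - 1), backoff_max)
        print(f"before retry {n}: sleep {sleep}s")
    # 20s, 40s, 80s, ... 2560s, then capped at 3600s

Compared to the old backoff_factor=60 / backoff_max=600, retries start much sooner but the total schedule tolerates far longer API outages before giving up.
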
@@ -88,12 +85,13 @@ class TestingFarmAPI:
         self.api_url = url
         self.api_token = token or os.environ.get("TESTING_FARM_API_TOKEN")
 
-    def _query(self, method, path, *args, headers=None, **kwargs):
+    def _query(self, method, path, *args, headers=None, auth=True, **kwargs):
         url = f"{self.api_url}{path}"
-        if headers is not None:
-            headers["Authorization"] = f"Bearer {self.api_token}"
-        else:
-            headers = {"Authorization": f"Bearer {self.api_token}"}
+        if self.api_token and auth:
+            if headers is not None:
+                headers["Authorization"] = f"Bearer {self.api_token}"
+            else:
+                headers = {"Authorization": f"Bearer {self.api_token}"}
 
         reply = _http.request(method, url, *args, headers=headers, preload_content=False, **kwargs)
 
@@ -180,7 +178,7 @@ class TestingFarmAPI:
             fields["token_id"] = self.whoami()["token"]["id"]
             fields["user_id"] = self.whoami()["user"]["id"]
 
-        return self._query("GET", "/requests", fields=fields)
+        return self._query("GET", "/requests", fields=fields, auth=mine)
 
     def get_request(self, request_id):
         """
@@ -210,19 +208,22 @@ class Request:
     request.
     """
 
-    # TODO: maintain internal time.monotonic() clock and call .update() from
-    # functions like .alive() if last update is > API_QUERY_DELAY
+    # actually query the TestingFarm API at most every X seconds,
+    # re-using cached state between updates
+    api_query_limit = 30
 
     def __init__(self, id=None, api=None, initial_data=None):
         """
         'id' is a Testing Farm request UUID
+
         'api' is a TestingFarmAPI instance - if unspecified, a sensible default
-        'initial_data' (dict) can be used to pre-fill an initial Request state
-        will be used.
+        will be used.
+        'initial_data' (dict) can be used to pre-fill an initial Request state.
         """
         self.id = id
         self.api = api or TestingFarmAPI()
         self.data = initial_data or {}
+        self.next_query = 0
 
     def submit(self, spec):
         """
@@ -234,16 +235,12 @@
         self.data = self.api.submit_request(spec)
         self.id = self.data["id"]
 
-    def update(self):
-        """
-        Query Testing Farm API to get a more up-to-date version of the request
-        metadata. Do not call too frequently.
-        This function is also used internally by others, you do not need to
-        always call it manually.
-        """
-        self.data = self.api.get_request(self.id)
-        # TODO: refresh internal time.monotonic() timer
-        return self.data
+    def _refresh(self):
+        if not self.id:
+            return
+        if time.monotonic() > self.next_query:
+            self.data = self.api.get_request(self.id)
+            self.next_query = time.monotonic() + self.api_query_limit
 
     def cancel(self):
         if not self.id:
@@ -254,35 +251,44 @@
         return data
 
     def alive(self):
-        if "state" not in self.data:
-            self.update()
+        if not self.id:
+            return False
+        self._refresh()
         return self.data["state"] not in END_STATES
 
     def assert_alive(self):
         if not self.alive():
             state = self.data["state"]
-            raise GoneAwayError(f"request {self.data['id']} not alive anymore, entered: {state}")
+            raise GoneAwayError(f"request {self.id} not alive anymore, entered: {state}")
 
     def wait_for_state(self, state):
-        if "state" not in self.data:
-            self.update()
-        self.assert_alive()
-        while self.data["state"] != state:
-            time.sleep(API_QUERY_DELAY)
-            self.update()
-            self.assert_alive()
+        """
+        'state' is a str or a tuple of states to wait for.
+        """
+        watched = (state,) if isinstance(state, str) else state
+        while True:
+            self._refresh()
+            if self.data["state"] in watched:
+                break
+            # if the request ended in one of END_STATES and the above condition
+            # did not catch it, the wait will never end
+            if self.data["state"] in END_STATES:
+                raise GoneAwayError(f"request {self.id} ended with {self.data['state']}")
 
     def __repr__(self):
         return f"Request(id={self.id})"
 
     def __str__(self):
+        self._refresh()
         # python has no better dict-pretty-printing logic
         return json.dumps(self.data, sort_keys=True, indent=4)
 
     def __contains__(self, item):
+        self._refresh()
         return item in self.data
 
     def __getitem__(self, key):
+        self._refresh()
         return self.data[key]
 
 
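
With _refresh() in place, dict-style access on a Request transparently re-polls the API, but at most once every api_query_limit seconds, so callers no longer manage update() themselves. A minimal consumer sketch based only on the interfaces visible in this diff (the 'spec' contents and state names are illustrative):

    import time

    req = Request(api=TestingFarmAPI())
    req.submit(spec)  # 'spec' is a request dict as accepted by the API

    # each access goes through _refresh(), which queries the API at most
    # once per Request.api_query_limit (30s) and reuses cached data otherwise
    while req.alive():
        print(req["state"])
        time.sleep(10)

wait_for_state() now also accepts a tuple, e.g. req.wait_for_state(("running", "complete")), and raises GoneAwayError if the request reaches a terminal state that is not among the watched ones.
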
@@ -292,6 +298,10 @@ class PipelineLogStreamer:
     to "stream" its contents over time (over many requests), never having to
     re-read old pipeline.log content.
     """
+
+    # how frequently to check for pipeline.log updates (seconds)
+    pipeline_query_limit = 30
+
     def __init__(self, request):
         self.request = request
 
@@ -324,8 +334,7 @@
                 return log
 
         finally:
-            time.sleep(API_QUERY_DELAY)
-            self.request.update()
+            time.sleep(self.pipeline_query_limit)
 
     def __iter__(self):
         url = self._wait_for_entry()
@@ -356,8 +365,7 @@
                     buffer = buffer[index+1:]
 
         finally:
-            time.sleep(API_QUERY_DELAY)
-            self.request.update()
+            time.sleep(self.pipeline_query_limit)
 
 
 class Reserve:
@@ -430,7 +438,6 @@ class Reserve:
         'api' is a TestingFarmAPI instance - if unspecified, a sensible default
         will be used.
         """
-        util.info(f"will reserve compose:{compose} on arch:{arch} for {timeout}min")
         spec = {
             "test": {
                 "fmf": reserve_test or DEFAULT_RESERVE_TEST,
@@ -532,14 +539,17 @@
         with self.lock:
             self.request = Request(api=self.api)
             self.request.submit(spec)
-            util.debug(f"submitted request:\n{textwrap.indent(str(self.request), ' ')}")
+            util.debug(f"submitted request {self.request.id}")
+            util.extradebug(
+                f"request {self.request.id}:\n{textwrap.indent(str(self.request), ' ')}",
+            )
 
         # wait for user/host to ssh to
         ssh_user = ssh_host = None
         for line in PipelineLogStreamer(self.request):
             # the '\033[0m' is to reset colors sometimes left in a bad
             # state by pipeline.log
-            util.debug(f"pipeline: {line}\033[0m")
+            util.extradebug(f"{line}\033[0m")
             # find hidden login details
             m = re.search(
                 # host address can be an IP address or a hostname
@@ -559,14 +569,12 @@
         # (it will be failing to login for a while, until the reserve test
         # installs our ssh pubkey into authorized_keys)
         ssh_attempt_cmd = (
-            "ssh", "-q", "-i", ssh_key, f"-oConnectionAttempts={API_QUERY_DELAY}",
+            "ssh", "-q", "-i", ssh_key.absolute(), "-oConnectionAttempts=60",
             "-oStrictHostKeyChecking=no", "-oUserKnownHostsFile=/dev/null",
             f"{ssh_user}@{ssh_host}", "exit 123",
         )
         while True:
-            # wait for API_QUERY_DELAY between ssh retries, seems like GEFN sleep time
-            time.sleep(API_QUERY_DELAY)
-            self.request.update()
+            time.sleep(1)
             self.request.assert_alive()
 
             proc = util.subprocess_run(
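
The loop above now lets ssh itself do the waiting: ConnectionAttempts=60 keeps retrying the TCP connect, and the remote command 'exit 123' acts as a sentinel, since a return code of 123 can only come from a successful login (ssh reports its own failures as 255). The same pattern in isolation (host, user and key path hypothetical):

    import subprocess, time

    cmd = (
        "ssh", "-q", "-i", "/tmp/reserve_key", "-oConnectionAttempts=60",
        "-oStrictHostKeyChecking=no", "-oUserKnownHostsFile=/dev/null",
        "root@host.example.com", "exit 123",
    )
    while True:
        # 123 proves we authenticated and ran the remote command;
        # 255 means ssh itself failed (unreachable, pubkey not installed yet)
        if subprocess.run(cmd).returncode == 123:
            break
        time.sleep(1)
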
atex/{provision → provisioner}/testingfarm/testingfarm.py CHANGED
@@ -1,6 +1,7 @@
 import time
 import tempfile
 import threading
+import concurrent.futures
 
 from ... import connection, util
 from .. import Provisioner, Remote
@@ -8,22 +9,22 @@ from .. import Provisioner, Remote
 from . import api
 
 
-class TestingFarmRemote(Remote, connection.ssh.ManagedSSHConn):
+class TestingFarmRemote(Remote, connection.ssh.ManagedSSHConnection):
     """
     Built on the official Remote API, pulling in the Connection API
-    as implemented by ManagedSSHConn.
+    as implemented by ManagedSSHConnection.
     """
 
     def __init__(self, request_id, ssh_options, *, release_hook):
         """
         'request_id' is a string with Testing Farm request UUID (for printouts).
 
-        'ssh_options' are a dict, passed to ManagedSSHConn __init__().
+        'ssh_options' are a dict, passed to ManagedSSHConnection __init__().
 
         'release_hook' is a callable called on .release() in addition
         to disconnecting the connection.
         """
-        # NOTE: self.lock inherited from ManagedSSHConn
+        # NOTE: self.lock inherited from ManagedSSHConnection
         super().__init__(options=ssh_options)
         self.request_id = request_id
         self.release_hook = release_hook
@@ -35,8 +36,8 @@ class TestingFarmRemote(Remote, connection.ssh.ManagedSSHConn):
             return
         else:
             self.release_called = True
-        self.release_hook(self)
         self.disconnect()
+        self.release_hook(self)
 
     # not /technically/ a valid repr(), but meh
     def __repr__(self):
@@ -49,25 +50,25 @@
 
 
 class TestingFarmProvisioner(Provisioner):
-    # TODO: have max_systems as (min,default,max) tuple; have an algorithm that
-    # starts at default and scales up/down as needed
-
-    def __init__(self, compose, arch="x86_64", *, max_systems=1, max_retries=10, **reserve_kwargs):
+    # maximum number of TF requests the user can .provision(),
+    # as a last safety measure against Orchestrator(remotes=math.inf)
+    absolute_max_remotes = 100
+    # number of parallel threads running HTTP DELETE calls to cancel
+    # TF requests on .stop() or Context Manager exit
+    stop_release_workers = 10
+
+    def __init__(self, compose, arch="x86_64", *, max_retries=10, **reserve_kwargs):
         """
         'compose' is a Testing Farm compose to prepare.
 
         'arch' is an architecture associated with the compose.
 
-        'max_systems' is an int of how many systems to reserve (and keep
-        reserved) in an internal pool.
-
         'max_retries' is a maximum number of provisioning (Testing Farm) errors
         that will be reprovisioned before giving up.
         """
         self.lock = threading.RLock()
         self.compose = compose
         self.arch = arch
-        self.max_systems = max_systems
         self.reserve_kwargs = reserve_kwargs
         self.retries = max_retries
 
@@ -100,7 +101,7 @@
             "Hostname": machine.host,
             "User": machine.user,
             "Port": machine.port,
-            "IdentityFile": machine.ssh_key,
+            "IdentityFile": machine.ssh_key.absolute(),
             "ConnectionAttempts": "1000",
             "Compression": "yes",
         }
@@ -134,6 +135,7 @@
         # instantiate a class Reserve from the Testing Farm api module
         # (which typically provides context manager, but we use its .reserve()
         # and .release() functions directly)
+        util.info(f"{repr(self)}: reserving new remote")
         tf_reserve = api.Reserve(
             compose=self.compose,
             arch=self.arch,
@@ -157,45 +159,40 @@
         with self.lock:
             self._tmpdir = tempfile.TemporaryDirectory()
             self.ssh_key, self.ssh_pubkey = util.ssh_keygen(self._tmpdir.name)
-            # start up all initial reservations
-            for i in range(self.max_systems):
-                delay = (api.API_QUERY_DELAY / self.max_systems) * i
-                #self.queue.start_thread(target=self._schedule_one_reservation, args=(delay,))
-                self._schedule_one_reservation(delay)
 
     def stop(self):
-        with self.lock:
-            # abort reservations in progress
-            while self.reserving:
-                # testingfarm api.Reserve instances
-                self.reserving.pop().release()
-            # cancel/release all Remotes ever created by us
-            while self.remotes:
-                # TestingFarmRemote instances
-                self.remotes.pop().release()
-            # explicitly remove the tmpdir rather than relying on destructor
-            self._tmpdir.cleanup()
-            self._tmpdir = None
+        release_funcs = []
 
-    def stop_defer(self):
-        callables = []
         with self.lock:
-            callables += (f.release for f in self.reserving)
+            release_funcs += (f.release for f in self.reserving)
             self.reserving = []
-            callables += (r.release for r in self.remotes)
-            self.remotes = []  # just in case
-            callables.append(self._tmpdir.cleanup)
+            release_funcs += (r.release for r in self.remotes)
+            self.remotes = []  # just in case of a later .start()
+
+        # parallelize at most stop_release_workers TF API release (DELETE) calls
+        if release_funcs:
+            workers = min(len(release_funcs), self.stop_release_workers)
+            with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as ex:
+                for func in release_funcs:
+                    ex.submit(func)
+
+        with self.lock:
+            # explicitly remove the tmpdir rather than relying on destructor
+            self._tmpdir.cleanup()
             self._tmpdir = None
-        return callables
 
-    def get_remote(self, block=True):
-        # fill .release()d remotes back up with reservations
+    def provision(self, count=1):
         with self.lock:
-            deficit = self.max_systems - len(self.remotes) - len(self.reserving)
-            for i in range(deficit):
-                delay = (api.API_QUERY_DELAY / deficit) * i
+            reservations = len(self.remotes) + len(self.reserving)
+            # clamp count to absolute_max_remotes
+            if count + reservations > self.absolute_max_remotes:
+                count = self.absolute_max_remotes - reservations
+            # spread out the request submissions
+            for i in range(count):
+                delay = (api.Request.api_query_limit / count) * i
                 self._schedule_one_reservation(delay)
 
+    def get_remote(self, block=True):
         while True:
             # otherwise wait on a queue of Remotes being provisioned
             try:
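
Two structural changes above are worth spelling out: stop() now collects the release callables under the lock but executes them outside it, fanned out over a bounded thread pool, and provisioning is demand-driven via provision(count) with a hard cap. The fan-out pattern in isolation (a sketch, not package code):

    import concurrent.futures

    def parallel_release(release_funcs, max_workers=10):
        """Run every release() concurrently, bounded by max_workers threads."""
        if not release_funcs:
            return
        workers = min(len(release_funcs), max_workers)
        # the 'with' block joins all workers before returning
        with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as ex:
            for func in release_funcs:
                ex.submit(func)

One caveat of this fire-and-forget style: exceptions raised inside the submitted callables are captured in their futures and silently discarded unless .result() is collected.
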
@@ -204,10 +201,11 @@
                 # always non-blocking
                 return None
             except (api.TestingFarmError, connection.ssh.SSHError) as e:
+                exc_str = f"{type(e).__name__}({e})"
                 with self.lock:
                     if self.retries > 0:
                         util.warning(
-                            f"caught while reserving a TF system: {repr(e)}, "
+                            f"caught while reserving a TF system: {exc_str}, "
                             f"retrying ({self.retries} left)",
                         )
                         self.retries -= 1
@@ -218,7 +216,7 @@
                         return None
                 else:
                     util.warning(
-                        f"caught while reserving a TF system: {repr(e)}, "
+                        f"caught while reserving a TF system: {exc_str}, "
                        "exhausted all retries, giving up",
                    )
                    raise
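
The exc_str change is cosmetic but deliberate: repr() output varies per exception class, while f"{type(e).__name__}({e})" always renders as ClassName(message). For illustration (SSHError here is a stand-in for the real class in atex.connection.ssh):

    class SSHError(Exception):
        pass

    e = SSHError("connection refused")
    print(repr(e))                      # SSHError('connection refused')
    print(f"{type(e).__name__}({e})")   # SSHError(connection refused)
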
atex/util/log.py CHANGED
@@ -1,76 +1,71 @@
-import inspect
+import os
 import logging
-from pathlib import Path
+import inspect
 
 _logger = logging.getLogger("atex")
 
+# which functions to skip when determining the logger function caller;
+# typically, these are wrappers and we want to see their caller in the trace
+# instead of them
+#
+#   ( file basename , qualname )
+#   where qualname is '<module>' or 'funcname' or 'Classname.funcname'
+skip_levels = {
+    ("subprocess.py", "subprocess_run"),
+    ("subprocess.py", "subprocess_output"),
+    ("subprocess.py", "subprocess_Popen"),
+    ("subprocess.py", "subprocess_stream"),
+    ("subprocess.py", "subprocess_log"),
 
-def in_debug_mode():
-    """
-    Return True if the root logger is using the DEBUG (or more verbose) level.
-    """
-    # TODO: use _logger.isEnabledFor() ?
-    root_level = logging.getLogger().level
-    return root_level > 0 and root_level <= logging.DEBUG
+    ("podman.py", "PodmanConnection.cmd"),
+    ("podman.py", "PodmanConnection.rsync"),
 
+    ("ssh.py", "StatelessSSHConnection.cmd"),
+    ("ssh.py", "StatelessSSHConnection.rsync"),
+    ("ssh.py", "ManagedSSHConnection.forward"),
+    ("ssh.py", "ManagedSSHConnection.cmd"),
+    ("ssh.py", "ManagedSSHConnection.rsync"),
+}
 
-def _format_msg(msg, *, skip_frames=0):
-    stack = inspect.stack()
-    if len(stack)-1 <= skip_frames:
-        raise SyntaxError("skip_frames exceeds call stack (frame count)")
-    stack = stack[skip_frames+1:]
 
-    # bottom of the stack, or runpy executed module
-    for frame_info in stack:
-        if frame_info.function == "<module>":
+def _log_msg(logger_func, *args, stacklevel=1, **kwargs):
+    # inspect.stack() is MUCH slower
+    caller = inspect.currentframe().f_back.f_back
+    extra_levels = 2  # skip this func and the debug/info/warning parent
+    while caller.f_back:
+        code = caller.f_code
+        # pathlib is much slower
+        basename = os.path.basename(code.co_filename)  # noqa: PTH119
+        qualname = code.co_qualname
+        if (basename, qualname) in skip_levels:
+            extra_levels += 1
+            caller = caller.f_back
+        else:
             break
-        module = frame_info
-
-    # last (topmost) function that isn't us
-    parent = stack[0]
-    function = parent.function
-
-    # if the function has 'self' and it looks like a class instance,
-    # prepend it to the function name
-    argvals = inspect.getargvalues(parent.frame)
-    if argvals.args:
-        if argvals.args[0] == "self":
-            self = argvals.locals["self"]
-            if hasattr(self, "__class__") and inspect.isclass(self.__class__):
-                function = f"{self.__class__.__name__}.{function}"
-        elif argvals.args[0] == "cls":
-            cls = argvals.locals["cls"]
-            if inspect.isclass(cls):
-                function = f"{cls.__name__}.{function}"
-
-    # don't report module name of a function if it's the same as running module
-    if parent.filename != module.filename:
-        parent_modname = parent.frame.f_globals["__name__"]
-        # avoid everything having the package name prefixed
-        parent_modname = parent_modname.partition(".")[2] or parent_modname
-        return f"{parent_modname}.{function}:{parent.lineno}: {msg}"
-    elif parent.function != "<module>":
-        return f"{function}:{parent.lineno}: {msg}"
-    else:
-        return f"{Path(parent.filename).name}:{parent.lineno}: {msg}"
-
-
-def debug(msg, *, skip_frames=0):
-    if in_debug_mode():
-        _logger.debug(_format_msg(msg, skip_frames=skip_frames+1))
-    else:
-        _logger.debug(msg)
-
-
-def info(msg, *, skip_frames=0):
-    if in_debug_mode():
-        _logger.info(_format_msg(msg, skip_frames=skip_frames+1))
-    else:
-        _logger.info(msg)
-
-
-def warning(msg, *, skip_frames=0):
-    if in_debug_mode():
-        _logger.warning(_format_msg(msg, skip_frames=skip_frames+1))
-    else:
-        _logger.warning(msg)
+    return logger_func(*args, stacklevel=stacklevel+extra_levels, **kwargs)
+
+
+def warning(*args, **kwargs):
+    return _log_msg(_logger.warning, *args, **kwargs)
+
+
+def info(*args, **kwargs):
+    return _log_msg(_logger.info, *args, **kwargs)
+
+
+def debug(*args, **kwargs):
+    return _log_msg(_logger.debug, *args, **kwargs)
+
+
+# add a log level more verbose than logging.DEBUG, for verbose command
+# outputs, big JSON / XML printouts, and other outputs unsuitable for
+# large parallel runs; to be used in targeted debugging
+#
+# logging.DEBUG is 10, and programs tend to add TRACE as 5, so be somewhere
+# in between
+EXTRADEBUG = 8
+logging.addLevelName(EXTRADEBUG, "EXTRADEBUG")
+
+
+def extradebug(*args, **kwargs):
+    return _log_msg(_logger.log, EXTRADEBUG, *args, **kwargs)
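
The new scheme stops formatting caller names by hand: _log_msg() walks the live frame stack, counts how many known wrapper frames (the skip_levels table) sit between the log call and the interesting caller, and hands that count to the stdlib as stacklevel, so %(filename)s, %(funcName)s and %(lineno)d come out right for free. A small demo of the underlying stacklevel mechanism, plain stdlib logging with nothing atex-specific:

    import logging

    logging.basicConfig(
        level=5,  # below both EXTRADEBUG (8) and DEBUG (10)
        format="%(levelname)s %(funcName)s:%(lineno)d %(message)s",
    )
    log = logging.getLogger("demo")

    def wrapper(msg):
        # stacklevel=2 attributes the record to wrapper()'s caller,
        # not to this line (supported since Python 3.8)
        log.info(msg, stacklevel=2)

    def business_logic():
        wrapper("hello")  # record shows funcName=business_logic

    business_logic()
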
atex/util/subprocess.py CHANGED
@@ -1,52 +1,86 @@
 import subprocess
 
-from .log import debug
+from .log import extradebug
 
 
-def subprocess_run(cmd, *, skip_frames=0, **kwargs):
+def subprocess_run(cmd, **kwargs):
     """
     A simple wrapper for the real subprocess.run() that logs the command used.
     """
     # when logging, skip current stack frame - report the place we were called
     # from, not util.subprocess_run itself
-    debug(f"running: {cmd}", skip_frames=skip_frames+1)
+    extradebug(f"running: '{cmd}' with {kwargs=}")
     return subprocess.run(cmd, **kwargs)
 
 
-def subprocess_output(cmd, *, skip_frames=0, check=True, text=True, **kwargs):
+def subprocess_output(cmd, *, check=True, text=True, **kwargs):
     """
     A wrapper simulating subprocess.check_output() via a modern .run() API.
     """
-    debug(f"running: {cmd}", skip_frames=skip_frames+1)
+    extradebug(f"running: '{cmd}' with {check=}, {text=} and {kwargs=}")
     proc = subprocess.run(cmd, check=check, text=text, stdout=subprocess.PIPE, **kwargs)
     return proc.stdout.rstrip("\n") if text else proc.stdout
 
 
-def subprocess_Popen(cmd, *, skip_frames=0, **kwargs):  # noqa: N802
+def subprocess_Popen(cmd, **kwargs):  # noqa: N802
     """
     A simple wrapper for the real subprocess.Popen() that logs the command used.
     """
-    debug(f"running: {cmd}", skip_frames=skip_frames+1)
+    extradebug(f"running: '{cmd}' with {kwargs=}")
     return subprocess.Popen(cmd, **kwargs)
 
 
-def subprocess_stream(cmd, *, check=False, skip_frames=0, **kwargs):
+def subprocess_stream(cmd, *, stream="stdout", check=False, input=None, **kwargs):
     """
     Run 'cmd' via subprocess.Popen() and return an iterator over any lines
     the command outputs on stdout, in text mode.
 
+    The 'stream' is a subprocess.Popen attribute (either 'stdout' or 'stderr')
+    to read from.
+    To capture both stdout and stderr as yielded lines, use 'stream="stdout"'
+    and pass an additional 'stderr=subprocess.STDOUT'.
+
     With 'check' set to True, raise a CalledProcessError if the 'cmd' failed.
 
-    To capture both stdout and stderr as yielded lines, use subprocess.STDOUT.
+    Similarly, 'input' simulates the 'input' arg of subprocess.run().
+    Note that the input is written to stdin of the process *before* any outputs
+    are streamed, so it should be sufficiently small and/or not cause a deadlock
+    with the process waiting for outputs to be read before consuming more input.
+    Use 'stdin=subprocess.PIPE' and write to it manually if you need more.
     """
-    debug(f"running: {cmd}", skip_frames=skip_frames+1)
-    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, text=True, **kwargs)
+    all_kwargs = {
+        "text": True,
+        stream: subprocess.PIPE,
+    }
+    if input is not None:
+        all_kwargs["stdin"] = subprocess.PIPE
+    all_kwargs |= kwargs
+
+    extradebug(f"running: '{cmd}' with {all_kwargs=}")
+    proc = subprocess.Popen(cmd, **all_kwargs)
 
     def generate_lines():
-        for line in proc.stdout:
+        if input is not None:
+            proc.stdin.write(input)
+            proc.stdin.close()
+        line_stream = getattr(proc, stream)
+        for line in line_stream:
             yield line.rstrip("\n")
         code = proc.wait()
         if code > 0 and check:
             raise subprocess.CalledProcessError(cmd=cmd, returncode=code)
 
     return (proc, generate_lines())
+
+
+def subprocess_log(cmd, **kwargs):
+    """
+    A wrapper to stream every (text) line output from the process to the
+    logging module.
+
+    Uses subprocess_stream() to gather the lines.
+    """
+    extradebug(f"running: '{cmd}' with {kwargs=}")
+    _, lines = subprocess_stream(cmd, **kwargs)
+    for line in lines:
+        extradebug(line)
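
With the new 'stream' and 'input' arguments, subprocess_stream() can read stderr instead of stdout and feed a small stdin payload before reading; per its docstring, the write happens up front, so large inputs risk a pipe deadlock. A usage sketch against the new signature (commands illustrative):

    import subprocess

    # read stderr only; stdout stays wherever it was (e.g. the terminal)
    proc, lines = subprocess_stream(["rsync", "-av", "src/", "dst/"],
                                    stream="stderr")
    for line in lines:
        print("rsync stderr:", line)

    # stdout+stderr interleaved, with a small stdin payload written first
    proc, lines = subprocess_stream(["grep", "-n", "foo"],
                                    input="foo\nbar\nfoo\n",
                                    stderr=subprocess.STDOUT)
    for line in lines:
        print(line)  # "1:foo", "3:foo"
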
atex/util/threads.py CHANGED
@@ -97,3 +97,10 @@ class ThreadQueue:
             except KeyError:
                 break
             thread.join()
+
+    def qsize(self):
+        """
+        Return the amount of elements .get() can retrieve before it raises
+        queue.Empty.
+        """
+        return self.queue.qsize()
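
Worth noting for users of the new method: the underlying queue.Queue.qsize() is a point-in-time approximation, so with other producer/consumer threads running, the value can be stale by the time it is acted on; treat it as a hint for logging or sizing, not a guarantee that .get() will succeed:

    import queue

    # hypothetical usage; per the docstring above, .get() raises queue.Empty
    if tq.qsize() > 0:       # a hint, not a guarantee
        try:
            item = tq.get()
        except queue.Empty:
            pass             # another consumer got there first
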