atex-0.5-py3-none-any.whl → atex-0.7-py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as published in their respective public registries.
@@ -9,28 +9,28 @@ import collections
 
 from pathlib import Path
 
-from . import util
+from ... import util
 
 import json
 import urllib3
 
-DEFAULT_API_URL = 'https://api.testing-farm.io/v0.1'
+DEFAULT_API_URL = "https://api.testing-farm.io/v0.1"
 
 # how many seconds to sleep for during API polling
 API_QUERY_DELAY = 10
 
 RESERVE_TASK = {
-    'fmf': {
-        'url': 'https://github.com/RHSecurityCompliance/atex',
-        'ref': 'main',
-        'path': 'tmt_tests',
-        'name': "/plans/reserve",
+    "fmf": {
+        "url": "https://github.com/RHSecurityCompliance/atex",
+        "ref": "main",
+        "path": "tmt_tests",
+        "name": "/plans/reserve",
     },
 }
 
 # final states of a request,
 # https://gitlab.com/testing-farm/nucleus/-/blob/main/api/src/tft/nucleus/api/core/schemes/test_request.py
-END_STATES = ('error', 'complete', 'canceled')
+END_STATES = ("error", "complete", "canceled")
 
 # always have at most 3 outstanding HTTP requests to every given API host,
 # shared by all instances of all classes here, to avoid flooding the host
@@ -48,6 +48,8 @@ class APIError(TestingFarmError):
     pass
 
 
+# TODO docstrings for these:
+
 class BadHTTPError(TestingFarmError):
     pass
 
@@ -74,21 +76,21 @@ class TestingFarmAPI:
         Note that token-less operation is supported, with limited functionality.
         """
         self.api_url = url
-        self.api_token = token or os.environ.get('TESTING_FARM_API_TOKEN')
+        self.api_token = token or os.environ.get("TESTING_FARM_API_TOKEN")
 
     def _query(self, method, path, *args, headers=None, **kwargs):
-        url = f'{self.api_url}{path}'
+        url = f"{self.api_url}{path}"
         if headers is not None:
-            headers['Authorization'] = f'Bearer {self.api_token}'
+            headers["Authorization"] = f"Bearer {self.api_token}"
         else:
-            headers = {'Authorization': f'Bearer {self.api_token}'}
+            headers = {"Authorization": f"Bearer {self.api_token}"}
 
         reply = _http.request(method, url, *args, headers=headers, preload_content=False, **kwargs)
 
         if reply.status != 200 and not reply.data:
             raise APIError(f"got HTTP {reply.status} on {method} {url}", reply)
 
-        if reply.headers.get('Content-Type') != 'application/json':
+        if reply.headers.get("Content-Type") != "application/json":
             raise BadHTTPError(
                 f"HTTP {reply.status} on {method} {url} is not application/json",
                 reply,
@@ -97,7 +99,10 @@
         try:
             decoded = reply.json()
         except json.decoder.JSONDecodeError:
-            raise BadHTTPError(f"failed to decode JSON for {method} {url}: {reply.data}", reply)
+            raise BadHTTPError(
+                f"failed to decode JSON for {method} {url}: {reply.data}",
+                reply,
+            ) from None
 
         if reply.status != 200:
             raise APIError(f"got HTTP {reply.status} on {method} {url}: {decoded}", reply)
@@ -107,14 +112,14 @@
     def whoami(self):
         if not self.api_token:
             raise ValueError("whoami() requires an auth token")
-        if hasattr(self, '_whoami_cached'):
+        if hasattr(self, "_whoami_cached"):
             return self._whoami_cached
         else:
-            self._whoami_cached = self._query('GET', '/whoami')
+            self._whoami_cached = self._query("GET", "/whoami")
             return self._whoami_cached
 
     def about(self):
-        return self._query('GET', '/about')
+        return self._query("GET", "/about")
 
     def composes(self, ranch=None):
         """
@@ -123,8 +128,8 @@
         if not ranch:
             if not self.api_token:
                 raise ValueError("composes() requires an auth token to identify ranch")
-            ranch = self.whoami()['token']['ranch']
-        return self._query('GET', f'/composes/{ranch}')
+            ranch = self.whoami()["token"]["ranch"]
+        return self._query("GET", f"/composes/{ranch}")
 
     def search_requests(
         self, state, mine=True, ranch=None, created_before=None, created_after=None,
@@ -141,27 +146,27 @@
         elsewhere, ie. 'YYYY-MM-DD' or 'YYYY-MM-DDTHH:MM:SS' (or with '.MS'),
         without timezone.
         """
-        fields = {'state': state}
+        fields = {"state": state}
         if ranch:
-            fields['ranch'] = ranch
+            fields["ranch"] = ranch
         if created_before:
-            fields['created_before'] = created_before
+            fields["created_before"] = created_before
         if created_after:
-            fields['created_after'] = created_after
+            fields["created_after"] = created_after
 
         if mine:
             if not self.api_token:
                 raise ValueError("search_requests(mine=True) requires an auth token")
-            fields['token_id'] = self.whoami()['token']['id']
-            fields['user_id'] = self.whoami()['user']['id']
+            fields["token_id"] = self.whoami()["token"]["id"]
+            fields["user_id"] = self.whoami()["user"]["id"]
 
-        return self._query('GET', '/requests', fields=fields)
+        return self._query("GET", "/requests", fields=fields)
 
     def get_request(self, request_id):
         """
         'request_id' is the UUID (string) of the request.
         """
-        return self._query('GET', f'/requests/{request_id}')
+        return self._query("GET", f"/requests/{request_id}")
 
     def submit_request(self, spec):
         """
@@ -170,13 +175,13 @@
         """
         if not self.api_token:
            raise ValueError("submit_request() requires an auth token")
-        return self._query('POST', '/requests', json=spec)
+        return self._query("POST", "/requests", json=spec)
 
     def cancel_request(self, request_id):
         """
         'request_id' is the UUID (string) of the request.
         """
-        return self._query('DELETE', f'/requests/{request_id}')
+        return self._query("DELETE", f"/requests/{request_id}")
 
 
 class Request:
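
For orientation, a minimal sketch of driving the TestingFarmAPI client shown above; the constructor arguments follow the `url`/`token` assignments in the diff, and the ranch name and UUID are placeholders, not values taken from the package:

```python
# assumes the module above is imported and its names are in scope
api = TestingFarmAPI(url=DEFAULT_API_URL, token=None)  # token falls back to $TESTING_FARM_API_TOKEN

print(api.about())                          # token-less endpoint
print(api.composes(ranch="public"))         # explicit ranch skips the whoami() lookup
request = api.get_request("00000000-0000-0000-0000-000000000000")  # placeholder UUID
print(request["state"])
```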
@@ -185,6 +190,9 @@ class Request:
     request.
     """
 
+    # TODO: maintain internal time.monotonic() clock and call .update() from
+    # functions like .alive() if last update is > API_QUERY_DELAY
+
     def __init__(self, id=None, api=None, initial_data=None):
         """
         'id' is a Testing Farm request UUID
@@ -204,14 +212,17 @@
         if self.id:
             raise ValueError("this Request instance already has 'id', refusing submit")
         self.data = self.api.submit_request(spec)
-        self.id = self.data['id']
+        self.id = self.data["id"]
 
     def update(self):
         """
         Query Testing Farm API to get a more up-to-date version of the request
-        metadata accessible via __str__().
+        metadata. Do not call too frequently.
+        This function is also used internally by others, you do not need to
+        always call it manually.
         """
         self.data = self.api.get_request(self.id)
+        # TODO: refresh internal time.monotonic() timer
         return self.data
 
     def cancel(self):
@@ -223,26 +234,26 @@
         return data
 
     def alive(self):
-        if 'state' not in self.data:
+        if "state" not in self.data:
             self.update()
-        return self.data['state'] not in END_STATES
+        return self.data["state"] not in END_STATES
 
     def assert_alive(self):
         if not self.alive():
-            state = self.data['state']
+            state = self.data["state"]
             raise GoneAwayError(f"request {self.data['id']} not alive anymore, entered: {state}")
 
     def wait_for_state(self, state):
-        if 'state' not in self.data:
+        if "state" not in self.data:
             self.update()
         self.assert_alive()
-        while self.data['state'] != state:
+        while self.data["state"] != state:
             time.sleep(API_QUERY_DELAY)
             self.update()
             self.assert_alive()
 
     def __repr__(self):
-        return f'Request(id={self.id})'
+        return f"Request(id={self.id})"
 
     def __str__(self):
         # python has no better dict-pretty-printing logic
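
A hedged sketch of the Request polling helpers above, reusing the `api` object from the previous snippet (the UUID is again a placeholder):

```python
req = Request(id="00000000-0000-0000-0000-000000000000", api=api)  # placeholder UUID
req.update()                        # fetch current metadata from the API
if req.alive():
    # polls every API_QUERY_DELAY seconds; raises GoneAwayError
    # if the request enters one of END_STATES first
    req.wait_for_state("running")
print(req)                          # __str__ pretty-prints the cached metadata
```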
@@ -266,18 +277,18 @@ class PipelineLogStreamer:
 
     def _wait_for_entry(self):
         while True:
-            self.request.wait_for_state('running')
+            self.request.wait_for_state("running")
 
             try:
-                if 'run' not in self.request or 'artifacts' not in self.request['run']:
+                if "run" not in self.request or "artifacts" not in self.request["run"]:
                     continue
 
-                artifacts = self.request['run']['artifacts']
+                artifacts = self.request["run"]["artifacts"]
                 if not artifacts:
                     continue
 
-                log = f'{artifacts}/pipeline.log'
-                reply = _http.request('HEAD', log)
+                log = f"{artifacts}/pipeline.log"
+                reply = _http.request("HEAD", log)
                 # TF has a race condition of adding the .log entry without it being created
                 if reply.status == 404:
                     util.debug(f"got 404 for {log}, retrying")
@@ -285,6 +296,8 @@
                 elif reply.status != 200:
                     raise APIError(f"got HTTP {reply.status} on HEAD {log}", reply)
 
+                util.info(f"artifacts: {artifacts}")
+
                 return log
 
             finally:
@@ -293,17 +306,17 @@
 
     def __iter__(self):
         url = self._wait_for_entry()
-        buffer = ''
+        buffer = ""
         bytes_read = 0
         while True:
             self.request.assert_alive()
 
             try:
-                headers = {'Range': f'bytes={bytes_read}-'}
+                headers = {"Range": f"bytes={bytes_read}-"}
                 # load all returned data via .decode() rather than streaming it
                 # in chunks, because we don't want to leave the connection open
                 # (blocking others) while the user code runs between __next__ calls
-                reply = _http.request('GET', url, headers=headers)
+                reply = _http.request("GET", url, headers=headers)
 
                 # 416=Range Not Satisfiable, typically meaning "no new data to send"
                 if reply.status == 416:
@@ -313,9 +326,9 @@
                     raise BadHTTPError(f"got {reply.status} when trying to GET {url}", reply)
 
                 bytes_read += len(reply.data)
-                buffer += reply.data.decode(errors='ignore')
+                buffer += reply.data.decode(errors="ignore")
 
-                while (index := buffer.find('\n')) != -1:
+                while (index := buffer.find("\n")) != -1:
                     yield buffer[:index]
                     buffer = buffer[index+1:]
 
@@ -333,14 +346,17 @@
     When used in a context manager, it produces a ReservedMachine tuple with
     connection details for an ssh client:
 
-        with Reserve(compose='CentOS-Stream-9', timeout=720) as m:
-            subprocess.run(['ssh', '-i', m.ssh_key, f'{m.user}@{m.host}', 'ls /'])
+        with Reserve(compose="CentOS-Stream-9", timeout=720) as m:
+            subprocess.run(["ssh", "-i", m.ssh_key, f"{m.user}@{m.host}", "ls /"])
     """
 
-    Reserved = collections.namedtuple('ReservedMachine', ['user', 'host', 'ssh_key', 'request'])
+    Reserved = collections.namedtuple(
+        "ReservedMachine",
+        ("host", "port", "user", "ssh_key", "request"),
+    )
 
     def __init__(
-        self, compose=None, arch='x86_64', pool=None, hardware=None, kickstart=None,
+        self, *, compose, arch="x86_64", pool=None, hardware=None, kickstart=None,
         timeout=60, ssh_key=None, source_host=None, api=None,
     ):
         """
@@ -377,37 +393,38 @@
         'api' is a TestingFarmAPI instance - if unspecified, a sensible default
         will be used.
         """
+        util.info(f"Will reserve compose:{compose} on arch:{arch} for {timeout}min")
         spec = {
-            'test': RESERVE_TASK,
-            'environments': [{
-                'arch': arch,
-                'os': {
-                    'compose': compose,
+            "test": RESERVE_TASK,
+            "environments": [{
+                "arch": arch,
+                "os": {
+                    "compose": compose,
                 },
-                'pool': pool,
-                'settings': {
-                    'pipeline': {
-                        'skip_guest_setup': True,
+                "pool": pool,
+                "settings": {
+                    "pipeline": {
+                        "skip_guest_setup": True,
                     },
-                    'provisioning': {
-                        'tags': {
-                            'ArtemisUseSpot': 'false',
+                    "provisioning": {
+                        "tags": {
+                            "ArtemisUseSpot": "false",
                         },
-                        'security_group_rules_ingress': [],
+                        "security_group_rules_ingress": [],
                     },
                 },
-                'secrets': {},
+                "secrets": {},
             }],
-            'settings': {
-                'pipeline': {
-                    'timeout': timeout,
+            "settings": {
+                "pipeline": {
+                    "timeout": timeout,
                 },
            },
        }
        if hardware:
-            spec['environments'][0]['hardware'] = hardware
+            spec["environments"][0]["hardware"] = hardware
        if kickstart:
-            spec['environments'][0]['kickstart'] = kickstart
+            spec["environments"][0]["kickstart"] = kickstart
 
        self._spec = spec
        self._ssh_key = Path(ssh_key) if ssh_key else None
@@ -419,39 +436,39 @@
 
     @staticmethod
     def _guess_host_ipv4():
-        curl_agent = {'User-Agent': 'curl/1.2.3'}
+        curl_agent = {"User-Agent": "curl/1.2.3"}
         try:
-            r = _http.request('GET', 'https://ifconfig.me', headers=curl_agent)
+            r = _http.request("GET", "https://ifconfig.me", headers=curl_agent)
             if r.status != 200:
                 raise ConnectionError()
         except (ConnectionError, urllib3.exceptions.RequestError):
-            r = _http.request('GET', 'https://ifconfig.co', headers=curl_agent)
+            r = _http.request("GET", "https://ifconfig.co", headers=curl_agent)
         return r.data.decode().strip()
 
     @staticmethod
     def _gen_ssh_keypair(tmpdir):
         tmpdir = Path(tmpdir)
         subprocess.run(
-            ['ssh-keygen', '-t', 'rsa', '-N', '', '-f', tmpdir / 'key_rsa'],
+            ("ssh-keygen", "-t", "rsa", "-N", "", "-f", tmpdir / "key_rsa"),
             stdout=subprocess.DEVNULL,
             check=True,
         )
-        return (tmpdir / 'key_rsa', tmpdir / 'key_rsa.pub')
+        return (tmpdir / "key_rsa", tmpdir / "key_rsa.pub")
 
     def __enter__(self):
         spec = self._spec.copy()
 
         try:
             # add source_host firewall filter
-            source_host = self._source_host or f'{self._guess_host_ipv4()}/32'
+            source_host = self._source_host or f"{self._guess_host_ipv4()}/32"
             ingress = \
-                spec['environments'][0]['settings']['provisioning']['security_group_rules_ingress']
+                spec["environments"][0]["settings"]["provisioning"]["security_group_rules_ingress"]
             ingress.append({
-                'type': 'ingress',
-                'protocol': '-1',
-                'cidr': source_host,
-                'port_min': 0,
-                'port_max': 65535,
+                "type": "ingress",
+                "protocol": "-1",
+                "cidr": source_host,
+                "port_min": 0,
+                "port_max": 65535,
             })
 
             # read user-provided ssh key, or generate one
@@ -459,14 +476,14 @@
             if ssh_key:
                 if not ssh_key.exists():
                     raise FileNotFoundError(f"{ssh_key} specified, but does not exist")
-                ssh_pubkey = Path(f'{ssh_key}.pub')
+                ssh_pubkey = Path(f"{ssh_key}.pub")
             else:
                 self._tmpdir = tempfile.TemporaryDirectory()
                 ssh_key, ssh_pubkey = self._gen_ssh_keypair(self._tmpdir.name)
 
             pubkey_contents = ssh_pubkey.read_text().strip()
-            secrets = spec['environments'][0]['secrets']
-            secrets['RESERVE_SSH_PUBKEY'] = pubkey_contents
+            secrets = spec["environments"][0]["secrets"]
+            secrets["RESERVE_SSH_PUBKEY"] = pubkey_contents
 
             self.request = Request(api=self.api)
             self.request.submit(spec)
@@ -477,24 +494,24 @@
             for line in PipelineLogStreamer(self.request):
                 util.debug(f"pipeline: {line}")
                 # find hidden login details
-                m = re.search(r'\] Guest is ready: ArtemisGuest\([^,]+, (\w+)@([0-9\.]+), ', line)
+                m = re.search(r"\] Guest is ready: ArtemisGuest\([^,]+, (\w+)@([0-9\.]+), ", line)
                 if m:
                     ssh_user, ssh_host = m.groups()
                     continue
                 # but wait until much later despite having login, at least until
                 # the test starts running (and we get closer to it inserting our
                 # ~/.ssh/authorized_keys entry)
-                if ssh_user and re.search(r'\] starting tests execution', line):
+                if ssh_user and re.search(r"\] starting tests execution", line):
                     break
 
             # wait for a successful connection over ssh
             # (it will be failing to login for a while, until the reserve test
             # installs our ssh pubkey into authorized_keys)
-            ssh_attempt_cmd = [
-                'ssh', '-q', '-i', ssh_key, f'-oConnectionAttempts={API_QUERY_DELAY}',
-                '-oStrictHostKeyChecking=no', '-oUserKnownHostsFile=/dev/null',
-                f'{ssh_user}@{ssh_host}', 'exit 123',
-            ]
+            ssh_attempt_cmd = (
+                "ssh", "-q", "-i", ssh_key, f"-oConnectionAttempts={API_QUERY_DELAY}",
+                "-oStrictHostKeyChecking=no", "-oUserKnownHostsFile=/dev/null",
+                f"{ssh_user}@{ssh_host}", "exit 123",
+            )
             while True:
                 # wait for API_QUERY_DELAY between ssh retries, seems like GEFN sleep time
                 time.sleep(API_QUERY_DELAY)
@@ -508,7 +525,13 @@
                 if proc.returncode == 123:
                     break
 
-            return self.Reserved(ssh_user, ssh_host, ssh_key, self.request)
+            return self.Reserved(
+                host=ssh_host,
+                port=22,
+                user=ssh_user,
+                ssh_key=ssh_key,
+                request=self.request,
+            )
 
         except:
             self.__exit__(*sys.exc_info())
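
With the widened ReservedMachine tuple (now also carrying `port`), usage along the lines of the class docstring above would look roughly like this; the compose name is just an example value:

```python
import subprocess

with Reserve(compose="CentOS-Stream-9", timeout=720) as m:
    subprocess.run(
        ["ssh", "-p", str(m.port), "-i", m.ssh_key, f"{m.user}@{m.host}", "ls /"],
        check=True,
    )
```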
@@ -0,0 +1 @@
+a = 123
atex/util/__init__.py CHANGED
@@ -17,21 +17,21 @@ def __dir__():
 # (function to avoid polluting global namespace with extra variables)
 def _import_submodules():
     for info in _pkgutil.iter_modules(__spec__.submodule_search_locations):
-        mod = _importlib.import_module(f'.{info.name}', __name__)
+        mod = _importlib.import_module(f".{info.name}", __name__)
 
         # if the module defines __all__, just use it
-        if hasattr(mod, '__all__'):
+        if hasattr(mod, "__all__"):
            keys = mod.__all__
        else:
            # https://docs.python.org/3/reference/executionmodel.html#binding-of-names
-            keys = (x for x in dir(mod) if not x.startswith('_'))
+            keys = (x for x in dir(mod) if not x.startswith("_"))
 
        for key in keys:
            attr = getattr(mod, key)
 
            # avoid objects that belong to other known modules
            # (ie. imported function from another util module)
-            if hasattr(attr, '__module__'):
+            if hasattr(attr, "__module__"):
                if attr.__module__ != mod.__name__:
                    continue
            # avoid some common pollution / imports
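
The loop above copies each util submodule's public names into the atex.util package namespace, which is what lets code earlier in this diff call util.debug() and util.info() directly; a rough illustration, assuming the package is installed:

```python
from atex import util

util.debug("hello from the flattened namespace")   # re-exported from atex.util.log
util.subprocess_run(["true"])                       # re-exported from atex.util.subprocess
```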
atex/util/dedent.py CHANGED
@@ -22,4 +22,4 @@ def dedent(text):
     Like textwrap.dedent(), but also strip leading and trailing spaces/newlines
     up to the content.
     """
-    return textwrap.dedent(text.lstrip('\n').rstrip(' \n'))
+    return textwrap.dedent(text.lstrip("\n").rstrip(" \n"))
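
For illustration, the trimming behaviour of dedent() (unchanged here apart from the quoting style):

```python
from atex.util.dedent import dedent

text = dedent("""
    first line
      second line, indented two extra spaces
""")
print(text)   # -> "first line\n  second line, indented two extra spaces"
```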
atex/util/log.py CHANGED
@@ -2,7 +2,7 @@ import inspect
 import logging
 from pathlib import Path
 
-_logger = logging.getLogger('atex')
+_logger = logging.getLogger("atex")
 
 
 def _format_msg(msg, *, skip_frames=0):
@@ -13,7 +13,7 @@ def _format_msg(msg, *, skip_frames=0):
 
     # bottom of the stack, or runpy executed module
     for frame_info in stack:
-        if frame_info.function == '<module>':
+        if frame_info.function == "<module>":
             break
         module = frame_info
 
@@ -24,21 +24,21 @@ def _format_msg(msg, *, skip_frames=0):
     # if the function has 'self' and it looks like a class instance,
     # prepend it to the function name
     p_locals = parent.frame.f_locals
-    if 'self' in p_locals:
-        self = p_locals['self']
-        if hasattr(self, '__class__') and inspect.isclass(self.__class__):
-            function = f'{self.__class__.__name__}.{function}'
+    if "self" in p_locals:
+        self = p_locals["self"]
+        if hasattr(self, "__class__") and inspect.isclass(self.__class__):
+            function = f"{self.__class__.__name__}.{function}"
 
     # don't report module name of a function if it's the same as running module
     if parent.filename != module.filename:
-        parent_modname = parent.frame.f_globals['__name__']
+        parent_modname = parent.frame.f_globals["__name__"]
         # avoid everything having the package name prefixed
-        parent_modname = parent_modname.partition('.')[2] or parent_modname
-        return f'{parent_modname}.{function}:{parent.lineno}: {msg}'
-    elif parent.function != '<module>':
-        return f'{function}:{parent.lineno}: {msg}'
+        parent_modname = parent_modname.partition(".")[2] or parent_modname
+        return f"{parent_modname}.{function}:{parent.lineno}: {msg}"
+    elif parent.function != "<module>":
+        return f"{function}:{parent.lineno}: {msg}"
     else:
-        return f'{Path(parent.filename).name}:{parent.lineno}: {msg}'
+        return f"{Path(parent.filename).name}:{parent.lineno}: {msg}"
 
 
 def debug(msg, *, skip_frames=0):
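
A rough sketch of the prefixes _format_msg() builds, assuming debug() above (and its siblings) route messages through it; the exact line number depends on the caller:

```python
import logging
from atex.util import log

logging.basicConfig(level=logging.DEBUG)

class Demo:
    def work(self):
        # expected to come out shaped like "Demo.work:<lineno>: starting"
        log.debug("starting")

Demo().work()
```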
atex/util/subprocess.py CHANGED
@@ -3,29 +3,30 @@ import subprocess
 from .log import debug
 
 
-def _format_subprocess_cmd(cmd):
-    return cmd
-    # if isinstance(cmd, (list, tuple)):
-    #     return ' '.join(str(x) for x in cmd)
-    # else:
-    #     return cmd
-
-
 def subprocess_run(cmd, *, skip_frames=0, **kwargs):
     """
     A simple wrapper for the real subprocess.run() that logs the command used.
     """
     # when logging, skip current stack frame - report the place we were called
     # from, not util.subprocess_run itself
-    debug(f'running: {_format_subprocess_cmd(cmd)}', skip_frames=skip_frames+1)
+    debug(f"running: {cmd}", skip_frames=skip_frames+1)
     return subprocess.run(cmd, **kwargs)
 
 
+def subprocess_output(cmd, *, skip_frames=0, check=True, text=True, **kwargs):
+    """
+    A wrapper simulating subprocess.check_output() via a modern .run() API.
+    """
+    debug(f"running: {cmd}", skip_frames=skip_frames+1)
+    proc = subprocess.run(cmd, check=check, text=text, stdout=subprocess.PIPE, **kwargs)
+    return proc.stdout.rstrip("\n") if text else proc.stdout
+
+
 def subprocess_Popen(cmd, *, skip_frames=0, **kwargs):  # noqa: N802
     """
     A simple wrapper for the real subprocess.Popen() that logs the command used.
     """
-    debug(f'running: {_format_subprocess_cmd(cmd)}', skip_frames=skip_frames+1)
+    debug(f"running: {cmd}", skip_frames=skip_frames+1)
     return subprocess.Popen(cmd, **kwargs)
 
 
@@ -38,12 +39,12 @@ def subprocess_stream(cmd, *, check=False, skip_frames=0, **kwargs):
 
     To capture both stdout and stderr as yielded lines, use subprocess.STDOUT.
     """
-    debug(f'running: {_format_subprocess_cmd(cmd)}', skip_frames=skip_frames+1)
-    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True, **kwargs)
+    debug(f"running: {cmd}", skip_frames=skip_frames+1)
+    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, text=True, **kwargs)
 
     def generate_lines():
         for line in proc.stdout:
-            yield line.rstrip('\n')
+            yield line.rstrip("\n")
         code = proc.wait()
         if code > 0 and check:
             raise subprocess.CalledProcessError(cmd=cmd, returncode=code)
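
A quick sketch of the subprocess_output() helper added above; it mirrors check_output() semantics on top of run():

```python
from atex.util.subprocess import subprocess_output

kernel = subprocess_output(["uname", "-r"])           # stripped stdout as str; check=True by default
print(kernel)

raw = subprocess_output(["uname", "-r"], text=False)  # pass text=False to get raw bytes instead
```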
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: atex
-Version: 0.5
+Version: 0.7
 Summary: Ad-hoc Test EXecutor
 Project-URL: Homepage, https://github.com/RHSecurityCompliance/atex
 License-Expression: GPL-3.0-or-later