tft-cli 0.0.17__tar.gz → 0.0.18__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: tft-cli
-Version: 0.0.17
+Version: 0.0.18
 Summary: Testing Farm CLI tool
 License: Apache-2.0
 Author: Miroslav Vadkerti
@@ -15,4 +15,5 @@ Requires-Dist: click (>=8.0.4,<8.1.0)
 Requires-Dist: colorama (>=0.4.4,<0.5.0)
 Requires-Dist: dynaconf (>=3.1.7,<4.0.0)
 Requires-Dist: requests (>=2.27.1,<3.0.0)
+Requires-Dist: ruamel-yaml (>=0.18.6,<0.19.0)
 Requires-Dist: typer[all] (>=0.7.0,<0.8.0)
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "tft-cli"
-version = "0.0.17"
+version = "0.0.18"
 description = "Testing Farm CLI tool"
 authors = ["Miroslav Vadkerti <mvadkert@redhat.com>"]
 license = "Apache-2.0"
@@ -21,6 +21,7 @@ click = "~8.0.4"
 dynaconf = "^3.1.7"
 colorama = "^0.4.4"
 requests = "^2.27.1"
+ruamel-yaml = "^0.18.6"
 
 [tool.poetry.dev-dependencies]
 pyre-check = "^0.9.10"
@@ -10,6 +10,7 @@ import subprocess
 import textwrap
 import time
 import urllib.parse
+import xml.etree.ElementTree as ET
 from enum import Enum
 from typing import Any, Dict, List, Optional
 
@@ -18,12 +19,14 @@ import requests
 import typer
 from rich import print
 from rich.progress import Progress, SpinnerColumn, TextColumn
+from rich.table import Table
 
 from tft.cli.config import settings
 from tft.cli.utils import (
     artifacts,
     cmd_output_or_exit,
     console,
+    console_stderr,
     exit_error,
     hw_constraints,
     install_http_retries,
@@ -57,6 +60,11 @@ RESERVE_REF = os.getenv("TESTING_FARM_RESERVE_REF", "main")
 DEFAULT_PIPELINE_TIMEOUT = 60 * 12
 
 
+class WatchFormat(str, Enum):
+    text = 'text'
+    json = 'json'
+
+
 class PipelineType(str, Enum):
     tmt_multihost = "tmt-multihost"
 
@@ -127,11 +135,11 @@ OPTION_POST_INSTALL_SCRIPT: Optional[str] = typer.Option(
 )
 OPTION_KICKSTART: Optional[List[str]] = typer.Option(
     None,
-    metavar="key=value",
+    metavar="key=value|@file",
     help=(
         "Kickstart specification to customize the guest installation. Expressed as a key=value pair. "
         "For more information about the supported keys see "
-        "https://tmt.readthedocs.io/en/stable/spec/plans.html#kickstart."
+        "https://tmt.readthedocs.io/en/stable/spec/plans.html#kickstart. The @ prefix marks a yaml file to load."
     ),
 )
 OPTION_POOL: Optional[str] = typer.Option(
@@ -169,19 +177,27 @@ OPTION_DRY_RUN: bool = typer.Option(
     False, help="Do not submit a request to Testing Farm, just print it.", rich_help_panel=RESERVE_PANEL_GENERAL
 )
 OPTION_VARIABLES: Optional[List[str]] = typer.Option(
-    None, "-e", "--environment", metavar="key=value", help="Variables to pass to the test environment."
+    None,
+    "-e",
+    "--environment",
+    metavar="key=value|@file",
+    help="Variables to pass to the test environment. The @ prefix marks a yaml file to load.",
 )
 OPTION_SECRETS: Optional[List[str]] = typer.Option(
-    None, "-s", "--secret", metavar="key=value", help="Secret variables to pass to the test environment."
+    None,
+    "-s",
+    "--secret",
+    metavar="key=value|@file",
+    help="Secret variables to pass to the test environment. The @ prefix marks a yaml file to load.",
 )
 OPTION_HARDWARE: List[str] = typer.Option(
     None,
     help=(
         "HW requirements, expressed as key/value pairs. Keys can consist of several properties, "
-        "e.g. ``disk.space='>= 40 GiB'``, such keys will be merged in the resulting environment "
-        "with other keys sharing the path: ``cpu.family=79`` and ``cpu.model=6`` would be merged, "
-        "not overwriting each other. See https://tmt.readthedocs.io/en/stable/spec/hardware.html "
-        "for the hardware specification."
+        "e.g. ``disk.size='>= 40 GiB'``, such keys will be merged in the resulting environment "
+        "with other keys sharing the path: ``cpu.family=79`` and ``cpu.model=6`` would be merged, not overwriting "
+        "each other. See https://docs.testing-farm.io/Testing%20Farm/0.1/test-request.html#hardware "
+        "for the supported hardware selection possibilities."
     ),
 )
 OPTION_WORKER_IMAGE: Optional[str] = typer.Option(
@@ -197,11 +213,157 @@ OPTION_PARALLEL_LIMIT: Optional[int] = typer.Option(
 )
 
 
+def _parse_xunit(xunit: str):
+    """
+    A helper that parses xunit file into sets of passed_plans/failed_plans/errored_plans per arch.
+
+    The plans are returned as a {'arch': ['plan1', 'plan2', ..]} map. If it was impossible to deduce architecture
+    from a certain plan result (happens in case of early fails / infra issues), the plan will be listed under the 'N/A'
+    key.
+    """
+
+    def _add_plan(collection: dict, arch: str, plan: ET.Element):
+        # NOTE(ivasilev) name property will always be defined at this point, defaulting to '' to make type check happy
+        plan_name = plan.get('name', '')
+        if arch in collection:
+            collection[arch].append(plan_name)
+        else:
+            collection[arch] = [plan_name]
+
+    failed_plans = {}
+    passed_plans = {}
+    errored_plans = {}
+
+    results_root = ET.fromstring(xunit)
+    for plan in results_root.findall('./testsuite'):
+        # Try to get information about the environment (stored under testcase/testing-environment), may be
+        # absent if state is undefined
+        testing_environment: Optional[ET.Element] = plan.find('./testcase/testing-environment[@name="requested"]')
+        if not testing_environment:
+            console_stderr.print(
+                f'Could not find env specifications for {plan.get("name")}, assuming fail for all arches'
+            )
+            arch = 'N/A'
+        else:
+            arch_property = testing_environment.find('./property[@name="arch"]')
+            if arch_property is None:
+                console_stderr.print(f'Could not find arch property for plan {plan.get("name")} results, skipping')
+                continue
+            # NOTE(ivasilev) arch property will always be defined at this point, defaulting to '' to make type check
+            # happy
+            arch = arch_property.get('value', '')
+        if plan.get('result') == 'passed':
+            _add_plan(passed_plans, arch, plan)
+        elif plan.get('result') == 'failed':
+            _add_plan(failed_plans, arch, plan)
+        else:
+            _add_plan(errored_plans, arch, plan)
+
+    # Let's remove possible duplicates among N/A errored out tests
+    if 'N/A' in errored_plans:
+        errored_plans['N/A'] = list(set(errored_plans['N/A']))
+    return passed_plans, failed_plans, errored_plans
+
+
+def _get_request_summary(request: dict, session: requests.Session):
+    """A helper that prepares json summary of the test run"""
+    state = request.get('state')
+    artifacts_url = (request.get('run') or {}).get('artifacts')
+    xpath_url = f'{artifacts_url}/results.xml' if artifacts_url else ''
+    xunit = (request.get('result') or {}).get('xunit') or '<testsuites></testsuites>'
+    if state not in ['queued', 'running'] and artifacts_url:
+        # NOTE(ivasilev) xunit can be None (ex. in case of timed out requests) so let's fetch results.xml and use it
+        # as source of truth
+        try:
+            response = session.get(xpath_url)
+            if response.status_code == 200:
+                xunit = response.text
+        except requests.exceptions.ConnectionError:
+            console_stderr.print("Could not get xunit results")
+    passed_plans, failed_plans, errored_plans = _parse_xunit(xunit)
+    overall = (request.get("result") or {}).get("overall")
+    arches_requested = [env['arch'] for env in request['environments_requested']]
+
+    return {
+        'id': request['id'],
+        'state': request['state'],
+        'artifacts': artifacts_url,
+        'overall': overall,
+        'arches_requested': arches_requested,
+        'errored_plans': errored_plans,
+        'failed_plans': failed_plans,
+        'passed_plans': passed_plans,
+    }
+
+
+def _print_summary_table(summary: dict, format: Optional[WatchFormat], show_details=True):
+    if not format == WatchFormat.text:
+        # Nothing to do, table is printed only when text output is requested
+        return
+
+    def _get_plans_list(collection):
+        return list(collection.values())[0] if collection.values() else []
+
+    def _has_plan(collection, arch, plan):
+        return plan in collection.get(arch, [])
+
+    # Let's transform plans maps into collection of plans to display plan result per arch statistics
+    errored = _get_plans_list(summary['errored_plans'])
+    failed = _get_plans_list(summary['failed_plans'])
+    passed = _get_plans_list(summary['passed_plans'])
+    generic_info_table = Table(show_header=True, header_style="bold magenta")
+    arches_requested = summary['arches_requested']
+    artifacts_url = summary['artifacts'] or ''
+    for column in summary.keys():
+        generic_info_table.add_column(column)
+    generic_info_table.add_row(
+        summary['id'],
+        summary['state'],
+        f'[link]{artifacts_url}[/link]',
+        summary['overall'],
+        ','.join(arches_requested),
+        str(len(errored)),
+        str(len(failed)),
+        str(len(passed)),
+    )
+    console.print(generic_info_table)
+
+    all_plans = sorted(set(errored + failed + passed))
+    details_table = Table(show_header=True, header_style="bold magenta")
+    for column in ["plan"] + arches_requested:
+        details_table.add_column(column)
+
+    for plan in all_plans:
+        row = [plan]
+        for arch in arches_requested:
+            if _has_plan(summary['passed_plans'], arch, plan):
+                res = '[green]pass[/green]'
+            elif _has_plan(summary['failed_plans'], arch, plan):
+                res = '[red]fail[/red]'
+            elif _has_plan(summary['errored_plans'], 'N/A', plan):
+                res = '[yellow]error[/yellow]'
+            else:
+                # If for some reason the plan has not been executed for this arch (this can happen after
+                # applying adjust rules) -> don't show anything
+                res = None
+            row.append(res)
+        details_table.add_row(*row)
+    if show_details:
+        console.print(details_table)
+
+
 def watch(
     api_url: str = typer.Option(settings.API_URL, help="Testing Farm API URL."),
     id: str = typer.Option(..., help="Request ID to watch"),
     no_wait: bool = typer.Option(False, help="Skip waiting for request completion."),
+    format: Optional[WatchFormat] = typer.Option(WatchFormat.text, help="Output format"),
 ):
+    def _console_print(*args, **kwargs):
+        """A helper function that will skip printing to console if output format is json"""
+        if format == WatchFormat.json:
+            return
+        console.print(*args, **kwargs)
+
     """Watch request for completion."""
 
     if not uuid_valid(id):
@@ -210,10 +372,10 @@ def watch(
     get_url = urllib.parse.urljoin(api_url, f"/v0.1/requests/{id}")
     current_state: str = ""
 
-    console.print(f"🔎 api [blue]{get_url}[/blue]")
+    _console_print(f"🔎 api [blue]{get_url}[/blue]")
 
     if not no_wait:
-        console.print("💡 waiting for request to finish, use ctrl+c to skip", style="bright_yellow")
+        _console_print("💡 waiting for request to finish, use ctrl+c to skip", style="bright_yellow")
 
     artifacts_shown = False
 
@@ -245,37 +407,45 @@ def watch(
 
         current_state = state
 
+        request_summary = _get_request_summary(request, session)
+        if format == WatchFormat.json:
+            console.print(json.dumps(request_summary, indent=2))
+
         if state == "new":
-            console.print("👶 request is [blue]waiting to be queued[/blue]")
+            _console_print("👶 request is [blue]waiting to be queued[/blue]")
 
         elif state == "queued":
-            console.print("👷 request is [blue]queued[/blue]")
+            _console_print("👷 request is [blue]queued[/blue]")
 
         elif state == "running":
-            console.print("🚀 request is [blue]running[/blue]")
-            console.print(f"🚢 artifacts [blue]{request['run']['artifacts']}[/blue]")
+            _console_print("🚀 request is [blue]running[/blue]")
+            _console_print(f"🚢 artifacts [blue]{request['run']['artifacts']}[/blue]")
             artifacts_shown = True
 
         elif state == "complete":
             if not artifacts_shown:
-                console.print(f"🚢 artifacts [blue]{request['run']['artifacts']}[/blue]")
+                _console_print(f"🚢 artifacts [blue]{request['run']['artifacts']}[/blue]")
 
             overall = request["result"]["overall"]
             if overall in ["passed", "skipped"]:
-                console.print("✅ tests passed", style="green")
+                _console_print("✅ tests passed", style="green")
+                _print_summary_table(request_summary, format)
                 raise typer.Exit()
 
             if overall in ["failed", "error", "unknown"]:
-                console.print(f"❌ tests {overall}", style="red")
+                _console_print(f"❌ tests {overall}", style="red")
                 if overall == "error":
-                    console.print(f"{request['result']['summary']}", style="red")
+                    _console_print(f"{request['result']['summary']}", style="red")
+                _print_summary_table(request_summary, format)
                 raise typer.Exit(code=1)
 
         elif state == "error":
-            console.print(f"📛 pipeline error\n{request['result']['summary']}", style="red")
+            _console_print(f"📛 pipeline error\n{request['result']['summary']}", style="red")
+            _print_summary_table(request_summary, format)
             raise typer.Exit(code=2)
 
         if no_wait:
+            _print_summary_table(request_summary, format, show_details=False)
             raise typer.Exit()
 
         time.sleep(settings.WATCH_TICK)
@@ -319,20 +489,15 @@ def request(
         None,
         help="Compose used to provision system-under-test. If not set, tests will expect 'container' provision method specified in tmt plans.",  # noqa
     ),
-    hardware: List[str] = typer.Option(
-        None,
-        help=(
-            "HW requirements, expressed as key/value pairs. Keys can consist of several properties, "
-            "e.g. ``disk.space='>= 40 GiB'``, such keys will be merged in the resulting environment "
-            "with other keys sharing the path: ``cpu.family=79`` and ``cpu.model=6`` would be merged, "
-            "not overwriting each other. See https://tmt.readthedocs.io/en/stable/spec/hardware.html "
-            "for the hardware specification."
-        ),
-    ),
+    hardware: List[str] = OPTION_HARDWARE,
     kickstart: Optional[List[str]] = OPTION_KICKSTART,
     pool: Optional[str] = OPTION_POOL,
     cli_tmt_context: Optional[List[str]] = typer.Option(
-        None, "-c", "--context", metavar="key=value", help="Context variables to pass to `tmt`."
+        None,
+        "-c",
+        "--context",
+        metavar="key=value|@file",
+        help="Context variables to pass to `tmt`. The @ prefix marks a yaml file to load.",
     ),
     variables: Optional[List[str]] = OPTION_VARIABLES,
     secrets: Optional[List[str]] = OPTION_SECRETS,
@@ -340,10 +505,11 @@ def request(
         None,
         "-T",
         "--tmt-environment",
-        metavar="key=value",
+        metavar="key=value|@file",
         help=(
             "Environment variables to pass to the tmt process. "
-            "Used to configure tmt report plugins like reportportal or polarion."
+            "Used to configure tmt report plugins like reportportal or polarion. "
+            "The @ prefix marks a yaml file to load."
         ),
     ),
     no_wait: bool = typer.Option(False, help="Skip waiting for request completion."),
@@ -354,7 +520,11 @@ def request(
     repository: List[str] = OPTION_REPOSITORY,
     repository_file: List[str] = OPTION_REPOSITORY_FILE,
     tags: Optional[List[str]] = typer.Option(
-        None, "-t", "--tag", metavar="key=value", help="Tag cloud resources with given value."
+        None,
+        "-t",
+        "--tag",
+        metavar="key=value|@file",
+        help="Tag cloud resources with given value. The @ prefix marks a yaml file to load.",
     ),
     watchdog_dispatch_delay: Optional[int] = typer.Option(
         None,
@@ -603,7 +773,7 @@ def request(
         exit_error(f"Unexpected error. Please file an issue to {settings.ISSUE_TRACKER}.")
 
     # watch
-    watch(api_url, response.json()['id'], no_wait)
+    watch(api_url, response.json()['id'], no_wait, format=WatchFormat.text)
 
 
 def restart(
@@ -627,16 +797,7 @@ def restart(
     git_url: Optional[str] = typer.Option(None, help="Force URL of the GIT repository to test."),
     git_ref: Optional[str] = typer.Option(None, help="Force GIT ref or branch to test."),
    git_merge_sha: Optional[str] = typer.Option(None, help="Force GIT ref or branch into which --ref will be merged."),
-    hardware: List[str] = typer.Option(
-        None,
-        help=(
-            "HW requirements, expressed as key/value pairs. Keys can consist of several properties, "
-            "e.g. ``disk.space='>= 40 GiB'``, such keys will be merged in the resulting environment "
-            "with other keys sharing the path: ``cpu.family=79`` and ``cpu.model=6`` would be merged, "
-            "not overwriting each other. See https://tmt.readthedocs.io/en/stable/spec/hardware.html "
-            "for the hardware specification."
-        ),
-    ),
+    hardware: List[str] = OPTION_HARDWARE,
     tmt_plan_name: Optional[str] = OPTION_TMT_PLAN_NAME,
     tmt_plan_filter: Optional[str] = OPTION_TMT_PLAN_FILTER,
     tmt_test_name: Optional[str] = OPTION_TMT_TEST_NAME,
@@ -806,7 +967,7 @@ def restart(
         exit_error(f"Unexpected error. Please file an issue to {settings.ISSUE_TRACKER}.")
 
     # watch
-    watch(str(api_url), response.json()['id'], no_wait)
+    watch(str(api_url), response.json()['id'], no_wait, format=WatchFormat.text)
 
 
 def run(
@@ -894,6 +1055,8 @@ def run(
     if verbose:
         console.print(f"🔎 api [blue]{get_url}[/blue]")
 
+    search: Optional[re.Match[str]] = None
+
     # wait for the sanity test to finish
     with Progress(
         SpinnerColumn(),
@@ -941,6 +1104,12 @@ def run(
         try:
             search = re.search(r'href="(.*)" name="workdir"', session.get(f"{artifacts_url}/results.xml").text)
 
+        except requests.exceptions.SSLError:
+            console.print(
+                "\r🚫 [yellow]artifacts unreachable via SSL, do you have RH CA certificates installed?[/yellow]"
+            )
+            console.print(f"\r🚢 artifacts [blue]{artifacts_url}[/blue]")
+
         except requests.exceptions.ConnectionError:
             console.print("\r🚫 [yellow]artifacts unreachable, are you on VPN?[/yellow]")
             console.print(f"\r🚢 artifacts [blue]{artifacts_url}[/blue]")
@@ -949,7 +1118,6 @@ def run(
     if not search:
         exit_error("Could not find working directory, cannot continue")
 
-    assert search
     workdir = str(search.groups(1)[0])
     output = f"{workdir}/testing-farm/sanity/execute/data/guest/default-0/testing-farm/script-1/output.txt"
 
@@ -1186,12 +1354,24 @@ def reserve(
         if not pipeline_log:
             exit_error(f"Pipeline log was empty. Please file an issue to {settings.ISSUE_TRACKER}.")
 
+    except requests.exceptions.SSLError:
+        exit_error(
+            textwrap.dedent(
+                f"""
+                Failed to access Testing Farm artifacts because of SSL validation error.
+                If you use Red Hat Ranch please make sure you have Red Hat CA certificates installed.
+                Otherwise file an issue to {settings.ISSUE_TRACKER}.
+                """
+            )
+        )
+        return
+
     except requests.exceptions.ConnectionError:
         exit_error(
             textwrap.dedent(
                 f"""
                 Failed to access Testing Farm artifacts.
-                If you use Red Hat Ranch please make sure you are conneted to the VPN.
+                If you use Red Hat Ranch please make sure you are connected to the VPN.
                 Otherwise file an issue to {settings.ISSUE_TRACKER}.
                 """
             )
@@ -19,4 +19,6 @@ settings = LazySettings(
     DEFAULT_API_RETRIES=7,
     # should lead to delays of 0.5, 1, 2, 4, 8, 16, 32 seconds
     DEFAULT_RETRY_BACKOFF_FACTOR=1,
+    # system CA certificates path, default for RHEL variants
+    REQUESTS_CA_BUNDLE="/etc/ssl/certs/ca-bundle.crt",
 )
@@ -21,3 +21,8 @@ app.command()(commands.watch)
 # This command is available only for the container based deployment
 if os.path.exists(settings.CONTAINER_SIGN):
     app.command()(commands.update)
+
+# Expose REQUESTS_CA_BUNDLE in the environment for RHEL-like systems
+# This is needed for custom CA certificates to nicely work.
+if "REQUESTS_CA_BUNDLE" not in os.environ and os.path.exists(settings.REQUESTS_CA_BUNDLE):
+    os.environ["REQUESTS_CA_BUNDLE"] = settings.REQUESTS_CA_BUNDLE
@@ -4,21 +4,24 @@
 import glob
 import os
 import subprocess
+import sys
 import uuid
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, NoReturn, Optional, Union
 
 import requests
 import requests.adapters
 import typer
 from rich.console import Console
+from ruamel.yaml import YAML
 from urllib3 import Retry
 
 from tft.cli.config import settings
 
 console = Console(soft_wrap=True)
+console_stderr = Console(soft_wrap=True, file=sys.stderr)
 
 
-def exit_error(error: str):
+def exit_error(error: str) -> NoReturn:
     """Exit with given error message"""
     console.print(f"⛔ {error}", style="red")
     raise typer.Exit(code=255)
@@ -91,15 +94,46 @@ def hw_constraints(hardware: List[str]) -> Dict[Any, Any]:
     return {key: value if key not in ("disk", "network") else [value] for key, value in constraints.items()}
 
 
+def options_from_file(filepath) -> Dict[str, str]:
+    """Read environment variables from a yaml file."""
+
+    with open(filepath, 'r') as file:
+        try:
+            yaml = YAML(typ="safe").load(file.read())
+        except Exception:
+            exit_error(f"Failed to load variables from yaml file {filepath}.")
+
+    if not yaml:  # pyre-ignore[61] # pyre ignores NoReturn in exit_error
+        return {}
+
+    if not isinstance(yaml, dict):  # pyre-ignore[61] # pyre ignores NoReturn in exit_error
+        exit_error(f"Environment file {filepath} is not a dict.")
+
+    if any([isinstance(value, (list, dict)) for value in yaml.values()]):
+        exit_error(f"Values of environment file {filepath} are not primitive types.")
+
+    return yaml  # pyre-ignore[61] # pyre ignores NoReturn in exit_error
+
+
 def options_to_dict(name: str, options: List[str]) -> Dict[str, str]:
-    """Create a dictionary from list of `key=value` options"""
-    try:
-        return {option.split("=", 1)[0]: option.split("=", 1)[1] for option in options}
+    """Create a dictionary from list of `key=value|@file` options"""
 
-    except IndexError:
-        exit_error(f"Options for {name} are invalid, must be defined as `key=value`")
+    options_dict = {}
+    for option in options:
+        # Option is `@file`
+        if option.startswith('@'):
+            if not os.path.isfile(option[1:]):
+                exit_error(f"Invalid environment file in option `{option}` specified.")
+            options_dict.update(options_from_file(option[1:]))
+
+        # Option is `key=value`
+        else:
+            try:
+                options_dict.update({option.split("=", 1)[0]: option.split("=", 1)[1]})
+            except IndexError:
+                exit_error(f"Option `{option}` is invalid, must be defined as `key=value|@file`.")
 
-    return {}
+    return options_dict
 
 
 def uuid_valid(value: str, version: int = 4) -> bool:
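
For context on the new `key=value|@file` option syntax introduced in this release: values prefixed with `@` are read by options_from_file as a flat YAML mapping of primitive values and merged with plain key=value pairs by options_to_dict. A minimal sketch only, assuming a hypothetical env.yaml next to the invocation:

# Sketch -- env.yaml is a hypothetical file containing a flat mapping of primitive values, e.g.:
#   FOO: bar
#   DEBUG: 1
from tft.cli.utils import options_to_dict

# Plain pairs and @file references can be mixed; both end up in one dict,
# mirroring what the CLI does for `-e FOO=bar -e @env.yaml`.
variables = options_to_dict("environment variables", ["FOO=bar", "@env.yaml"])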