tft-cli 0.0.16__tar.gz → 0.0.18__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {tft_cli-0.0.16 → tft_cli-0.0.18}/PKG-INFO +2 -1
- {tft_cli-0.0.16 → tft_cli-0.0.18}/pyproject.toml +2 -1
- {tft_cli-0.0.16 → tft_cli-0.0.18}/src/tft/cli/commands.py +318 -79
- {tft_cli-0.0.16 → tft_cli-0.0.18}/src/tft/cli/config.py +2 -0
- {tft_cli-0.0.16 → tft_cli-0.0.18}/src/tft/cli/tool.py +5 -0
- {tft_cli-0.0.16 → tft_cli-0.0.18}/src/tft/cli/utils.py +42 -8
- {tft_cli-0.0.16 → tft_cli-0.0.18}/LICENSE +0 -0
- {tft_cli-0.0.16 → tft_cli-0.0.18}/LICENSE_SPDX +0 -0
- {tft_cli-0.0.16 → tft_cli-0.0.18}/src/tft/cli/__init__.py +0 -0
{tft_cli-0.0.16 → tft_cli-0.0.18}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: tft-cli
-Version: 0.0.16
+Version: 0.0.18
 Summary: Testing Farm CLI tool
 License: Apache-2.0
 Author: Miroslav Vadkerti
@@ -15,4 +15,5 @@ Requires-Dist: click (>=8.0.4,<8.1.0)
 Requires-Dist: colorama (>=0.4.4,<0.5.0)
 Requires-Dist: dynaconf (>=3.1.7,<4.0.0)
 Requires-Dist: requests (>=2.27.1,<3.0.0)
+Requires-Dist: ruamel-yaml (>=0.18.6,<0.19.0)
 Requires-Dist: typer[all] (>=0.7.0,<0.8.0)
{tft_cli-0.0.16 → tft_cli-0.0.18}/pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "tft-cli"
-version = "0.0.16"
+version = "0.0.18"
 description = "Testing Farm CLI tool"
 authors = ["Miroslav Vadkerti <mvadkert@redhat.com>"]
 license = "Apache-2.0"
@@ -21,6 +21,7 @@ click = "~8.0.4"
 dynaconf = "^3.1.7"
 colorama = "^0.4.4"
 requests = "^2.27.1"
+ruamel-yaml = "^0.18.6"
 
 [tool.poetry.dev-dependencies]
 pyre-check = "^0.9.10"
{tft_cli-0.0.16 → tft_cli-0.0.18}/src/tft/cli/commands.py

@@ -10,6 +10,7 @@ import subprocess
 import textwrap
 import time
 import urllib.parse
+import xml.etree.ElementTree as ET
 from enum import Enum
 from typing import Any, Dict, List, Optional
 
@@ -18,12 +19,14 @@ import requests
 import typer
 from rich import print
 from rich.progress import Progress, SpinnerColumn, TextColumn
+from rich.table import Table
 
 from tft.cli.config import settings
 from tft.cli.utils import (
     artifacts,
     cmd_output_or_exit,
     console,
+    console_stderr,
     exit_error,
     hw_constraints,
     install_http_retries,
@@ -57,6 +60,11 @@ RESERVE_REF = os.getenv("TESTING_FARM_RESERVE_REF", "main")
 DEFAULT_PIPELINE_TIMEOUT = 60 * 12
 
 
+class WatchFormat(str, Enum):
+    text = 'text'
+    json = 'json'
+
+
 class PipelineType(str, Enum):
     tmt_multihost = "tmt-multihost"
 
@@ -72,20 +80,48 @@ ARGUMENT_API_TOKEN: str = typer.Argument(
     metavar='',
     rich_help_panel='Environment variables',
 )
-
+OPTION_TMT_PLAN_NAME: Optional[str] = typer.Option(
     None,
     "--plan",
-    help=
+    help=(
+        'Select plans to be executed. '
+        'Passed as `--name` option to the `tmt plan` command. '
+        'Can be a regular expression.'
+    ),
     rich_help_panel=REQUEST_PANEL_TMT,
 )
-
+OPTION_TMT_PLAN_FILTER: Optional[str] = typer.Option(
     None,
     "--plan-filter",
-    help=
+    help=(
+        'Filter tmt plans. '
+        'Passed as `--filter` option to the `tmt plan` command. '
+        'By default, `enabled:true` filter is applied. '
+        'Plan filtering is similar to test filtering, '
+        'see https://tmt.readthedocs.io/en/stable/examples.html#filter-tests for more information.'
+    ),
+    rich_help_panel=REQUEST_PANEL_TMT,
+)
+OPTION_TMT_TEST_NAME: Optional[str] = typer.Option(
+    None,
+    "--test",
+    help=(
+        'Select tests to be executed. '
+        'Passed as `--name` option to the `tmt test` command. '
+        'Can be a regular expression.'
+    ),
     rich_help_panel=REQUEST_PANEL_TMT,
 )
-
-    None,
+OPTION_TMT_TEST_FILTER: Optional[str] = typer.Option(
+    None,
+    "--test-filter",
+    help=(
+        'Filter tmt tests. '
+        'Passed as `--filter` option to the `tmt test` command. '
+        'It overrides any test filter defined in the plan. '
+        'See https://tmt.readthedocs.io/en/stable/examples.html#filter-tests for more information.'
+    ),
+    rich_help_panel=REQUEST_PANEL_TMT,
 )
 OPTION_TMT_PATH: str = typer.Option(
     '.',
@@ -99,11 +135,11 @@ OPTION_POST_INSTALL_SCRIPT: Optional[str] = typer.Option(
 )
 OPTION_KICKSTART: Optional[List[str]] = typer.Option(
     None,
-    metavar="key=value",
+    metavar="key=value|@file",
     help=(
         "Kickstart specification to customize the guest installation. Expressed as a key=value pair. "
         "For more information about the supported keys see "
-        "https://tmt.readthedocs.io/en/stable/spec/plans.html#kickstart."
+        "https://tmt.readthedocs.io/en/stable/spec/plans.html#kickstart. The @ prefix marks a yaml file to load."
    ),
 )
 OPTION_POOL: Optional[str] = typer.Option(
@@ -141,31 +177,193 @@ OPTION_DRY_RUN: bool = typer.Option(
     False, help="Do not submit a request to Testing Farm, just print it.", rich_help_panel=RESERVE_PANEL_GENERAL
 )
 OPTION_VARIABLES: Optional[List[str]] = typer.Option(
-    None,
+    None,
+    "-e",
+    "--environment",
+    metavar="key=value|@file",
+    help="Variables to pass to the test environment. The @ prefix marks a yaml file to load.",
 )
 OPTION_SECRETS: Optional[List[str]] = typer.Option(
-    None,
+    None,
+    "-s",
+    "--secret",
+    metavar="key=value|@file",
+    help="Secret variables to pass to the test environment. The @ prefix marks a yaml file to load.",
 )
 OPTION_HARDWARE: List[str] = typer.Option(
     None,
     help=(
         "HW requirements, expressed as key/value pairs. Keys can consist of several properties, "
-        "e.g. ``disk.
-        "with other keys sharing the path: ``cpu.family=79`` and ``cpu.model=6`` would be merged, "
-        "
-        "for the hardware
+        "e.g. ``disk.size='>= 40 GiB'``, such keys will be merged in the resulting environment "
+        "with other keys sharing the path: ``cpu.family=79`` and ``cpu.model=6`` would be merged, not overwriting "
+        "each other. See https://docs.testing-farm.io/Testing%20Farm/0.1/test-request.html#hardware "
+        "for the supported hardware selection possibilities."
     ),
 )
 OPTION_WORKER_IMAGE: Optional[str] = typer.Option(
     None, "--worker-image", help="Force worker container image. Requires Testing Farm developer permissions."
 )
+OPTION_PARALLEL_LIMIT: Optional[int] = typer.Option(
+    None,
+    '--parallel-limit',
+    help=(
+        "Maximum amount of plans to be executed in parallel. Default values are 12 for Public Ranch and 5 for "
+        "Red Hat Ranch."
+    ),
+)
+
+
+def _parse_xunit(xunit: str):
+    """
+    A helper that parses xunit file into sets of passed_plans/failed_plans/errored_plans per arch.
+
+    The plans are returned as a {'arch': ['plan1', 'plan2', ..]} map. If it was impossible to deduce architecture
+    from a certain plan result (happens in case of early fails / infra issues), the plan will be listed under the 'N/A'
+    key.
+    """
+
+    def _add_plan(collection: dict, arch: str, plan: ET.Element):
+        # NOTE(ivasilev) name property will always be defined at this point, defaulting to '' to make type check happy
+        plan_name = plan.get('name', '')
+        if arch in collection:
+            collection[arch].append(plan_name)
+        else:
+            collection[arch] = [plan_name]
+
+    failed_plans = {}
+    passed_plans = {}
+    errored_plans = {}
+
+    results_root = ET.fromstring(xunit)
+    for plan in results_root.findall('./testsuite'):
+        # Try to get information about the environment (stored under testcase/testing-environment), may be
+        # absent if state is undefined
+        testing_environment: Optional[ET.Element] = plan.find('./testcase/testing-environment[@name="requested"]')
+        if not testing_environment:
+            console_stderr.print(
+                f'Could not find env specifications for {plan.get("name")}, assuming fail for all arches'
+            )
+            arch = 'N/A'
+        else:
+            arch_property = testing_environment.find('./property[@name="arch"]')
+            if arch_property is None:
+                console_stderr.print(f'Could not find arch property for plan {plan.get("name")} results, skipping')
+                continue
+            # NOTE(ivasilev) arch property will always be defined at this point, defaulting to '' to make type check
+            # happy
+            arch = arch_property.get('value', '')
+        if plan.get('result') == 'passed':
+            _add_plan(passed_plans, arch, plan)
+        elif plan.get('result') == 'failed':
+            _add_plan(failed_plans, arch, plan)
+        else:
+            _add_plan(errored_plans, arch, plan)
+
+    # Let's remove possible duplicates among N/A errored out tests
+    if 'N/A' in errored_plans:
+        errored_plans['N/A'] = list(set(errored_plans['N/A']))
+    return passed_plans, failed_plans, errored_plans
+
+
+def _get_request_summary(request: dict, session: requests.Session):
+    """A helper that prepares json summary of the test run"""
+    state = request.get('state')
+    artifacts_url = (request.get('run') or {}).get('artifacts')
+    xpath_url = f'{artifacts_url}/results.xml' if artifacts_url else ''
+    xunit = (request.get('result') or {}).get('xunit') or '<testsuites></testsuites>'
+    if state not in ['queued', 'running'] and artifacts_url:
+        # NOTE(ivasilev) xunit can be None (ex. in case of timed out requests) so let's fetch results.xml and use it
+        # as source of truth
+        try:
+            response = session.get(xpath_url)
+            if response.status_code == 200:
+                xunit = response.text
+        except requests.exceptions.ConnectionError:
+            console_stderr.print("Could not get xunit results")
+    passed_plans, failed_plans, errored_plans = _parse_xunit(xunit)
+    overall = (request.get("result") or {}).get("overall")
+    arches_requested = [env['arch'] for env in request['environments_requested']]
+
+    return {
+        'id': request['id'],
+        'state': request['state'],
+        'artifacts': artifacts_url,
+        'overall': overall,
+        'arches_requested': arches_requested,
+        'errored_plans': errored_plans,
+        'failed_plans': failed_plans,
+        'passed_plans': passed_plans,
+    }
+
+
+def _print_summary_table(summary: dict, format: Optional[WatchFormat], show_details=True):
+    if not format == WatchFormat.text:
+        # Nothing to do, table is printed only when text output is requested
+        return
+
+    def _get_plans_list(collection):
+        return list(collection.values())[0] if collection.values() else []
+
+    def _has_plan(collection, arch, plan):
+        return plan in collection.get(arch, [])
+
+    # Let's transform plans maps into collection of plans to display plan result per arch statistics
+    errored = _get_plans_list(summary['errored_plans'])
+    failed = _get_plans_list(summary['failed_plans'])
+    passed = _get_plans_list(summary['passed_plans'])
+    generic_info_table = Table(show_header=True, header_style="bold magenta")
+    arches_requested = summary['arches_requested']
+    artifacts_url = summary['artifacts'] or ''
+    for column in summary.keys():
+        generic_info_table.add_column(column)
+    generic_info_table.add_row(
+        summary['id'],
+        summary['state'],
+        f'[link]{artifacts_url}[/link]',
+        summary['overall'],
+        ','.join(arches_requested),
+        str(len(errored)),
+        str(len(failed)),
+        str(len(passed)),
+    )
+    console.print(generic_info_table)
+
+    all_plans = sorted(set(errored + failed + passed))
+    details_table = Table(show_header=True, header_style="bold magenta")
+    for column in ["plan"] + arches_requested:
+        details_table.add_column(column)
+
+    for plan in all_plans:
+        row = [plan]
+        for arch in arches_requested:
+            if _has_plan(summary['passed_plans'], arch, plan):
+                res = '[green]pass[/green]'
+            elif _has_plan(summary['failed_plans'], arch, plan):
+                res = '[red]fail[/red]'
+            elif _has_plan(summary['errored_plans'], 'N/A', plan):
+                res = '[yellow]error[/yellow]'
+            else:
+                # If for some reason the plan has not been executed for this arch (this can happen after
+                # applying adjust rules) -> don't show anything
+                res = None
+            row.append(res)
+        details_table.add_row(*row)
+    if show_details:
+        console.print(details_table)
 
 
 def watch(
     api_url: str = typer.Option(settings.API_URL, help="Testing Farm API URL."),
     id: str = typer.Option(..., help="Request ID to watch"),
     no_wait: bool = typer.Option(False, help="Skip waiting for request completion."),
+    format: Optional[WatchFormat] = typer.Option(WatchFormat.text, help="Output format"),
 ):
+    def _console_print(*args, **kwargs):
+        """A helper function that will skip printing to console if output format is json"""
+        if format == WatchFormat.json:
+            return
+        console.print(*args, **kwargs)
+
     """Watch request for completion."""
 
     if not uuid_valid(id):
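The `_parse_xunit` helper added above walks the xunit document with `xml.etree.ElementTree` and groups plan names by the architecture recorded in the requested testing environment. A minimal, self-contained sketch of that traversal follows; the XML fragment, plan name and architectures are invented for illustration, only the element and attribute names (`testsuite`, `testcase`, `testing-environment`, `property`, `arch`, `result`) come from the code above.

```python
import xml.etree.ElementTree as ET

# Illustrative results.xml fragment; real Testing Farm xunit documents carry more data.
XUNIT = """
<testsuites>
  <testsuite name="/plans/basic" result="passed">
    <testcase name="/tests/smoke">
      <testing-environment name="requested">
        <property name="arch" value="x86_64"/>
      </testing-environment>
    </testcase>
  </testsuite>
  <testsuite name="/plans/basic" result="failed">
    <testcase name="/tests/smoke">
      <testing-environment name="requested">
        <property name="arch" value="aarch64"/>
      </testing-environment>
    </testcase>
  </testsuite>
</testsuites>
"""

plans_per_arch = {}
for suite in ET.fromstring(XUNIT).findall('./testsuite'):
    # Architecture lives under testcase/testing-environment[@name="requested"]; fall back to 'N/A'.
    env = suite.find('./testcase/testing-environment[@name="requested"]')
    arch = env.find('./property[@name="arch"]').get('value', '') if env is not None else 'N/A'
    plans_per_arch.setdefault(arch, []).append((suite.get('name', ''), suite.get('result')))

print(plans_per_arch)
# {'x86_64': [('/plans/basic', 'passed')], 'aarch64': [('/plans/basic', 'failed')]}
```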
@@ -174,10 +372,10 @@ def watch(
     get_url = urllib.parse.urljoin(api_url, f"/v0.1/requests/{id}")
     current_state: str = ""
 
-
+    _console_print(f"🔎 api [blue]{get_url}[/blue]")
 
     if not no_wait:
-
+        _console_print("💡 waiting for request to finish, use ctrl+c to skip", style="bright_yellow")
 
     artifacts_shown = False
 
@@ -209,37 +407,45 @@ def watch(
 
         current_state = state
 
+        request_summary = _get_request_summary(request, session)
+        if format == WatchFormat.json:
+            console.print(json.dumps(request_summary, indent=2))
+
         if state == "new":
-
+            _console_print("👶 request is [blue]waiting to be queued[/blue]")
 
         elif state == "queued":
-
+            _console_print("👷 request is [blue]queued[/blue]")
 
         elif state == "running":
-
-
+            _console_print("🚀 request is [blue]running[/blue]")
+            _console_print(f"🚢 artifacts [blue]{request['run']['artifacts']}[/blue]")
             artifacts_shown = True
 
         elif state == "complete":
             if not artifacts_shown:
-
+                _console_print(f"🚢 artifacts [blue]{request['run']['artifacts']}[/blue]")
 
             overall = request["result"]["overall"]
             if overall in ["passed", "skipped"]:
-
+                _console_print("✅ tests passed", style="green")
+                _print_summary_table(request_summary, format)
                 raise typer.Exit()
 
             if overall in ["failed", "error", "unknown"]:
-
+                _console_print(f"❌ tests {overall}", style="red")
                 if overall == "error":
-
+                    _console_print(f"{request['result']['summary']}", style="red")
+                _print_summary_table(request_summary, format)
                 raise typer.Exit(code=1)
 
         elif state == "error":
-
+            _console_print(f"📛 pipeline error\n{request['result']['summary']}", style="red")
+            _print_summary_table(request_summary, format)
             raise typer.Exit(code=2)
 
         if no_wait:
+            _print_summary_table(request_summary, format, show_details=False)
             raise typer.Exit()
 
         time.sleep(settings.WATCH_TICK)
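With `--format json`, `watch` now emits the `_get_request_summary` dictionary through `json.dumps(...)` instead of the emoji status lines and Rich tables. A hedged sketch of what a consumer of that output might receive and do with it; the keys match the summary built above, while the UUID, URL and plan names are placeholders, not real Testing Farm data.

```python
import json

# Example payload shape as produced by _get_request_summary(); values are invented.
raw = json.dumps(
    {
        "id": "00000000-0000-0000-0000-000000000000",
        "state": "complete",
        "artifacts": "https://artifacts.example.com/00000000",
        "overall": "failed",
        "arches_requested": ["x86_64", "aarch64"],
        "errored_plans": {},
        "failed_plans": {"aarch64": ["/plans/basic"]},
        "passed_plans": {"x86_64": ["/plans/basic"]},
    },
    indent=2,
)

summary = json.loads(raw)
failed = {plan for plans in summary["failed_plans"].values() for plan in plans}
print(f"{summary['state']}: {len(failed)} failed plan(s)")  # complete: 1 failed plan(s)
```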
@@ -258,9 +464,10 @@ def request(
         help="Set the timeout for the request in minutes. If the test takes longer than this, it will be terminated.",
     ),
     test_type: str = typer.Option("fmf", help="Test type to use, if not set autodetected."),
-
-
-
+    tmt_plan_name: Optional[str] = OPTION_TMT_PLAN_NAME,
+    tmt_plan_filter: Optional[str] = OPTION_TMT_PLAN_FILTER,
+    tmt_test_name: Optional[str] = OPTION_TMT_TEST_NAME,
+    tmt_test_filter: Optional[str] = OPTION_TMT_TEST_FILTER,
     tmt_path: str = OPTION_TMT_PATH,
     sti_playbooks: Optional[List[str]] = typer.Option(
         None,
@@ -282,20 +489,15 @@ def request(
         None,
         help="Compose used to provision system-under-test. If not set, tests will expect 'container' provision method specified in tmt plans.", # noqa
     ),
-    hardware: List[str] =
-        None,
-        help=(
-            "HW requirements, expressed as key/value pairs. Keys can consist of several properties, "
-            "e.g. ``disk.space='>= 40 GiB'``, such keys will be merged in the resulting environment "
-            "with other keys sharing the path: ``cpu.family=79`` and ``cpu.model=6`` would be merged, "
-            "not overwriting each other. See https://tmt.readthedocs.io/en/stable/spec/hardware.html "
-            "for the hardware specification."
-        ),
-    ),
+    hardware: List[str] = OPTION_HARDWARE,
     kickstart: Optional[List[str]] = OPTION_KICKSTART,
     pool: Optional[str] = OPTION_POOL,
-
-        None,
+    cli_tmt_context: Optional[List[str]] = typer.Option(
+        None,
+        "-c",
+        "--context",
+        metavar="key=value|@file",
+        help="Context variables to pass to `tmt`. The @ prefix marks a yaml file to load.",
     ),
     variables: Optional[List[str]] = OPTION_VARIABLES,
     secrets: Optional[List[str]] = OPTION_SECRETS,
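Several of the options above (`-e/--environment`, `-s/--secret`, `-c/--context`, `-t/--tag`, `-T/--tmt-environment`, `--kickstart`) now accept a `@file` form next to plain `key=value` pairs; the file is read with `ruamel.yaml` and merged into the same dictionary. A simplified sketch of that merging, loosely mirroring the new `options_to_dict`/`options_from_file` helpers in `utils.py`; the file name and variables are illustrative, and the helper name `to_dict` is not from the package.

```python
import os

from ruamel.yaml import YAML


def to_dict(options):
    """Merge `key=value` pairs and `@file` yaml documents into one dict (simplified sketch)."""
    result = {}
    for option in options:
        if option.startswith("@"):
            # `@file`: load a flat yaml mapping and merge it in.
            with open(option[1:]) as handle:
                result.update(YAML(typ="safe").load(handle.read()) or {})
        else:
            # `key=value`: split on the first '='.
            key, _, value = option.partition("=")
            result[key] = value
    return result


# Illustrative vars.yaml with two variables.
with open("vars.yaml", "w") as handle:
    handle.write("FOO: bar\nDEBUG: '1'\n")

print(to_dict(["BAZ=qux", "@vars.yaml"]))  # {'BAZ': 'qux', 'FOO': 'bar', 'DEBUG': '1'}
os.remove("vars.yaml")
```

On the command line this corresponds to passing, for example, something like `-e BAZ=qux -e @vars.yaml` to the request command.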
@@ -303,10 +505,11 @@ def request(
         None,
         "-T",
         "--tmt-environment",
-        metavar="key=value",
+        metavar="key=value|@file",
         help=(
             "Environment variables to pass to the tmt process. "
-            "Used to configure tmt report plugins like reportportal or polarion."
+            "Used to configure tmt report plugins like reportportal or polarion. "
+            "The @ prefix marks a yaml file to load."
         ),
     ),
     no_wait: bool = typer.Option(False, help="Skip waiting for request completion."),
@@ -317,7 +520,11 @@ def request(
     repository: List[str] = OPTION_REPOSITORY,
     repository_file: List[str] = OPTION_REPOSITORY_FILE,
     tags: Optional[List[str]] = typer.Option(
-        None,
+        None,
+        "-t",
+        "--tag",
+        metavar="key=value|@file",
+        help="Tag cloud resources with given value. The @ prefix marks a yaml file to load.",
     ),
     watchdog_dispatch_delay: Optional[int] = typer.Option(
         None,
@@ -339,6 +546,7 @@ def request(
     user_webpage_icon: Optional[str] = typer.Option(
         None, help="URL of the icon of the user's webpage. It will be shown in the results viewer."
     ),
+    parallel_limit: Optional[int] = OPTION_PARALLEL_LIMIT,
 ):
     """
     Request testing from Testing Farm.
@@ -420,14 +628,17 @@ def request(
     if git_merge_sha:
         test["merge_sha"] = git_merge_sha
 
-    if
-        test["name"] =
+    if tmt_plan_name:
+        test["name"] = tmt_plan_name
 
-    if
-        test["plan_filter"] =
+    if tmt_plan_filter:
+        test["plan_filter"] = tmt_plan_filter
 
-    if
-        test["
+    if tmt_test_name:
+        test["test_name"] = tmt_test_name
+
+    if tmt_test_filter:
+        test["test_filter"] = tmt_test_filter
 
     if sti_playbooks:
         test["playbooks"] = sti_playbooks
@@ -441,15 +652,20 @@ def request(
     environment["artifacts"] = []
     environment["tmt"] = {}
 
+    # NOTE(ivasilev) From now on tmt.context will be always set. Even if user didn't request anything then
+    # arch requested will be passed into the context
+    tmt_context = options_to_dict("tmt context", cli_tmt_context or [])
+    if "arch" not in tmt_context:
+        # If context distro is not set by the user directly via -c let's set it according to arch requested
+        tmt_context["arch"] = arch
+    environment["tmt"].update({"context": tmt_context})
+
     if compose:
         environment["os"] = {"compose": compose}
 
     if secrets:
         environment["secrets"] = options_to_dict("environment secrets", secrets)
 
-    if tmt_context:
-        environment["tmt"].update({"context": options_to_dict("tmt context", tmt_context)})
-
     if variables:
         environment["variables"] = options_to_dict("environment variables", variables)
 
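The net effect of this block is that every requested environment now carries a `tmt.context`, with `arch` defaulted from the requested architecture whenever the user did not set it via `-c/--context`. A rough sketch of the resulting environment entry; all concrete values (distro, compose, variable names) are illustrative, not taken from the package.

```python
# Illustrative environment entry assembled by `request` for one requested architecture.
arch = "x86_64"
cli_tmt_context = {"distro": "fedora-39"}  # parsed from -c/--context, may be empty

tmt_context = dict(cli_tmt_context)
if "arch" not in tmt_context:
    tmt_context["arch"] = arch  # default injected by the new code above

environment = {
    "arch": arch,
    "os": {"compose": "Fedora-39"},
    "tmt": {"context": tmt_context},
    "secrets": {"TOKEN": "***"},
    "variables": {"DEBUG": "1"},
    "artifacts": [],
}
print(environment["tmt"]["context"])  # {'distro': 'fedora-39', 'arch': 'x86_64'}
```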
@@ -514,6 +730,9 @@ def request(
     if pipeline_type:
         request["settings"]["pipeline"]["type"] = pipeline_type.value
 
+    if parallel_limit:
+        request["settings"]["pipeline"]["parallel-limit"] = parallel_limit
+
     # worker image
     if worker_image:
         request["settings"]["worker"] = {"image": worker_image}
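Together with the new `--parallel-limit` option, the pipeline settings of the submitted request can now carry both a pipeline type and a parallelism cap. A minimal sketch of the resulting `settings` fragment; the numbers are examples only, while the field name `parallel-limit` comes from the code above.

```python
request = {"settings": {"pipeline": {}}}

pipeline_type = "tmt-multihost"  # optional pipeline type selection
parallel_limit = 5               # optional --parallel-limit value

if pipeline_type:
    request["settings"]["pipeline"]["type"] = pipeline_type
if parallel_limit:
    request["settings"]["pipeline"]["parallel-limit"] = parallel_limit

print(request)  # {'settings': {'pipeline': {'type': 'tmt-multihost', 'parallel-limit': 5}}}
```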
@@ -554,7 +773,7 @@ def request(
         exit_error(f"Unexpected error. Please file an issue to {settings.ISSUE_TRACKER}.")
 
     # watch
-    watch(api_url, response.json()['id'], no_wait)
+    watch(api_url, response.json()['id'], no_wait, format=WatchFormat.text)
 
 
 def restart(
@@ -578,24 +797,17 @@ def restart(
     git_url: Optional[str] = typer.Option(None, help="Force URL of the GIT repository to test."),
     git_ref: Optional[str] = typer.Option(None, help="Force GIT ref or branch to test."),
     git_merge_sha: Optional[str] = typer.Option(None, help="Force GIT ref or branch into which --ref will be merged."),
-    hardware: List[str] =
-
-
-
-
-            "with other keys sharing the path: ``cpu.family=79`` and ``cpu.model=6`` would be merged, "
-            "not overwriting each other. See https://tmt.readthedocs.io/en/stable/spec/hardware.html "
-            "for the hardware specification."
-        ),
-    ),
-    tmt_plan_regex: Optional[str] = OPTION_TMT_PLAN_REGEX,
-    tmt_plan_filter_regex: Optional[str] = OPTION_TMT_PLAN_FILTER_REGEX,
-    tmt_test_filter_regex: Optional[str] = OPTION_TMT_TEST_FILTER_REGEX,
+    hardware: List[str] = OPTION_HARDWARE,
+    tmt_plan_name: Optional[str] = OPTION_TMT_PLAN_NAME,
+    tmt_plan_filter: Optional[str] = OPTION_TMT_PLAN_FILTER,
+    tmt_test_name: Optional[str] = OPTION_TMT_TEST_NAME,
+    tmt_test_filter: Optional[str] = OPTION_TMT_TEST_FILTER,
     tmt_path: str = OPTION_TMT_PATH,
     worker_image: Optional[str] = OPTION_WORKER_IMAGE,
     no_wait: bool = typer.Option(False, help="Skip waiting for request completion."),
     dry_run: bool = OPTION_DRY_RUN,
     pipeline_type: Optional[PipelineType] = OPTION_PIPELINE_TYPE,
+    parallel_limit: Optional[int] = OPTION_PARALLEL_LIMIT,
 ):
     """
     Restart a Testing Farm request.
@@ -661,8 +873,11 @@ def restart(
     if git_ref:
         test["ref"] = git_ref
 
-    if
-        test["
+    if tmt_test_name:
+        test["test_name"] = tmt_test_name
+
+    if tmt_test_filter:
+        test["test_filter"] = tmt_test_filter
 
     merge_sha_info = ""
     if git_merge_sha:
@@ -691,15 +906,15 @@ def restart(
 
     test_type = "fmf" if "fmf" in request["test"] else "sti"
 
-    if
+    if tmt_plan_name:
         if test_type == "sti":
             exit_error("The '--plan' option is compabitble only with 'tmt` tests.")
-        request["test"][test_type]["name"] =
+        request["test"][test_type]["name"] = tmt_plan_name
 
-    if
+    if tmt_plan_filter:
         if test_type == "sti":
             exit_error("The '--plan-filter' option is compabitble only with 'tmt` tests.")
-        request["test"][test_type]["plan_filter"] =
+        request["test"][test_type]["plan_filter"] = tmt_plan_filter
 
     if test_type == "fmf":
         request["test"][test_type]["path"] = tmt_path
@@ -715,13 +930,18 @@ def restart(
     # Add API key
     request['api_key'] = api_token
 
-    if pipeline_type:
+    if pipeline_type or parallel_limit:
         if "settings" not in request:
             request["settings"] = {}
         if "pipeline" not in request["settings"]:
             request["settings"]["pipeline"] = {}
+
+    if pipeline_type:
         request["settings"]["pipeline"]["type"] = pipeline_type.value
 
+    if parallel_limit:
+        request["settings"]["pipeline"]["parallel-limit"] = parallel_limit
+
     # dry run
     if dry_run:
         console.print("🔍 Dry run, showing POST json only", style="bright_yellow")
@@ -747,7 +967,7 @@ def restart(
         exit_error(f"Unexpected error. Please file an issue to {settings.ISSUE_TRACKER}.")
 
     # watch
-    watch(str(api_url), response.json()['id'], no_wait)
+    watch(str(api_url), response.json()['id'], no_wait, format=WatchFormat.text)
 
 
 def run(
@@ -835,6 +1055,8 @@ def run(
     if verbose:
         console.print(f"🔎 api [blue]{get_url}[/blue]")
 
+    search: Optional[re.Match[str]] = None
+
     # wait for the sanity test to finish
     with Progress(
         SpinnerColumn(),
@@ -882,6 +1104,12 @@ def run(
         try:
             search = re.search(r'href="(.*)" name="workdir"', session.get(f"{artifacts_url}/results.xml").text)
 
+        except requests.exceptions.SSLError:
+            console.print(
+                "\r🚫 [yellow]artifacts unreachable via SSL, do you have RH CA certificates installed?[/yellow]"
+            )
+            console.print(f"\r🚢 artifacts [blue]{artifacts_url}[/blue]")
+
         except requests.exceptions.ConnectionError:
             console.print("\r🚫 [yellow]artifacts unreachable, are you on VPN?[/yellow]")
             console.print(f"\r🚢 artifacts [blue]{artifacts_url}[/blue]")
@@ -890,7 +1118,6 @@ def run(
     if not search:
         exit_error("Could not find working directory, cannot continue")
 
-    assert search
     workdir = str(search.groups(1)[0])
     output = f"{workdir}/testing-farm/sanity/execute/data/guest/default-0/testing-farm/script-1/output.txt"
 
@@ -1109,14 +1336,14 @@ def reserve(
         if state in ["complete", "error"]:
             exit_error("Reservation failed, check API request or contact Testing Farm")
 
-        if not print_only_request_id and task_id:
-            progress.update(task_id, description="Reservation job is [yellow]current_state[/yellow]")
+        if not print_only_request_id and task_id is not None:
+            progress.update(task_id, description=f"Reservation job is [yellow]{current_state}[/yellow]")
 
         time.sleep(1)
 
     while current_state != "ready":
         if not print_only_request_id and task_id:
-            progress.update(task_id, description="Reservation job is [yellow]current_state[/yellow]")
+            progress.update(task_id, description=f"Reservation job is [yellow]{current_state}[/yellow]")
 
         # get the command output
         artifacts_url = response.json()['run']['artifacts']
@@ -1127,12 +1354,24 @@ def reserve(
             if not pipeline_log:
                 exit_error(f"Pipeline log was empty. Please file an issue to {settings.ISSUE_TRACKER}.")
 
+        except requests.exceptions.SSLError:
+            exit_error(
+                textwrap.dedent(
+                    f"""
+                    Failed to access Testing Farm artifacts because of SSL validation error.
+                    If you use Red Hat Ranch please make sure you have Red Hat CA certificates installed.
+                    Otherwise file an issue to {settings.ISSUE_TRACKER}.
+                    """
+                )
+            )
+            return
+
         except requests.exceptions.ConnectionError:
             exit_error(
                 textwrap.dedent(
                     f"""
                     Failed to access Testing Farm artifacts.
-                    If you use Red Hat Ranch please make sure you are
+                    If you use Red Hat Ranch please make sure you are connected to the VPN.
                     Otherwise file an issue to {settings.ISSUE_TRACKER}.
                     """
                 )
{tft_cli-0.0.16 → tft_cli-0.0.18}/src/tft/cli/config.py

@@ -19,4 +19,6 @@ settings = LazySettings(
     DEFAULT_API_RETRIES=7,
     # should lead to delays of 0.5, 1, 2, 4, 8, 16, 32 seconds
     DEFAULT_RETRY_BACKOFF_FACTOR=1,
+    # system CA certificates path, default for RHEL variants
+    REQUESTS_CA_BUNDLE="/etc/ssl/certs/ca-bundle.crt",
 )
{tft_cli-0.0.16 → tft_cli-0.0.18}/src/tft/cli/tool.py

@@ -21,3 +21,8 @@ app.command()(commands.watch)
 # This command is available only for the container based deployment
 if os.path.exists(settings.CONTAINER_SIGN):
     app.command()(commands.update)
+
+# Expose REQUESTS_CA_BUNDLE in the environment for RHEL-like systems
+# This is needed for custom CA certificates to nicely work.
+if "REQUESTS_CA_BUNDLE" not in os.environ and os.path.exists(settings.REQUESTS_CA_BUNDLE):
+    os.environ["REQUESTS_CA_BUNDLE"] = settings.REQUESTS_CA_BUNDLE
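config.py now ships a default CA bundle path and tool.py exports it as `REQUESTS_CA_BUNDLE` when the variable is not already set and the file exists, so `requests` validates internal certificates out of the box on RHEL-like systems. A reduced sketch of the same pattern; the path matches the diff, while the URL is a placeholder.

```python
import os

import requests

CA_BUNDLE = "/etc/ssl/certs/ca-bundle.crt"  # default shipped in config.py

# Only fill the variable in when the user has not chosen a bundle themselves
# and the well-known RHEL path actually exists.
if "REQUESTS_CA_BUNDLE" not in os.environ and os.path.exists(CA_BUNDLE):
    os.environ["REQUESTS_CA_BUNDLE"] = CA_BUNDLE

# requests honours REQUESTS_CA_BUNDLE for certificate verification on every call.
response = requests.get("https://api.example.com/")  # any HTTPS endpoint
print(response.status_code)
```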
{tft_cli-0.0.16 → tft_cli-0.0.18}/src/tft/cli/utils.py

@@ -4,21 +4,24 @@
 import glob
 import os
 import subprocess
+import sys
 import uuid
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, NoReturn, Optional, Union
 
 import requests
 import requests.adapters
 import typer
 from rich.console import Console
+from ruamel.yaml import YAML
 from urllib3 import Retry
 
 from tft.cli.config import settings
 
 console = Console(soft_wrap=True)
+console_stderr = Console(soft_wrap=True, file=sys.stderr)
 
 
-def exit_error(error: str):
+def exit_error(error: str) -> NoReturn:
     """Exit with given error message"""
     console.print(f"⛔ {error}", style="red")
     raise typer.Exit(code=255)
@@ -91,15 +94,46 @@ def hw_constraints(hardware: List[str]) -> Dict[Any, Any]:
     return {key: value if key not in ("disk", "network") else [value] for key, value in constraints.items()}
 
 
+def options_from_file(filepath) -> Dict[str, str]:
+    """Read environment variables from a yaml file."""
+
+    with open(filepath, 'r') as file:
+        try:
+            yaml = YAML(typ="safe").load(file.read())
+        except Exception:
+            exit_error(f"Failed to load variables from yaml file {filepath}.")
+
+    if not yaml:  # pyre-ignore[61] # pyre ignores NoReturn in exit_error
+        return {}
+
+    if not isinstance(yaml, dict):  # pyre-ignore[61] # pyre ignores NoReturn in exit_error
+        exit_error(f"Environment file {filepath} is not a dict.")
+
+    if any([isinstance(value, (list, dict)) for value in yaml.values()]):
+        exit_error(f"Values of environment file {filepath} are not primitive types.")
+
+    return yaml  # pyre-ignore[61] # pyre ignores NoReturn in exit_error
+
+
 def options_to_dict(name: str, options: List[str]) -> Dict[str, str]:
-    """Create a dictionary from list of `key=value` options"""
-    try:
-        return {option.split("=", 1)[0]: option.split("=", 1)[1] for option in options}
+    """Create a dictionary from list of `key=value|@file` options"""
 
-
-
+    options_dict = {}
+    for option in options:
+        # Option is `@file`
+        if option.startswith('@'):
+            if not os.path.isfile(option[1:]):
+                exit_error(f"Invalid environment file in option `{option}` specified.")
+            options_dict.update(options_from_file(option[1:]))
+
+        # Option is `key=value`
+        else:
+            try:
+                options_dict.update({option.split("=", 1)[0]: option.split("=", 1)[1]})
+            except IndexError:
+                exit_error(f"Option `{option}` is invalid, must be defined as `key=value|@file`.")
 
-    return
+    return options_dict
 
 
 def uuid_valid(value: str, version: int = 4) -> bool:

{tft_cli-0.0.16 → tft_cli-0.0.18}/LICENSE: file without changes
{tft_cli-0.0.16 → tft_cli-0.0.18}/LICENSE_SPDX: file without changes
{tft_cli-0.0.16 → tft_cli-0.0.18}/src/tft/cli/__init__.py: file without changes
|