tft-cli 0.0.19__tar.gz → 0.0.21__tar.gz
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
- {tft_cli-0.0.19 → tft_cli-0.0.21}/PKG-INFO +2 -1
- {tft_cli-0.0.19 → tft_cli-0.0.21}/pyproject.toml +2 -1
- {tft_cli-0.0.19 → tft_cli-0.0.21}/src/tft/cli/commands.py +179 -26
- {tft_cli-0.0.19 → tft_cli-0.0.21}/src/tft/cli/utils.py +8 -0
- {tft_cli-0.0.19 → tft_cli-0.0.21}/LICENSE +0 -0
- {tft_cli-0.0.19 → tft_cli-0.0.21}/LICENSE_SPDX +0 -0
- {tft_cli-0.0.19 → tft_cli-0.0.21}/src/tft/cli/__init__.py +0 -0
- {tft_cli-0.0.19 → tft_cli-0.0.21}/src/tft/cli/config.py +0 -0
- {tft_cli-0.0.19 → tft_cli-0.0.21}/src/tft/cli/tool.py +0 -0

{tft_cli-0.0.19 → tft_cli-0.0.21}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: tft-cli
-Version: 0.0.19
+Version: 0.0.21
 Summary: Testing Farm CLI tool
 License: Apache-2.0
 Author: Miroslav Vadkerti
@@ -15,6 +15,7 @@ Requires-Dist: click (>=8.0.4,<8.1.0)
 Requires-Dist: colorama (>=0.4.4,<0.5.0)
 Requires-Dist: dynaconf (>=3.1.7,<4.0.0)
 Requires-Dist: requests (>=2.27.1,<3.0.0)
+Requires-Dist: rich (>=12,<13)
 Requires-Dist: ruamel-yaml (>=0.18.6,<0.19.0)
 Requires-Dist: setuptools
 Requires-Dist: typer[all] (>=0.7.0,<0.8.0)

{tft_cli-0.0.19 → tft_cli-0.0.21}/pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "tft-cli"
-version = "0.0.19"
+version = "0.0.21"
 description = "Testing Farm CLI tool"
 authors = ["Miroslav Vadkerti <mvadkert@redhat.com>"]
 license = "Apache-2.0"
@@ -21,6 +21,7 @@ click = "~8.0.4"
 dynaconf = "^3.1.7"
 colorama = "^0.4.4"
 requests = "^2.27.1"
+rich = "^12"
 ruamel-yaml = "^0.18.6"
 setuptools = "*"
 

{tft_cli-0.0.19 → tft_cli-0.0.21}/src/tft/cli/commands.py

@@ -2,12 +2,13 @@
 # SPDX-License-Identifier: Apache-2.0
 
 import base64
+import ipaddress
 import json
 import os
 import re
 import shutil
+import stat
 import subprocess
-import tempfile
 import textwrap
 import time
 import urllib.parse
@@ -60,6 +61,9 @@ RESERVE_REF = os.getenv("TESTING_FARM_RESERVE_REF", "main")
 
 DEFAULT_PIPELINE_TIMEOUT = 60 * 12
 
+# Won't be validating CIDR and 65535 max port range with regex here, not worth it
+SECURITY_GROUP_RULE_FORMAT = re.compile(r"(tcp|ip|icmp|udp|-1|[0-255]):(.*):(\d{1,5}-\d{1,5}|\d{1,5}|-1)")
+
 
 class WatchFormat(str, Enum):
     text = 'text'
@@ -134,6 +138,24 @@ OPTION_PIPELINE_TYPE: Optional[PipelineType] = typer.Option(None, help="Force a
 OPTION_POST_INSTALL_SCRIPT: Optional[str] = typer.Option(
     None, help="Post-install script to run right after the guest boots for the first time."
 )
+OPTION_SECURITY_GROUP_RULE_INGRESS: Optional[List[str]] = typer.Option(
+    None,
+    help=(
+        "Additional ingress security group rules to be passed to guest in "
+        "PROTOCOL:CIDR:PORT format. Multiple rules can be specified as comma separated, "
+        "eg. `tcp:109.81.42.42/32:22,142.0.42.0/24:22`. "
+        "Supported by AWS only atm."
+    ),
+)
+OPTION_SECURITY_GROUP_RULE_EGRESS: Optional[List[str]] = typer.Option(
+    None,
+    help=(
+        "Additional egress security group rules to be passed to guest in "
+        "PROTOCOL:CIDR:PORT format. Multiple rules can be specified as comma separated, "
+        "eg. `tcp:109.81.42.42/32:22,142.0.42.0/24:22`. "
+        "Supported by AWS only atm."
+    ),
+)
 OPTION_KICKSTART: Optional[List[str]] = typer.Option(
     None,
     metavar="key=value|@file",
@@ -212,6 +234,58 @@ OPTION_PARALLEL_LIMIT: Optional[int] = typer.Option(
         "Red Hat Ranch."
     ),
 )
+OPTION_TAGS = typer.Option(
+    None,
+    "-t",
+    "--tag",
+    metavar="key=value|@file",
+    help="Tag cloud resources with given value. The @ prefix marks a yaml file to load.",
+)
+
+
+# NOTE(ivasilev) Largely borrowed from artemis-cli
+def _parse_security_group_rules(ingress_rules: List[str], egress_rules: List[str]) -> Dict[str, Any]:
+    """
+    Returns a dictionary with ingress/egress rules in TFT request friendly format
+    """
+    security_group_rules = {}
+
+    def _add_secgroup_rules(sg_type: str, sg_data: List[str]) -> None:
+        security_group_rules[sg_type] = []
+
+        for sg_rule in normalize_multistring_option(sg_data):
+            matches = re.match(SECURITY_GROUP_RULE_FORMAT, sg_rule)
+            if not matches:
+                exit_error(f"Bad format of security group rule '{sg_rule}', should be PROTOCOL:CIDR:PORT")  # noqa: E231
+
+            protocol, cidr, port = matches[1], matches[2], matches[3]
+
+            # Let's validate cidr
+            try:
+                # This way a single ip address will be converted to a valid ip/32 cidr.
+                cidr = str(ipaddress.ip_network(cidr))
+            except ValueError as err:
+                exit_error(f'CIDR {cidr} has incorrect format: {err}')
+
+            # Artemis expects port_min/port_max, -1 has to be converted to a proper range 0-65535
+            port_min = 0 if port == '-1' else int(port.split('-')[0])
+            port_max = 65535 if port == '-1' else int(port.split('-')[-1])
+
+            # Add rule for Artemis API
+            security_group_rules[sg_type].append(
+                {
+                    'type': sg_type.split('_')[-1],
+                    'protocol': protocol,
+                    'cidr': cidr,
+                    'port_min': port_min,
+                    'port_max': port_max,
+                }
+            )
+
+    _add_secgroup_rules('security_group_rules_ingress', ingress_rules)
+    _add_secgroup_rules('security_group_rules_egress', egress_rules)
+
+    return security_group_rules
 
 
 def _parse_xunit(xunit: str):
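
The helper above is what backs the new `--security-group-rule-ingress` / `--security-group-rule-egress` options: each `PROTOCOL:CIDR:PORT` string becomes one Artemis-style rule dictionary. A minimal standalone sketch of that transformation (illustrative only, reusing just the regex and stdlib calls from the diff; the sample address is made up):

    import ipaddress
    import re

    SECURITY_GROUP_RULE_FORMAT = re.compile(r"(tcp|ip|icmp|udp|-1|[0-255]):(.*):(\d{1,5}-\d{1,5}|\d{1,5}|-1)")

    # Parse a single ingress rule the same way _parse_security_group_rules does
    matches = re.match(SECURITY_GROUP_RULE_FORMAT, "tcp:203.0.113.5:22")
    protocol, cidr, port = matches[1], matches[2], matches[3]
    cidr = str(ipaddress.ip_network(cidr))  # a bare IP is normalized to '203.0.113.5/32'
    port_min = 0 if port == '-1' else int(port.split('-')[0])
    port_max = 65535 if port == '-1' else int(port.split('-')[-1])
    print({'type': 'ingress', 'protocol': protocol, 'cidr': cidr, 'port_min': port_min, 'port_max': port_max})
    # {'type': 'ingress', 'protocol': 'tcp', 'cidr': '203.0.113.5/32', 'port_min': 22, 'port_max': 22}

A port of `-1` expands to the full 0-65535 range, and a CIDR that `ipaddress.ip_network` rejects aborts the command with an error.
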
@@ -441,7 +515,12 @@ def watch(
         raise typer.Exit(code=1)
 
     elif state == "error":
-
+        msg = (
+            request['result'].get('summary')
+            if request['result']
+            else '\n'.join(note['message'] for note in request['notes'])
+        )
+        _console_print(f"📛 pipeline error\n{msg}", style="red")
         _print_summary_table(request_summary, format)
         raise typer.Exit(code=2)
 
@@ -521,13 +600,7 @@ def request(
    repository: List[str] = OPTION_REPOSITORY,
    repository_file: List[str] = OPTION_REPOSITORY_FILE,
    sanity: bool = typer.Option(False, help="Run Testing Farm sanity test.", rich_help_panel=RESERVE_PANEL_GENERAL),
-    tags: Optional[List[str]] = typer.Option(
-        None,
-        "-t",
-        "--tag",
-        metavar="key=value|@file",
-        help="Tag cloud resources with given value. The @ prefix marks a yaml file to load.",
-    ),
+    tags: Optional[List[str]] = OPTION_TAGS,
    watchdog_dispatch_delay: Optional[int] = typer.Option(
        None,
        help="How long (seconds) before the guest \"is-alive\" watchdog is dispatched. Note that this is implemented only in Artemis service.", # noqa
@@ -539,6 +612,8 @@ def request(
    dry_run: bool = OPTION_DRY_RUN,
    pipeline_type: Optional[PipelineType] = OPTION_PIPELINE_TYPE,
    post_install_script: Optional[str] = OPTION_POST_INSTALL_SCRIPT,
+    security_group_rule_ingress: Optional[List[str]] = OPTION_SECURITY_GROUP_RULE_INGRESS,
+    security_group_rule_egress: Optional[List[str]] = OPTION_SECURITY_GROUP_RULE_EGRESS,
    user_webpage: Optional[str] = typer.Option(
        None, help="URL to the user's webpage. The link will be shown in the results viewer."
    ),
@@ -707,7 +782,17 @@ def request(
 
     environments.append(environment)
 
-    if
+    if any(
+        provisioning_detail
+        for provisioning_detail in [
+            tags,
+            watchdog_dispatch_delay,
+            watchdog_period_delay,
+            post_install_script,
+            security_group_rule_ingress,
+            security_group_rule_egress,
+        ]
+    ):
         if "settings" not in environments[0]:
             environments[0]["settings"] = {}
 
@@ -726,6 +811,10 @@ def request(
         if post_install_script:
             environments[0]["settings"]["provisioning"]["post_install_script"] = post_install_script
 
+        if security_group_rule_ingress or security_group_rule_egress:
+            rules = _parse_security_group_rules(security_group_rule_ingress or [], security_group_rule_egress or [])
+            environments[0]["settings"]["provisioning"].update(rules)
+
     # create final request
     request = TestingFarmRequestV1
     request["api_key"] = api_token
@@ -747,6 +836,7 @@ def request(
 
     # worker image
     if worker_image:
+        console.print(f"👷 Forcing worker image [blue]{worker_image}[/blue]")
         request["settings"]["worker"] = {"image": worker_image}
 
     if not user_webpage and (user_webpage_name or user_webpage_icon):
@@ -810,6 +900,7 @@ def restart(
    git_ref: Optional[str] = typer.Option(None, help="Force GIT ref or branch to test."),
    git_merge_sha: Optional[str] = typer.Option(None, help="Force GIT ref or branch into which --ref will be merged."),
    hardware: List[str] = OPTION_HARDWARE,
+    tags: Optional[List[str]] = OPTION_TAGS,
    tmt_plan_name: Optional[str] = OPTION_TMT_PLAN_NAME,
    tmt_plan_filter: Optional[str] = OPTION_TMT_PLAN_FILTER,
    tmt_test_name: Optional[str] = OPTION_TMT_TEST_NAME,
@@ -853,6 +944,17 @@ def restart(
     if response.status_code == 401:
         exit_error(f"API token is invalid. See {settings.ONBOARDING_DOCS} for more information.")
 
+    # The API token is valid, but it doesn't own the request
+    if response.status_code == 403:
+        console.print(
+            "⚠️ [yellow] You are not the owner of this request. Any secrets associated with the request will not be included on the restart.[/yellow]" # noqa: E501
+        )
+        # Construct URL to the internal API
+        get_url = urllib.parse.urljoin(str(api_url), f"v0.1/requests/{_request_id}")
+
+        # Get the request details
+        response = session.get(get_url)
+
     if response.status_code != 200:
         exit_error(f"Unexpected error. Please file an issue to {settings.ISSUE_TRACKER}.")
 
@@ -933,7 +1035,7 @@ def restart(
 
     # worker image
     if worker_image:
-        console.print(f"👷
+        console.print(f"👷 Forcing worker image [blue]{worker_image}[/blue]")
         request["settings"] = request["settings"] if request.get("settings") else {}
         request["settings"]["worker"] = {"image": worker_image}
         # it is required to have also pipeline key set, otherwise API will fail
@@ -954,6 +1056,16 @@ def restart(
     if parallel_limit:
         request["settings"]["pipeline"]["parallel-limit"] = parallel_limit
 
+    if tags:
+        for environment in request["environments"]:
+            if "settings" not in environment or not environment["settings"]:
+                environment["settings"] = {}
+
+            if 'provisioning' not in environment["settings"]:
+                environment["settings"]["provisioning"] = {}
+
+            environment["settings"]["provisioning"]["tags"] = options_to_dict("tags", tags)
+
     # dry run
     if dry_run:
         console.print("🔍 Dry run, showing POST json only", style="bright_yellow")
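
Restarted requests now accept the same `-t key=value` tags as `request`; `options_to_dict` (docstring: "Create a dictionary from list of `key=value|@file` options") turns them into a plain dictionary per environment. An illustrative sketch with made-up tag names, mimicking the documented `key=value` handling:

    tags = ["BusinessUnit=qe", "Owner=me"]  # hypothetical values
    provisioning = {"tags": dict(option.split("=", 1) for option in tags)}
    # -> {'tags': {'BusinessUnit': 'qe', 'Owner': 'me'}}
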
@@ -1163,6 +1275,7 @@ def reserve(
        rich_help_panel=RESERVE_PANEL_ENVIRONMENT,
    ),
    hardware: List[str] = OPTION_HARDWARE,
+    tags: Optional[List[str]] = OPTION_TAGS,
    kickstart: Optional[List[str]] = OPTION_KICKSTART,
    pool: Optional[str] = OPTION_POOL,
    fedora_koji_build: List[str] = OPTION_FEDORA_KOJI_BUILD,
@@ -1180,6 +1293,9 @@ def reserve(
    autoconnect: bool = typer.Option(
        True, help="Automatically connect to the guest via SSH.", rich_help_panel=RESERVE_PANEL_GENERAL
    ),
+    worker_image: Optional[str] = OPTION_WORKER_IMAGE,
+    security_group_rule_ingress: Optional[List[str]] = OPTION_SECURITY_GROUP_RULE_INGRESS,
+    security_group_rule_egress: Optional[List[str]] = OPTION_SECURITY_GROUP_RULE_EGRESS,
 ):
     """
     Reserve a system in Testing Farm.
@@ -1189,6 +1305,28 @@ def reserve(
     if not print_only_request_id:
         console.print(message)
 
+    # Sanity checks for ssh-agent
+
+    # Check if SSH_AUTH_SOCK is defined
+    ssh_auth_sock = os.getenv("SSH_AUTH_SOCK")
+    if not ssh_auth_sock:
+        exit_error("SSH_AUTH_SOCK is not defined, make sure the ssh-agent is running by executing 'eval `ssh-agent`'.")
+
+    # Check if SSH_AUTH_SOCK exists
+    if not os.path.exists(ssh_auth_sock):
+        exit_error(
+            "SSH_AUTH_SOCK socket does not exist, make sure the ssh-agent is running by executing 'eval `ssh-agent`'."
+        )
+
+    # Check if value of SSH_AUTH_SOCK is socket
+    if not stat.S_ISSOCK(os.stat(ssh_auth_sock).st_mode):
+        exit_error("SSH_AUTH_SOCK is not a socket, make sure the ssh-agent is running by executing 'eval `ssh-agent`'.")
+
+    # Check if ssh-add -L is not empty
+    ssh_add_output = subprocess.run(["ssh-add", "-L"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    if ssh_add_output.returncode != 0:
+        exit_error("No SSH identities found in the SSH agent. Please run `ssh-add`.")
+
     # check for token
     if not settings.API_TOKEN:
         exit_error("No API token found, export `TESTING_FARM_API_TOKEN` environment variable.")
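
`reserve` now refuses to run without a working ssh-agent. A self-contained sketch that collapses the same checks into two guards (using plain `sys.exit` in place of the CLI's `exit_error` helper):

    import os
    import stat
    import subprocess
    import sys

    sock = os.getenv("SSH_AUTH_SOCK")
    if not sock or not os.path.exists(sock) or not stat.S_ISSOCK(os.stat(sock).st_mode):
        sys.exit("ssh-agent does not appear to be running; start it with: eval `ssh-agent`")

    # `ssh-add -L` exits non-zero when the agent holds no identities
    if subprocess.run(["ssh-add", "-L"], stdout=subprocess.PIPE, stderr=subprocess.PIPE).returncode != 0:
        sys.exit("No SSH identities found in the agent; run `ssh-add` first.")
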
@@ -1208,12 +1346,22 @@ def reserve(
     environment["pool"] = pool
     environment["artifacts"] = []
 
-    if
+    if "settings" not in environment:
+        environment["settings"] = {}
+
+    if post_install_script or security_group_rule_ingress or security_group_rule_egress or tags:
         if "settings" not in environment:
             environment["settings"] = {}
 
-
-
+        if "provisioning" not in environment["settings"]:
+            environment["settings"]["provisioning"] = {}
+
+        if "tags" not in environment["settings"]["provisioning"]:
+            environment["settings"]["provisioning"]["tags"] = {}
+
+        # reserve command is for interacting with the guest, and so non-spot instances
+        # would be nicer for the user than them getting shocked when they lose their work.
+        environment["settings"]["provisioning"]["tags"]["ArtemisUseSpot"] = "false"
 
     if compose:
         environment["os"] = {"compose": compose}
@@ -1221,6 +1369,9 @@ def reserve(
     if hardware:
         environment["hardware"] = hw_constraints(hardware)
 
+    if tags:
+        environment["settings"]["provisioning"]["tags"] = options_to_dict("tags", tags)
+
     if kickstart:
         environment["kickstart"] = options_to_dict("environment kickstart", kickstart)
 
@@ -1242,6 +1393,10 @@ def reserve(
     if post_install_script:
         environment["settings"]["provisioning"]["post_install_script"] = post_install_script
 
+    if security_group_rule_ingress or security_group_rule_egress:
+        rules = _parse_security_group_rules(security_group_rule_ingress or [], security_group_rule_egress or [])
+        environment["settings"]["provisioning"].update(rules)
+
     console.print(f"🕗 Reserved for [blue]{str(reservation_duration)}[/blue] minutes")
     environment["variables"] = {"TF_RESERVATION_DURATION": str(reservation_duration)}
 
@@ -1261,6 +1416,12 @@ def reserve(
     request["api_key"] = settings.API_TOKEN
     request["test"]["fmf"] = test
 
+    # worker image
+    if worker_image:
+        console.print(f"👷 Forcing worker image [blue]{worker_image}[/blue]")
+        request["settings"] = request["settings"] if request.get("settings") else {}
+        request["settings"]["worker"] = {"image": worker_image}
+
     request["environments"] = [environment]
 
     # in case the reservation duration is more than the pipeline timeout, adjust also the pipeline timeout
@@ -1427,24 +1588,16 @@ def reserve(
 
     ssh_proxy_option = f" -J {content['ssh_proxy']}" if content.get('ssh_proxy') else ""
 
-    ssh_private_key_option = ""
     if ssh_private_key:
-
-
-        tmp.flush()
-        tmp.close()
-
-        os.chmod(tmp.name, 0o600)
-
-        ssh_private_key_option = f" -i {tmp.name}"
+        console.print("🔑 [blue]Adding SSH proxy key[/blue]")
+        subprocess.run(["ssh-add", "-"], input=ssh_private_key.encode())
 
-    console.print(f"🌎 ssh{ssh_proxy_option}
+    console.print(f"🌎 ssh{ssh_proxy_option} root@{guest}")
 
     if autoconnect:
         os.system(
-            f"ssh -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null{ssh_proxy_option}
+            f"ssh -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null{ssh_proxy_option} root@{guest}" # noqa: E501
         )
-        os.unlink(tmp.name)
 
 
 def update():
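
The reservation SSH handling no longer writes the proxy key to a temporary file; the key is piped straight into the running agent, which removes the chmod/unlink bookkeeping seen on the removed lines. The call boils down to the following sketch (placeholder key, for illustration only):

    import subprocess

    ssh_private_key = "-----BEGIN OPENSSH PRIVATE KEY-----\n...\n-----END OPENSSH PRIVATE KEY-----\n"  # placeholder
    subprocess.run(["ssh-add", "-"], input=ssh_private_key.encode())
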

{tft_cli-0.0.19 → tft_cli-0.0.21}/src/tft/cli/utils.py

@@ -2,7 +2,9 @@
 # SPDX-License-Identifier: Apache-2.0
 
 import glob
+import itertools
 import os
+import shlex
 import subprocess
 import sys
 import uuid
@@ -119,6 +121,12 @@ def options_to_dict(name: str, options: List[str]) -> Dict[str, str]:
     """Create a dictionary from list of `key=value|@file` options"""
 
     options_dict = {}
+
+    # Turn option list such as
+    # `['aaa=bbb "foo foo=bar bar"', 'foo=bar']` into
+    # `['aaa=bbb', 'foo foo=bar bar', 'foo=bar']`
+    options = list(itertools.chain.from_iterable(shlex.split(option) for option in options))
+
     for option in options:
         # Option is `@file`
         if option.startswith('@'):
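
A quick check of what the new pre-processing does before the `key=value` parsing: `shlex.split` honours quoting inside each option and `itertools.chain.from_iterable` flattens the result, matching the example in the comment above.

    import itertools
    import shlex

    options = ['aaa=bbb "foo foo=bar bar"', 'foo=bar']
    print(list(itertools.chain.from_iterable(shlex.split(option) for option in options)))
    # ['aaa=bbb', 'foo foo=bar bar', 'foo=bar']
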
Files without changes: LICENSE, LICENSE_SPDX, src/tft/cli/__init__.py, src/tft/cli/config.py, src/tft/cli/tool.py
|