qontract-reconcile 0.10.2.dev57__py3-none-any.whl → 0.10.2.dev59__py3-none-any.whl
This diff shows the changes between two publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- {qontract_reconcile-0.10.2.dev57.dist-info → qontract_reconcile-0.10.2.dev59.dist-info}/METADATA +1 -1
- {qontract_reconcile-0.10.2.dev57.dist-info → qontract_reconcile-0.10.2.dev59.dist-info}/RECORD +29 -30
- reconcile/aws_cloudwatch_log_retention/integration.py +10 -17
- reconcile/dashdotdb_dora.py +5 -4
- reconcile/gitlab_housekeeping.py +10 -6
- reconcile/terraform_tgw_attachments.py +5 -5
- reconcile/terraform_vpc_peerings.py +1 -1
- reconcile/utils/aggregated_list.py +30 -20
- reconcile/utils/aws_api.py +595 -168
- reconcile/utils/aws_helper.py +7 -7
- reconcile/utils/binary.py +14 -7
- reconcile/utils/config.py +9 -6
- reconcile/utils/defer.py +4 -2
- reconcile/utils/elasticsearch_exceptions.py +7 -4
- reconcile/utils/environ.py +5 -3
- reconcile/utils/exceptions.py +5 -2
- reconcile/utils/git.py +6 -4
- reconcile/utils/gitlab_api.py +103 -82
- reconcile/utils/mr/base.py +6 -3
- reconcile/utils/mr/update_access_report_base.py +2 -2
- reconcile/utils/output.py +6 -3
- reconcile/utils/terrascript_aws_client.py +25 -0
- reconcile/utils/vcs.py +5 -3
- reconcile/vpc_peerings_validator.py +21 -15
- tools/app_interface_reporter.py +70 -44
- tools/cli_commands/gpg_encrypt.py +2 -2
- tools/qontract_cli.py +255 -307
- reconcile/utils/data_structures.py +0 -13
- {qontract_reconcile-0.10.2.dev57.dist-info → qontract_reconcile-0.10.2.dev59.dist-info}/WHEEL +0 -0
- {qontract_reconcile-0.10.2.dev57.dist-info → qontract_reconcile-0.10.2.dev59.dist-info}/entry_points.txt +0 -0
tools/qontract_cli.py
CHANGED
@@ -10,7 +10,6 @@ import sys
 import tempfile
 import textwrap
 from collections import defaultdict
-from collections.abc import Callable, Mapping
 from datetime import (
     UTC,
     datetime,
@@ -20,7 +19,7 @@ from operator import itemgetter
 from pathlib import Path
 from statistics import median
 from textwrap import dedent
-from typing import Any
+from typing import TYPE_CHECKING, Any, cast
 
 import boto3
 import click
@@ -55,6 +54,7 @@ from reconcile.change_owners.bundle import NoOpFileDiffResolver
 from reconcile.change_owners.change_log_tracking import (
     BUNDLE_DIFFS_OBJ,
     ChangeLog,
+    ChangeLogItem,
 )
 from reconcile.change_owners.change_owners import (
     fetch_change_type_processors,
@@ -81,7 +81,6 @@ from reconcile.gql_definitions.app_sre_tekton_access_revalidation.roles import (
 from reconcile.gql_definitions.common.app_interface_vault_settings import (
     AppInterfaceSettingsV1,
 )
-from reconcile.gql_definitions.common.clusters import ClusterSpecROSAV1
 from reconcile.gql_definitions.fragments.aus_organization import AUSOCMOrganization
 from reconcile.gql_definitions.integrations import integrations as integrations_gql
 from reconcile.gql_definitions.maintenance import maintenances as maintenances_gql
@@ -153,7 +152,6 @@ from reconcile.utils.oc_map import (
     init_oc_map_from_clusters,
 )
 from reconcile.utils.ocm import OCM_PRODUCT_ROSA, OCMMap
-from reconcile.utils.ocm.upgrades import get_upgrade_policies
 from reconcile.utils.ocm_base_client import init_ocm_base_client
 from reconcile.utils.output import print_output
 from reconcile.utils.saasherder.models import TargetSpec
@@ -186,8 +184,13 @@ from tools.sre_checkpoints import (
     get_latest_sre_checkpoints,
 )
 
+if TYPE_CHECKING:
+    from mypy_boto3_s3.type_defs import CopySourceTypeDef
+else:
+    CopySourceTypeDef = object
 
-def output(function: Callable) -> Callable:
+
+def output(function):
     function = click.option(
         "--output",
         "-o",
@@ -198,14 +201,14 @@ def output(function: Callable) -> Callable:
     return function
 
 
-def sort(function: Callable) -> Callable:
+def sort(function):
     function = click.option(
         "--sort", "-s", help="sort output", default=True, type=bool
     )(function)
     return function
 
 
-def to_string(function: Callable) -> Callable:
+def to_string(function):
     function = click.option(
         "--to-string", help="stringify output", default=False, type=bool
     )(function)
@@ -215,14 +218,14 @@ def to_string(function: Callable) -> Callable:
 @click.group()
 @config_file
 @click.pass_context
-def root(ctx: click.Context, configfile: str) -> None:
+def root(ctx, configfile):
     ctx.ensure_object(dict)
     config.init_from_toml(configfile)
     gql.init_from_config()
 
 
 @root.result_callback()
-def exit_cli(ctx: click.Context, configfile: str) -> None:
+def exit_cli(ctx, configfile):
     GqlApiSingleton.close()
 
 
@@ -231,7 +234,7 @@ def exit_cli(ctx: click.Context, configfile: str) -> None:
 @sort
 @to_string
 @click.pass_context
-def get(ctx: click.Context, output: str, sort: bool, to_string: bool) -> None:
+def get(ctx, output, sort, to_string):
     ctx.obj["options"] = {
         "output": output,
         "sort": sort,
@@ -242,7 +245,7 @@ def get(ctx: click.Context, output: str, sort: bool, to_string: bool) -> None:
 @root.group()
 @output
 @click.pass_context
-def describe(ctx: click.Context, output: str) -> None:
+def describe(ctx, output):
     ctx.obj["options"] = {
         "output": output,
     }
@@ -250,7 +253,7 @@ def describe(ctx: click.Context, output: str) -> None:
 
 @get.command()
 @click.pass_context
-def settings(ctx: click.Context) -> None:
+def settings(ctx):
     settings = queries.get_app_interface_settings()
     columns = ["vault", "kubeBinary", "mergeRequestGateway"]
     print_output(ctx.obj["options"], [settings], columns)
@@ -259,7 +262,7 @@ def settings(ctx: click.Context) -> None:
 @get.command()
 @click.argument("name", default="")
 @click.pass_context
-def aws_accounts(ctx: click.Context, name: str) -> None:
+def aws_accounts(ctx, name):
     accounts = queries.get_aws_accounts(name=name)
     if not accounts:
         print("no aws accounts found")
@@ -271,7 +274,7 @@ def aws_accounts(ctx: click.Context, name: str) -> None:
 @get.command()
 @click.argument("name", default="")
 @click.pass_context
-def clusters(ctx: click.Context, name: str) -> None:
+def clusters(ctx, name):
     clusters = queries.get_clusters()
     if name:
         clusters = [c for c in clusters if c["name"] == name]
@@ -288,7 +291,7 @@ def clusters(ctx: click.Context, name: str) -> None:
 @get.command()
 @click.argument("name", default="")
 @click.pass_context
-def cluster_upgrades(ctx: click.Context, name: str) -> None:
+def cluster_upgrades(ctx, name):
     settings = queries.get_app_interface_settings()
 
     clusters = queries.get_clusters()
@@ -319,11 +322,12 @@ def cluster_upgrades(ctx: click.Context, name: str) -> None:
         if data.get("upgradePolicy") == "automatic":
             data["schedule"] = c["upgradePolicy"]["schedule"]
             ocm = ocm_map.get(c["name"])
-
-
-
-
-
+            if ocm:
+                upgrade_policy = ocm.get_upgrade_policies(c["name"])
+                if upgrade_policy and len(upgrade_policy) > 0:
+                    next_run = upgrade_policy[0].get("next_run")
+                    if next_run:
+                        data["next_run"] = next_run
         else:
             data["upgradePolicy"] = "manual"
 
@@ -337,7 +341,7 @@ def cluster_upgrades(ctx: click.Context, name: str) -> None:
 @get.command()
 @environ(["APP_INTERFACE_STATE_BUCKET", "APP_INTERFACE_STATE_BUCKET_ACCOUNT"])
 @click.pass_context
-def version_history(ctx: click.Context) -> None:
+def version_history(ctx):
     import reconcile.aus.ocm_upgrade_scheduler as ous
 
     clusters = aus_clusters_query(query_func=gql.get_api().query).clusters or []
@@ -373,11 +377,11 @@ def version_history(ctx: click.Context) -> None:
 
 def get_upgrade_policies_data(
     org_upgrade_specs: list[OrganizationUpgradeSpec],
-    md_output
-    integration
-    workload
-    show_only_soaking_upgrades
-    by_workload
+    md_output,
+    integration,
+    workload=None,
+    show_only_soaking_upgrades=False,
+    by_workload=False,
 ) -> list:
     if not org_upgrade_specs:
         return []
@@ -558,12 +562,12 @@ more than 6 hours will be highlighted.
 )
 @click.pass_context
 def cluster_upgrade_policies(
-    ctx
-    cluster
-    workload
-    show_only_soaking_upgrades
-    by_workload
-)
+    ctx,
+    cluster=None,
+    workload=None,
+    show_only_soaking_upgrades=False,
+    by_workload=False,
+):
     print(
         "https://grafana.app-sre.devshift.net/d/ukLXCSwVz/aus-cluster-upgrade-overview"
     )
@@ -578,7 +582,9 @@ def inherit_version_data_text(org: AUSOCMOrganization) -> str:
 
 @get.command()
 @click.pass_context
-def ocm_fleet_upgrade_policies(ctx: click.Context) -> None:
+def ocm_fleet_upgrade_policies(
+    ctx,
+):
     from reconcile.aus.ocm_upgrade_scheduler_org import (
         OCMClusterUpgradeSchedulerOrgIntegration,
     )
@@ -611,12 +617,7 @@ def ocm_fleet_upgrade_policies(ctx: click.Context) -> None:
     help="Ignore STS clusters",
 )
 @click.pass_context
-def aus_fleet_upgrade_policies(
-    ctx: click.Context,
-    ocm_env: str | None,
-    ocm_org_ids: str | None,
-    ignore_sts_clusters: bool,
-) -> None:
+def aus_fleet_upgrade_policies(ctx, ocm_env, ocm_org_ids, ignore_sts_clusters):
     from reconcile.aus.advanced_upgrade_service import AdvancedUpgradeServiceIntegration
 
     parsed_ocm_org_ids = set(ocm_org_ids.split(",")) if ocm_org_ids else None
@@ -633,8 +634,8 @@ def aus_fleet_upgrade_policies(
 
 
 def generate_fleet_upgrade_policices_report(
-    ctx
-)
+    ctx, aus_integration: AdvancedUpgradeSchedulerBaseIntegration
+):
     md_output = ctx.obj["options"]["output"] == "md"
 
     org_upgrade_specs: dict[str, OrganizationUpgradeSpec] = {}
@@ -952,7 +953,7 @@ def upgrade_cluster_addon(
     )
 
 
-def has_cluster_account_access(cluster: dict[str, Any]) -> bool:
+def has_cluster_account_access(cluster: dict[str, Any]):
     spec = cluster.get("spec") or {}
     account = spec.get("account")
     return account or cluster.get("awsInfrastructureManagementAccounts") is not None
@@ -961,7 +962,7 @@ def has_cluster_account_access(cluster: dict[str, Any]) -> bool:
 @get.command()
 @click.argument("name", default="")
 @click.pass_context
-def clusters_network(ctx: click.Context, name: str) -> None:
+def clusters_network(ctx, name):
     settings = queries.get_app_interface_settings()
     clusters = [
         c
@@ -1011,6 +1012,7 @@ def clusters_network(ctx: click.Context, name: str) -> None:
         ]
         with AWSApi(1, [account], settings=settings, init_users=False) as aws_api:
             vpc_id, _, _, _ = aws_api.get_cluster_vpc_details(account)
+            assert vpc_id
             cluster["vpc_id"] = vpc_id
             egress_ips = aws_api.get_cluster_nat_gateways_egress_ips(account, vpc_id)
             cluster["egress_ips"] = ", ".join(sorted(egress_ips))
@@ -1023,7 +1025,7 @@ def clusters_network(ctx: click.Context, name: str) -> None:
 
 @get.command()
 @click.pass_context
-def network_reservations(ctx: click.Context) -> None:
+def network_reservations(ctx) -> None:
     from reconcile.typed_queries.reserved_networks import get_networks
 
     columns = [
@@ -1036,10 +1038,11 @@ def network_reservations(ctx: click.Context) -> None:
     ]
     network_table = []
 
-    def md_link(url: str) -> str:
+    def md_link(url) -> str:
         if ctx.obj["options"]["output"] == "md":
             return f"[{url}]({url})"
-        return url
+        else:
+            return url
 
     for network in get_networks():
         parentAddress = "none"
@@ -1080,7 +1083,7 @@ def network_reservations(ctx: click.Context) -> None:
     default=24,
 )
 @click.pass_context
-def cidr_blocks(ctx: click.Context, for_cluster: int, mask: int) -> None:
+def cidr_blocks(ctx, for_cluster: int, mask: int) -> None:
     import ipaddress
 
     from reconcile.typed_queries.aws_vpcs import get_aws_vpcs
@@ -1206,7 +1209,7 @@ def ocm_aws_infrastructure_access_switch_role_links_data() -> list[dict]:
 
 @get.command()
 @click.pass_context
-def ocm_aws_infrastructure_access_switch_role_links_flat(ctx: click.Context) -> None:
+def ocm_aws_infrastructure_access_switch_role_links_flat(ctx):
     results = ocm_aws_infrastructure_access_switch_role_links_data()
     columns = ["cluster", "user_arn", "access_level", "switch_role_link"]
     print_output(ctx.obj["options"], results, columns)
@@ -1214,11 +1217,11 @@ def ocm_aws_infrastructure_access_switch_role_links_flat(ctx: click.Context) ->
 
 @get.command()
 @click.pass_context
-def ocm_aws_infrastructure_access_switch_role_links(ctx: click.Context) -> None:
+def ocm_aws_infrastructure_access_switch_role_links(ctx):
     if ctx.obj["options"]["output"] != "md":
         raise Exception(f"Unupported output: {ctx.obj['options']['output']}")
     results = ocm_aws_infrastructure_access_switch_role_links_data()
-    by_user
+    by_user = {}
     for r in results:
         by_user.setdefault(r["user"], []).append(r)
     columns = ["cluster", "source_login", "access_level", "switch_role_link"]
@@ -1232,7 +1235,7 @@ def ocm_aws_infrastructure_access_switch_role_links(ctx: click.Context) -> None:
 
 @get.command()
 @click.pass_context
-def clusters_aws_account_ids(ctx: click.Context) -> None:
+def clusters_aws_account_ids(ctx):
     settings = queries.get_app_interface_settings()
     clusters = [c for c in queries.get_clusters() if c.get("ocm") is not None]
     ocm_map = OCMMap(clusters=clusters, settings=settings)
@@ -1262,7 +1265,7 @@ def clusters_aws_account_ids(ctx: click.Context) -> None:
 @root.command()
 @click.argument("account_name")
 @click.pass_context
-def user_credentials_migrate_output(ctx: click.Context, account_name: str) -> None:
+def user_credentials_migrate_output(ctx, account_name) -> None:
     accounts = queries.get_state_aws_accounts()
     state = init_state(integration="account-notifier")
     skip_accounts, appsre_pgp_key, _ = tfu.get_reencrypt_settings()
@@ -1304,7 +1307,7 @@ def user_credentials_migrate_output(ctx: click.Context, account_name: str) -> No
 
 @get.command()
 @click.pass_context
-def aws_route53_zones(ctx: click.Context) -> None:
+def aws_route53_zones(ctx):
     zones = queries.get_dns_zones()
 
     results = []
@@ -1327,7 +1330,7 @@ def aws_route53_zones(ctx: click.Context) -> None:
 @click.argument("cluster_name")
 @click.option("--cluster-admin/--no-cluster-admin", default=False)
 @click.pass_context
-def bot_login(ctx: click.Context, cluster_name: str, cluster_admin: bool) -> None:
+def bot_login(ctx, cluster_name, cluster_admin):
     settings = queries.get_app_interface_settings()
     secret_reader = SecretReader(settings=settings)
     clusters = queries.get_clusters()
@@ -1350,7 +1353,7 @@ def bot_login(ctx: click.Context, cluster_name: str, cluster_admin: bool) -> Non
 )
 @click.argument("org_name")
 @click.pass_context
-def ocm_login(ctx: click.Context, org_name: str) -> None:
+def ocm_login(ctx, org_name):
     settings = queries.get_app_interface_settings()
     secret_reader = SecretReader(settings=settings)
     ocms = [
@@ -1377,7 +1380,7 @@ def ocm_login(ctx: click.Context, org_name: str) -> None:
 )
 @click.argument("account_name")
 @click.pass_context
-def aws_creds(ctx: click.Context, account_name: str) -> None:
+def aws_creds(ctx, account_name):
     settings = queries.get_app_interface_settings()
     secret_reader = SecretReader(settings=settings)
     accounts = queries.get_aws_accounts(name=account_name)
@@ -1420,14 +1423,8 @@ def aws_creds(ctx: click.Context, account_name: str) -> None:
 )
 @click.pass_context
 def copy_tfstate(
-    ctx: click.Context,
-    source_bucket: str,
-    source_object_path: str,
-    account_uid: str,
-    rename: str | None,
-    region: str | None,
-    force: bool,
-) -> None:
+    ctx, source_bucket, source_object_path, account_uid, rename, region, force
+):
     settings = queries.get_app_interface_settings()
     secret_reader = SecretReader(settings=settings)
     accounts = queries.get_aws_accounts(uid=account_uid, terraform_state=True)
@@ -1448,6 +1445,7 @@ def copy_tfstate(
         )
         return
 
+    dest_filename = ""
     if rename:
         dest_filename = rename.removesuffix(".tfstate")
     else:
@@ -1459,10 +1457,13 @@ def copy_tfstate(
     with AWSApi(1, accounts, settings, secret_reader) as aws:
         session = aws.get_session(account["name"])
         s3_client = aws.get_session_client(session, "s3", region)
-        copy_source = {
-            "Bucket": source_bucket,
-            "Key": source_object_path,
-        }
+        copy_source = cast(
+            CopySourceTypeDef,
+            {
+                "Bucket": source_bucket,
+                "Key": source_object_path,
+            },
+        )
 
         dest_pretty_path = f"s3://{dest_bucket}/{dest_key}"
         # check if dest already exists
@@ -1505,26 +1506,20 @@ def copy_tfstate(
 @get.command(short_help='obtain "rosa create cluster" command by cluster name')
 @click.argument("cluster_name")
 @click.pass_context
-def rosa_create_cluster_command(ctx: click.Context, cluster_name: str) -> None:
+def rosa_create_cluster_command(ctx, cluster_name):
     clusters = [c for c in get_clusters() if c.name == cluster_name]
-    if not clusters:
+    try:
+        cluster = clusters[0]
+    except IndexError:
         print(f"{cluster_name} not found.")
         sys.exit(1)
-    cluster = clusters[0]
 
-    if (
-        not cluster.spec
-        or cluster.spec.product != OCM_PRODUCT_ROSA
-        or not isinstance(cluster.spec, ClusterSpecROSAV1)
-    ):
+    if cluster.spec.product != OCM_PRODUCT_ROSA:
         print("must be a rosa cluster.")
         sys.exit(1)
 
     settings = queries.get_app_interface_settings()
     account = cluster.spec.account
-    if not account:
-        print("account not found.")
-        sys.exit(1)
 
     if account.billing_account:
         billing_account = account.billing_account.uid
@@ -1534,19 +1529,6 @@ def rosa_create_cluster_command(ctx: click.Context, cluster_name: str) -> None:
     ) as aws_api:
         billing_account = aws_api.get_organization_billing_account(account.name)
 
-    if not cluster.spec.oidc_endpoint_url:
-        print("oidc_endpoint_url not set.")
-        sys.exit(1)
-    if not cluster.spec.subnet_ids:
-        print("subnet_ids not set.")
-        sys.exit(1)
-    if not cluster.network:
-        print("network not set.")
-        sys.exit(1)
-    if not cluster.machine_pools:
-        print("machine_pools not set.")
-        sys.exit(1)
-
     print(
         " ".join([
             "rosa create cluster",
@@ -1600,9 +1582,7 @@ def rosa_create_cluster_command(ctx: click.Context, cluster_name: str) -> None:
 @click.argument("jumphost_hostname", required=False)
 @click.argument("cluster_name", required=False)
 @click.pass_context
-def sshuttle_command(
-    ctx: click.Context, jumphost_hostname: str | None, cluster_name: str | None
-) -> None:
+def sshuttle_command(ctx, jumphost_hostname: str | None, cluster_name: str | None):
     jumphosts_query_data = queries.get_jumphosts(hostname=jumphost_hostname)
     jumphosts = jumphosts_query_data.jumphosts or []
     for jh in jumphosts:
@@ -1624,9 +1604,7 @@ def sshuttle_command(
 @click.argument("instance_name")
 @click.argument("job_name")
 @click.pass_context
-def jenkins_job_vault_secrets(
-    ctx: click.Context, instance_name: str, job_name: str
-) -> None:
+def jenkins_job_vault_secrets(ctx, instance_name: str, job_name: str) -> None:
     secret_reader = SecretReader(queries.get_secret_reader_settings())
     jjb: JJB = init_jjb(secret_reader, instance_name, config_name=None, print_only=True)
     jobs = jjb.get_all_jobs([job_name], instance_name)[instance_name]
@@ -1651,7 +1629,7 @@ def jenkins_job_vault_secrets(
 @get.command()
 @click.argument("name", default="")
 @click.pass_context
-def namespaces(ctx: click.Context, name: str) -> None:
+def namespaces(ctx, name):
     namespaces = queries.get_namespaces()
     if name:
         namespaces = [ns for ns in namespaces if ns["name"] == name]
@@ -1663,7 +1641,7 @@ def namespaces(ctx: click.Context, name: str) -> None:
     print_output(ctx.obj["options"], namespaces, columns)
 
 
-def add_resource(item: dict, resource: Mapping, columns: list[str]) -> None:
+def add_resource(item, resource, columns):
     provider = resource["provider"]
     if provider not in columns:
         columns.append(provider)
@@ -1674,11 +1652,11 @@ def add_resource(item: dict, resource: Mapping, columns: list[str]) -> None:
 
 @get.command
 @click.pass_context
-def cluster_openshift_resources(ctx: click.Context) -> None:
+def cluster_openshift_resources(ctx):
     gqlapi = gql.get_api()
     namespaces = gqlapi.query(orb.NAMESPACES_QUERY)["namespaces"]
     columns = ["name", "total"]
-    results
+    results = {}
     for ns_info in namespaces:
         cluster_name = ns_info["cluster"]["name"]
         item = {"name": cluster_name, "total": 0}
@@ -1699,10 +1677,10 @@ def cluster_openshift_resources(ctx: click.Context) -> None:
 
 @get.command
 @click.pass_context
-def aws_terraform_resources(ctx: click.Context) -> None:
+def aws_terraform_resources(ctx):
     namespaces = tfr.get_namespaces()
     columns = ["name", "total"]
-    results
+    results = {}
     for ns_info in namespaces:
         specs = (
             get_external_resource_specs(
@@ -1754,7 +1732,7 @@ def rds_region(
 
 @get.command
 @click.pass_context
-def rds(ctx: click.Context) -> None:
+def rds(ctx):
     namespaces = tfr.get_namespaces()
     accounts = {a["name"]: a for a in queries.get_aws_accounts()}
     results = []
@@ -1832,7 +1810,7 @@ You can view the source of this Markdown to extract the JSON data.
 
 @get.command
 @click.pass_context
-def rds_recommendations(ctx: click.Context) -> None:
+def rds_recommendations(ctx):
     IGNORED_STATUSES = ("resolved",)
     IGNORED_SEVERITIES = ("informational",)
 
@@ -1881,23 +1859,23 @@ def rds_recommendations(ctx: click.Context) -> None:
         with AWSApi(1, [account], settings=settings, init_users=False) as aws:
             try:
                 data = aws.describe_rds_recommendations(account_name, region)
-
+                db_recommendations = data.get("DBRecommendations", [])
             except Exception as e:
                 logging.error(f"Error describing RDS recommendations: {e}")
                 continue
 
             # Add field ResourceName infered from ResourceArn
             recommendations = [
-                {
-
+                {
+                    **rec,
+                    "ResourceName": rec["ResourceArn"].split(":")[-1],
+                    # The Description field has \n that are causing issues with the markdown table
+                    "Description": rec["Description"].replace("\n", " "),
+                }
+                for rec in db_recommendations
                 if rec.get("Status") not in IGNORED_STATUSES
                 and rec.get("Severity") not in IGNORED_SEVERITIES
             ]
-            # The Description field has \n that are causing issues with the markdown table
-            recommendations = [
-                {**rec, "Description": rec["Description"].replace("\n", " ")}
-                for rec in recommendations
-            ]
             # If we have no recommendations to show, skip
             if not recommendations:
                 continue
@@ -1911,7 +1889,7 @@ def rds_recommendations(ctx: click.Context) -> None:
 
 @get.command()
 @click.pass_context
-def products(ctx: click.Context) -> None:
+def products(ctx):
     products = queries.get_products()
     columns = ["name", "description"]
     print_output(ctx.obj["options"], products, columns)
@@ -1920,7 +1898,7 @@ def products(ctx: click.Context) -> None:
 @describe.command()
 @click.argument("name")
 @click.pass_context
-def product(ctx: click.Context, name: str) -> None:
+def product(ctx, name):
     products = queries.get_products()
     products = [p for p in products if p["name"].lower() == name.lower()]
     if len(products) != 1:
@@ -1935,7 +1913,7 @@ def product(ctx: click.Context, name: str) -> None:
 
 @get.command()
 @click.pass_context
-def environments(ctx: click.Context) -> None:
+def environments(ctx):
     environments = queries.get_environments()
     columns = ["name", "description", "product.name"]
     # TODO(mafriedm): fix this
@@ -1947,7 +1925,7 @@ def environments(ctx: click.Context) -> None:
 @describe.command()
 @click.argument("name")
 @click.pass_context
-def environment(ctx: click.Context, name: str) -> None:
+def environment(ctx, name):
     environments = queries.get_environments()
     environments = [e for e in environments if e["name"].lower() == name.lower()]
     if len(environments) != 1:
@@ -1965,7 +1943,7 @@ def environment(ctx: click.Context, name: str) -> None:
 
 @get.command()
 @click.pass_context
-def services(ctx: click.Context) -> None:
+def services(ctx):
     apps = queries.get_apps()
     columns = ["name", "path", "onboardingStatus"]
     print_output(ctx.obj["options"], apps, columns)
@@ -1973,15 +1951,17 @@ def services(ctx: click.Context) -> None:
 
 @get.command()
 @click.pass_context
-def repos(ctx: click.Context) -> None:
+def repos(ctx):
     repos = queries.get_repos()
-
+    repos = [{"url": r} for r in repos]
+    columns = ["url"]
+    print_output(ctx.obj["options"], repos, columns)
 
 
 @get.command()
 @click.argument("org_username")
 @click.pass_context
-def roles(ctx: click.Context, org_username: str) -> None:
+def roles(ctx, org_username):
     users = queries.get_roles()
     users = [u for u in users if u["org_username"] == org_username]
 
@@ -1992,7 +1972,7 @@ def roles(ctx: click.Context, org_username: str) -> None:
     user = users[0]
 
     # type, name, resource, [ref]
-    roles: dict[
+    roles: dict[(str, str, str), set] = defaultdict(set)
 
     for role in user["roles"]:
         role_name = role["path"]
@@ -2046,7 +2026,7 @@ def roles(ctx: click.Context, org_username: str) -> None:
 @get.command()
 @click.argument("org_username", default="")
 @click.pass_context
-def users(ctx: click.Context, org_username: str) -> None:
+def users(ctx, org_username):
     users = queries.get_users()
     if org_username:
         users = [u for u in users if u["org_username"] == org_username]
@@ -2057,7 +2037,7 @@ def users(ctx: click.Context, org_username: str) -> None:
 
 @get.command()
 @click.pass_context
-def integrations(ctx: click.Context) -> None:
+def integrations(ctx):
     environments = queries.get_integrations()
     columns = ["name", "description"]
     print_output(ctx.obj["options"], environments, columns)
@@ -2065,7 +2045,7 @@ def integrations(ctx: click.Context) -> None:
 
 @get.command()
 @click.pass_context
-def quay_mirrors(ctx: click.Context) -> None:
+def quay_mirrors(ctx):
     apps = queries.get_quay_repos()
 
     mirrors = []
@@ -2103,9 +2083,7 @@ def quay_mirrors(ctx: click.Context) -> None:
 @click.argument("kind")
 @click.argument("name")
 @click.pass_context
-def root_owner(
-    ctx: click.Context, cluster: str, namespace: str, kind: str, name: str
-) -> None:
+def root_owner(ctx, cluster, namespace, kind, name):
     settings = queries.get_app_interface_settings()
     clusters = [c for c in queries.get_clusters(minimal=True) if c["name"] == cluster]
     oc_map = OC_Map(
@@ -2135,9 +2113,7 @@ def root_owner(
 @click.argument("aws_account")
 @click.argument("identifier")
 @click.pass_context
-def service_owners_for_rds_instance(
-    ctx: click.Context, aws_account: str, identifier: str
-) -> None:
+def service_owners_for_rds_instance(ctx, aws_account, identifier):
     namespaces = queries.get_namespaces()
     service_owners = []
     for namespace_info in namespaces:
@@ -2159,7 +2135,7 @@ def service_owners_for_rds_instance(
 
 @get.command()
 @click.pass_context
-def sre_checkpoints(ctx: click.Context) -> None:
+def sre_checkpoints(ctx):
     apps = queries.get_apps()
 
     parent_apps = {app["parentApp"]["path"] for app in apps if app.get("parentApp")}
@@ -2183,14 +2159,13 @@ def sre_checkpoints(ctx: click.Context) -> None:
 
 @get.command()
 @click.pass_context
-def app_interface_merge_queue(ctx: click.Context) -> None:
+def app_interface_merge_queue(ctx):
     import reconcile.gitlab_housekeeping as glhk
 
     settings = queries.get_app_interface_settings()
     instance = queries.get_gitlab_instance()
     gl = GitLabApi(instance, project_url=settings["repoUrl"], settings=settings)
-
-    merge_requests = glhk.get_merge_requests(True, gl, state=state)
+    merge_requests = glhk.get_merge_requests(True, gl, state=None)
 
     columns = [
         "id",
@@ -2225,7 +2200,7 @@ def app_interface_merge_queue(ctx: click.Context) -> None:
 
 @get.command()
 @click.pass_context
-def app_interface_review_queue(ctx: click.Context) -> None:
+def app_interface_review_queue(ctx) -> None:
     import reconcile.gitlab_housekeeping as glhk
 
     settings = queries.get_app_interface_settings()
@@ -2242,7 +2217,7 @@ def app_interface_review_queue(ctx: click.Context) -> None:
         "labels",
     ]
 
-    def get_mrs(repo: str, url: str) -> list[dict[str, str]]:
+    def get_mrs(repo, url) -> list[dict[str, str]]:
         gl = GitLabApi(instance, project_url=url, settings=settings)
         merge_requests = gl.get_merge_requests(state=MRState.OPENED)
         try:
@@ -2263,7 +2238,7 @@ def app_interface_review_queue(ctx: click.Context) -> None:
         }:
             continue
 
-        labels = mr.attributes.get("labels")
+        labels = mr.attributes.get("labels") or []
         if glhk.is_good_to_merge(labels):
             continue
         if "stale" in labels:
@@ -2337,7 +2312,7 @@ def app_interface_review_queue(ctx: click.Context) -> None:
 
 @get.command()
 @click.pass_context
-def app_interface_open_selfserviceable_mr_queue(ctx: click.Context) -> None:
+def app_interface_open_selfserviceable_mr_queue(ctx):
     settings = queries.get_app_interface_settings()
     instance = queries.get_gitlab_instance()
     gl = GitLabApi(instance, project_url=settings["repoUrl"], settings=settings)
@@ -2358,7 +2333,7 @@ def app_interface_open_selfserviceable_mr_queue(ctx: click.Context) -> None:
             continue
 
         # skip stale or non self serviceable MRs
-        labels = mr.attributes.get("labels")
+        labels = mr.attributes.get("labels", [])
         if "stale" in labels:
             continue
         if SELF_SERVICEABLE not in labels and SAAS_FILE_UPDATE not in labels:
@@ -2400,7 +2375,7 @@ def app_interface_open_selfserviceable_mr_queue(ctx: click.Context) -> None:
 
 @get.command()
 @click.pass_context
-def change_types(ctx: click.Context) -> None:
+def change_types(ctx) -> None:
     """List all change types."""
     change_types = fetch_change_type_processors(gql.get_api(), NoOpFileDiffResolver())
 
@@ -2425,7 +2400,7 @@ def change_types(ctx: click.Context) -> None:
 
 @get.command()
 @click.pass_context
-def app_interface_merge_history(ctx: click.Context) -> None:
+def app_interface_merge_history(ctx):
     settings = queries.get_app_interface_settings()
     instance = queries.get_gitlab_instance()
     gl = GitLabApi(instance, project_url=settings["repoUrl"], settings=settings)
@@ -2447,7 +2422,7 @@ def app_interface_merge_history(ctx: click.Context) -> None:
             "id": f"[{mr.iid}]({mr.web_url})",
             "title": mr.title,
             "merged_at": mr.merged_at,
-            "labels": ", ".join(mr.attributes.get("labels")),
+            "labels": ", ".join(mr.attributes.get("labels", [])),
         }
         merge_queue_data.append(item)
 
@@ -2462,7 +2437,7 @@ def app_interface_merge_history(ctx: click.Context) -> None:
 )
 @use_jump_host()
 @click.pass_context
-def selectorsyncset_managed_resources(ctx: click.Context, use_jump_host: bool) -> None:
+def selectorsyncset_managed_resources(ctx, use_jump_host):
     vault_settings = get_app_interface_vault_settings()
     secret_reader = create_secret_reader(use_vault=vault_settings.vault)
     clusters = get_clusters()
@@ -2520,9 +2495,7 @@ def selectorsyncset_managed_resources(ctx: click.Context, use_jump_host: bool) -
 )
 @use_jump_host()
 @click.pass_context
-def selectorsyncset_managed_hypershift_resources(
-    ctx: click.Context, use_jump_host: bool
-) -> None:
+def selectorsyncset_managed_hypershift_resources(ctx, use_jump_host):
     vault_settings = get_app_interface_vault_settings()
     secret_reader = create_secret_reader(use_vault=vault_settings.vault)
     clusters = get_clusters()
@@ -2600,12 +2573,7 @@ def selectorsyncset_managed_hypershift_resources(
     default=os.environ.get("QONTRACT_CLI_EC2_JENKINS_WORKER_AWS_REGION", "us-east-1"),
 )
 @click.pass_context
-def ec2_jenkins_workers(
-    ctx: click.Context,
-    aws_access_key_id: str,
-    aws_secret_access_key: str,
-    aws_region: str,
-) -> None:
+def ec2_jenkins_workers(ctx, aws_access_key_id, aws_secret_access_key, aws_region):
     """Prints a list of jenkins workers and their status."""
     if not aws_access_key_id or not aws_secret_access_key:
         raise click.ClickException(
@@ -2652,9 +2620,9 @@ def ec2_jenkins_workers(
         url = ""
         for t in instance.tags:
             if t.get("Key") == "os":
-                os = t
+                os = t.get("Value")
             if t.get("Key") == "jenkins_controller":
-                url = f"https://{t
+                url = f"https://{t.get('Value').replace('-', '.')}.devshift.net/computer/{instance.id}"
         image = ec2.Image(instance.image_id)
         commit_url = ""
         for t in image.tags:
@@ -2681,7 +2649,7 @@ def ec2_jenkins_workers(
 @get.command()
 @click.argument("status-board-instance")
 @click.pass_context
-def slo_document_services(ctx: click.Context, status_board_instance: str) -> None:
+def slo_document_services(ctx, status_board_instance):
     """Print SLO Documents Services"""
     columns = [
         "slo_doc_name",
@@ -2710,7 +2678,7 @@ def slo_document_services(ctx: click.Context, status_board_instance: str) -> Non
     slodocs = []
     for slodoc in get_slo_documents():
         products = [ns.namespace.environment.product.name for ns in slodoc.namespaces]
-        for slo in slodoc.slos
+        for slo in slodoc.slos:
             for product in products:
                 if slodoc.app.parent_app:
                     app = f"{slodoc.app.parent_app.name}-{slodoc.app.name}"
@@ -2736,7 +2704,7 @@ def slo_document_services(ctx: click.Context, status_board_instance: str) -> Non
                     "target_unit": slo.slo_target_unit,
                     "window": slo.slo_parameters.window,
                     "statusBoardService": f"{product}/{slodoc.app.name}/{slo.name}",
-                    "statusBoardEnabled": "statusBoard" in
+                    "statusBoardEnabled": "statusBoard" in slodoc.labels,
                 }
                 slodocs.append(item)
 
@@ -2746,7 +2714,7 @@ def slo_document_services(ctx: click.Context, status_board_instance: str) -> Non
 @get.command()
 @click.argument("file_path")
 @click.pass_context
-def alerts(ctx: click.Context, file_path: str) -> None:
+def alerts(ctx, file_path):
     BIG_NUMBER = 10
 
     def sort_by_threshold(item: dict[str, str]) -> int:
@@ -2820,7 +2788,7 @@ def alerts(ctx: click.Context, file_path: str) -> None:
 @get.command()
 @click.pass_context
 @thread_pool_size(default=5)
-def aws_cost_report(ctx: click.Context, thread_pool_size: int) -> None:
+def aws_cost_report(ctx, thread_pool_size):
     command = AwsCostReportCommand.create(thread_pool_size=thread_pool_size)
     print(command.execute())
 
@@ -2828,7 +2796,7 @@ def aws_cost_report(ctx: click.Context, thread_pool_size: int) -> None:
 @get.command()
 @click.pass_context
 @thread_pool_size(default=5)
-def openshift_cost_report(ctx: click.Context, thread_pool_size: int) -> None:
+def openshift_cost_report(ctx, thread_pool_size):
     command = OpenShiftCostReportCommand.create(thread_pool_size=thread_pool_size)
     print(command.execute())
 
@@ -2836,9 +2804,7 @@ def openshift_cost_report(ctx: click.Context, thread_pool_size: int) -> None:
 @get.command()
 @click.pass_context
 @thread_pool_size(default=5)
-def openshift_cost_optimization_report(
-    ctx: click.Context, thread_pool_size: int
-) -> None:
+def openshift_cost_optimization_report(ctx, thread_pool_size):
     command = OpenShiftCostOptimizationReportCommand.create(
         thread_pool_size=thread_pool_size
     )
@@ -2847,7 +2813,7 @@ def openshift_cost_optimization_report(
 
 @get.command()
 @click.pass_context
-def osd_component_versions(ctx: click.Context) -> None:
+def osd_component_versions(ctx):
     osd_environments = [
         e["name"] for e in queries.get_environments() if e["product"]["name"] == "OSDv4"
     ]
@@ -2883,7 +2849,7 @@ def osd_component_versions(ctx: click.Context) -> None:
 
 @get.command()
 @click.pass_context
-def maintenances(ctx: click.Context) -> None:
+def maintenances(ctx):
     now = datetime.now(UTC)
     maintenances = maintenances_gql.query(gql.get_api().query).maintenances or []
     data = [
@@ -2946,7 +2912,7 @@ class MigrationStatusCount:
 
 @get.command()
 @click.pass_context
-def hcp_migration_status(ctx: click.Context) -> None:
+def hcp_migration_status(ctx):
     counts: dict[str, MigrationStatusCount] = {}
     total_count = MigrationStatusCount("total")
     saas_files = get_saas_files()
@@ -2985,7 +2951,7 @@ def hcp_migration_status(ctx: click.Context) -> None:
 
 @get.command()
 @click.pass_context
-def systems_and_tools(ctx: click.Context) -> None:
+def systems_and_tools(ctx):
     print(
         f"This report is obtained from app-interface Graphql endpoint available at: {config.get_config()['graphql']['server']}"
     )
@@ -2999,7 +2965,7 @@ def systems_and_tools(ctx: click.Context) -> None:
     "--environment_name", default="production", help="environment to get logs from"
 )
 @click.pass_context
-def logs(ctx: click.Context, integration_name: str, environment_name: str) -> None:
+def logs(ctx, integration_name: str, environment_name: str):
     integrations = [
         i
         for i in integrations_gql.query(query_func=gql.get_api().query).integrations
@@ -3038,7 +3004,7 @@ def logs(ctx: click.Context, integration_name: str, environment_name: str) -> No
 
 @get.command
 @click.pass_context
-def jenkins_jobs(ctx: click.Context) -> None:
+def jenkins_jobs(ctx):
     jenkins_configs = queries.get_jenkins_configs()
 
     # stats dicts
@@ -3108,9 +3074,9 @@ You can view the source of this Markdown to extract the JSON data.
 
 @get.command
 @click.pass_context
-def container_image_details(ctx: click.Context) -> None:
+def container_image_details(ctx):
     apps = get_apps_quay_repos_escalation_policies()
-    data: list[dict[str, str
+    data: list[dict[str, str]] = []
     for app in apps:
         app_name = f"{app.parent_app.name}/{app.name}" if app.parent_app else app.name
         ep_channels = app.escalation_policy.channels
@@ -3122,7 +3088,7 @@ def container_image_details(ctx: click.Context) -> None:
             if repo.mirror:
                 continue
             repository = f"quay.io/{org_name}/{repo.name}"
-            item
+            item = {
                 "app": app_name,
                 "repository": repository,
                 "email": email,
@@ -3135,25 +3101,27 @@ def container_image_details(ctx: click.Context) -> None:
 
 @get.command
 @click.pass_context
-def change_log_tracking(ctx: click.Context) -> None:
+def change_log_tracking(ctx):
     repo_url = get_app_interface_repo_url()
     change_types = fetch_change_type_processors(gql.get_api(), NoOpFileDiffResolver())
     state = init_state(integration=cl.QONTRACT_INTEGRATION)
     change_log = ChangeLog(**state.get(BUNDLE_DIFFS_OBJ))
     data: list[dict[str, str]] = []
-    for change_log_item in change_log.items:
+    for item in change_log.items:
+        change_log_item = ChangeLogItem(**item)
         commit = change_log_item.commit
         covered_change_types_descriptions = [
             ct.description
             for ct in change_types
             if ct.name in change_log_item.change_types
         ]
-        data.append({
+        item = {
             "commit": f"[{commit[:7]}]({repo_url}/commit/{commit})",
             "merged_at": change_log_item.merged_at,
             "apps": ", ".join(change_log_item.apps),
             "changes": ", ".join(covered_change_types_descriptions),
-        })
+        }
+        data.append(item)
 
     # TODO(mafriedm): Fix this
     ctx.obj["options"]["sort"] = False
@@ -3164,7 +3132,7 @@ def change_log_tracking(ctx: click.Context) -> None:
 @root.group(name="set")
 @output
 @click.pass_context
-def set_command(ctx: click.Context, output: str) -> None:
+def set_command(ctx, output):
     ctx.obj["output"] = output
 
 
@@ -3173,9 +3141,7 @@ def set_command(ctx: click.Context, output: str) -> None:
 @click.argument("usergroup")
 @click.argument("username")
 @click.pass_context
-def slack_usergroup(
-    ctx: click.Context, workspace: str, usergroup: str, username: str
-) -> None:
+def slack_usergroup(ctx, workspace, usergroup, username):
     """Update users in a slack usergroup.
     Use an org_username as the username.
     To empty a slack usergroup, pass '' (empty string) as the username.
@@ -3183,8 +3149,6 @@ def slack_usergroup(
     settings = queries.get_app_interface_settings()
     slack = slackapi_from_queries("qontract-cli")
     ugid = slack.get_usergroup_id(usergroup)
-    if not ugid:
-        raise click.ClickException(f"Usergroup {usergroup} not found.")
     if username:
         mail_address = settings["smtp"]["mailAddress"]
         users = [slack.get_user_id_by_name(username, mail_address)]
@@ -3193,17 +3157,33 @@ def slack_usergroup(
     slack.update_usergroup_users(ugid, users)
 
 
+@set_command.command()
+@click.argument("org_name")
+@click.argument("cluster_name")
+@click.pass_context
+def cluster_admin(ctx, org_name, cluster_name):
+    settings = queries.get_app_interface_settings()
+    ocms = [
+        o for o in queries.get_openshift_cluster_managers() if o["name"] == org_name
+    ]
+    ocm_map = OCMMap(ocms=ocms, settings=settings)
+    ocm = ocm_map[org_name]
+    enabled = ocm.is_cluster_admin_enabled(cluster_name)
+    if not enabled:
+        ocm.enable_cluster_admin(cluster_name)
+
+
 @root.group()
 @environ(["APP_INTERFACE_STATE_BUCKET"])
 @click.pass_context
-def state(ctx: click.Context) -> None:
+def state(ctx):
     pass
 
 
 @state.command()
 @click.argument("integration", default="")
 @click.pass_context
-def ls(ctx: click.Context, integration: str) -> None:
+def ls(ctx, integration):
     state = init_state(integration=integration)
     keys = state.ls()
     # if integration in not defined the 2th token will be the integration name
@@ -3224,7 +3204,7 @@ def ls(ctx: click.Context, integration: str) -> None:
 @click.argument("integration")
 @click.argument("key")
 @click.pass_context
-def state_get(ctx: click.Context, integration: str, key: str) -> None:
+def state_get(ctx, integration, key):
     state = init_state(integration=integration)
     value = state.get(key)
     print(value)
@@ -3234,7 +3214,7 @@ def state_get(ctx: click.Context, integration: str, key: str) -> None:
 @click.argument("integration")
 @click.argument("key")
 @click.pass_context
-def add(ctx: click.Context, integration: str, key: str) -> None:
+def add(ctx, integration, key):
     state = init_state(integration=integration)
     state.add(key)
 
@@ -3244,7 +3224,7 @@ def add(ctx: click.Context, integration: str, key: str) -> None:
 @click.argument("key")
 @click.argument("value")
 @click.pass_context
-def state_set(ctx: click.Context, integration: str, key: str, value: str) -> None:
+def state_set(ctx, integration, key, value):
     state = init_state(integration=integration)
     state.add(key, value=value, force=True)
 
@@ -3253,7 +3233,7 @@ def state_set(ctx: click.Context, integration: str, key: str, value: str) -> Non
 @click.argument("integration")
 @click.argument("key")
 @click.pass_context
-def rm(ctx: click.Context, integration: str, key: str) -> None:
+def rm(ctx, integration, key):
     state = init_state(integration=integration)
     state.rm(key)
 
@@ -3261,7 +3241,7 @@ def rm(ctx: click.Context, integration: str, key: str) -> None:
 @root.group()
 @environ(["APP_INTERFACE_STATE_BUCKET"])
 @click.pass_context
-def early_exit_cache(ctx: click.Context) -> None:
+def early_exit_cache(ctx):
     pass
 
 
@@ -3297,13 +3277,13 @@ def early_exit_cache(ctx: click.Context) -> None:
 )
 @click.pass_context
 def early_exit_cache_head(
-    ctx
-    integration
-    integration_version
-    dry_run
-    cache_source
-    shard
-)
+    ctx,
+    integration,
+    integration_version,
+    dry_run,
+    cache_source,
+    shard,
+):
     with EarlyExitCache.build() as cache:
         cache_key = CacheKey(
             integration=integration,
@@ -3349,13 +3329,13 @@ def early_exit_cache_head(
 )
 @click.pass_context
 def early_exit_cache_get(
-    ctx
-    integration
-    integration_version
-    dry_run
-    cache_source
-    shard
-)
+    ctx,
+    integration,
+    integration_version,
+    dry_run,
+    cache_source,
+    shard,
+):
     with EarlyExitCache.build() as cache:
         cache_key = CacheKey(
             integration=integration,
@@ -3432,18 +3412,18 @@ def early_exit_cache_get(
 )
 @click.pass_context
 def early_exit_cache_set(
-    ctx
-    integration
-    integration_version
-    dry_run
-    cache_source
-    shard
-    payload
-    log_output
-    applied_count
-    ttl
-    latest_cache_source_digest
-)
+    ctx,
+    integration,
+    integration_version,
+    dry_run,
+    cache_source,
+    shard,
+    payload,
+    log_output,
+    applied_count,
+    ttl,
+    latest_cache_source_digest,
+):
     with EarlyExitCache.build() as cache:
         cache_key = CacheKey(
             integration=integration,
@@ -3492,13 +3472,13 @@ def early_exit_cache_set(
 )
 @click.pass_context
 def early_exit_cache_delete(
-    ctx
-    integration
-    integration_version
-    dry_run
-    cache_source_digest
-    shard
-)
+    ctx,
+    integration,
+    integration_version,
+    dry_run,
+    cache_source_digest,
+    shard,
+):
     with EarlyExitCache.build() as cache:
         cache_key_with_digest = CacheKeyWithDigest(
             integration=integration,
@@ -3529,33 +3509,25 @@ def early_exit_cache_delete(
|
|
3529
3509
|
type=click.Choice(["config", "vault"]),
|
3530
3510
|
)
|
3531
3511
|
@click.pass_context
|
3532
|
-
def template(
|
3533
|
-
ctx: click.Context,
|
3534
|
-
cluster: str,
|
3535
|
-
namespace: str,
|
3536
|
-
kind: str,
|
3537
|
-
name: str,
|
3538
|
-
path: str,
|
3539
|
-
secret_reader: str,
|
3540
|
-
) -> None:
|
3512
|
+
def template(ctx, cluster, namespace, kind, name, path, secret_reader):
|
3541
3513
|
gqlapi = gql.get_api()
|
3542
3514
|
namespaces = gqlapi.query(orb.NAMESPACES_QUERY)["namespaces"]
|
3543
|
-
|
3515
|
+
namespace_info = [
|
3544
3516
|
n
|
3545
3517
|
for n in namespaces
|
3546
3518
|
if n["cluster"]["name"] == cluster and n["name"] == namespace
|
3547
3519
|
]
|
3548
|
-
if len(
|
3520
|
+
if len(namespace_info) != 1:
|
3549
3521
|
print(f"{cluster}/{namespace} error")
|
3550
3522
|
sys.exit(1)
|
3551
3523
|
|
3552
|
-
namespace_info = namespaces_info[0]
|
3553
3524
|
settings = queries.get_app_interface_settings()
|
3554
3525
|
settings["vault"] = secret_reader == "vault"
|
3555
3526
|
|
3556
3527
|
if path and path.startswith("resources"):
|
3557
3528
|
path = path.replace("resources", "", 1)
|
3558
3529
|
|
3530
|
+
[namespace_info] = namespace_info
|
3559
3531
|
ob.aggregate_shared_resources(namespace_info, "openshiftResources")
|
3560
3532
|
openshift_resources = namespace_info.get("openshiftResources")
|
3561
3533
|
for r in openshift_resources:
|
@@ -3596,9 +3568,7 @@ def template(
|
|
3596
3568
|
type=click.Choice(["config", "vault"]),
|
3597
3569
|
)
|
3598
3570
|
@click.pass_context
|
3599
|
-
def run_prometheus_test(
|
3600
|
-
ctx: click.Context, path: str, cluster: str, namespace: str, secret_reader: str
|
3601
|
-
) -> None:
|
3571
|
+
def run_prometheus_test(ctx, path, cluster, namespace, secret_reader):
|
3602
3572
|
"""Run prometheus tests for the rule associated with the test in the PATH from given
|
3603
3573
|
CLUSTER/NAMESPACE"""
|
3604
3574
|
|
@@ -3684,17 +3654,17 @@ def run_prometheus_test(
|
|
3684
3654
|
)
|
3685
3655
|
@click.pass_context
|
3686
3656
|
def alert_to_receiver(
|
3687
|
-
ctx
|
3688
|
-
cluster
|
3689
|
-
namespace
|
3690
|
-
rules_path
|
3691
|
-
alert_name
|
3692
|
-
alertmanager_secret_path
|
3693
|
-
alertmanager_namespace
|
3694
|
-
alertmanager_secret_key
|
3695
|
-
secret_reader
|
3696
|
-
additional_label
|
3697
|
-
)
|
3657
|
+
ctx,
|
3658
|
+
cluster,
|
3659
|
+
namespace,
|
3660
|
+
rules_path,
|
3661
|
+
alert_name,
|
3662
|
+
alertmanager_secret_path,
|
3663
|
+
alertmanager_namespace,
|
3664
|
+
alertmanager_secret_key,
|
3665
|
+
secret_reader,
|
3666
|
+
additional_label,
|
3667
|
+
):
|
3698
3668
|
additional_labels = {}
|
3699
3669
|
for al in additional_label:
|
3700
3670
|
try:
|
@@ -3786,12 +3756,12 @@ def alert_to_receiver(
|
|
3786
3756
|
print(f"Cannot find alert {alert_name} in rules {rules_path}")
|
3787
3757
|
sys.exit(1)
|
3788
3758
|
|
3789
|
-
for
|
3790
|
-
result = amtool.config_routes_test(am_config,
|
3759
|
+
for al in alert_labels:
|
3760
|
+
result = amtool.config_routes_test(am_config, al)
|
3791
3761
|
if not result:
|
3792
3762
|
print(f"Error running amtool: {result}")
|
3793
3763
|
sys.exit(1)
|
3794
|
-
print("|".join([
|
3764
|
+
print("|".join([al["alertname"], str(result)]))
|
3795
3765
|
|
3796
3766
|
|
3797
3767
|
@root.command()
|
@@ -3799,12 +3769,7 @@ def alert_to_receiver(
 @click.option("--saas-file-name", default=None, help="saas-file to act on.")
 @click.option("--env-name", default=None, help="environment to use for parameters.")
 @click.pass_context
-def saas_dev(
-    ctx: click.Context,
-    app_name: str | None = None,
-    saas_file_name: str | None = None,
-    env_name: str | None = None,
-) -> None:
+def saas_dev(ctx, app_name=None, saas_file_name=None, env_name=None) -> None:
     if not env_name:
         print("env-name must be defined")
         return
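The guard kept above validates a required option by hand: `--env-name` is declared with `default=None`, then checked in the body. A minimal click sketch of the same pattern (hypothetical command, not part of the package):

    import click

    @click.command()
    @click.option("--env-name", default=None, help="environment to use for parameters.")
    def demo(env_name):
        # saas_dev-style manual check instead of click's required=True
        if not env_name:
            print("env-name must be defined")
            return
        print(f"using environment {env_name}")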
@@ -3852,7 +3817,7 @@ def saas_dev(
 @click.option("--app-name", default=None, help="app to act on.")
 @click.pass_context
 def saas_targets(
-    ctx: click.Context, saas_file_name: str | None = None, app_name: str | None = None
+    ctx, saas_file_name: str | None = None, app_name: str | None = None
 ) -> None:
     """Resolve namespaceSelectors and print all resulting targets of a saas file."""
     console = Console()
@@ -3916,7 +3881,7 @@ def saas_targets(
     default="json",
     type=click.Choice(["json", "yaml"]),
 )
-def query(output: str, query: str) -> None:
+def query(output, query):
     """Run a raw GraphQL query"""
     gqlapi = gql.get_api()
     result = gqlapi.query(query)
@@ -3930,7 +3895,7 @@ def query(output: str, query: str) -> None:
 @root.command()
 @click.argument("cluster")
 @click.argument("query")
-def promquery(cluster: str, query: str) -> None:
+def promquery(cluster, query):
     """Run a PromQL query"""
     config_data = config.get_config()
     auth = {"path": config_data["promql-auth"]["secret_path"], "field": "token"}
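For orientation, `promquery` above resolves a token from the `promql-auth` secret and queries the cluster's Prometheus HTTP API. A minimal sketch of such a bearer-token query, assuming a hypothetical endpoint and token (the package resolves both from its own config):

    import requests

    url = "https://prometheus.example.com/api/v1/query"  # hypothetical endpoint
    headers = {"Authorization": "Bearer example-token"}  # hypothetical token
    resp = requests.get(url, params={"query": "up"}, headers=headers, timeout=60)
    resp.raise_for_status()
    print(resp.json()["data"]["result"])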
@@ -3981,13 +3946,8 @@ def promquery(cluster: str, query: str) -> None:
     default=False,
 )
 def sre_checkpoint_metadata(
-    app_path: str,
-    parent_ticket: str,
-    jiraboard: str,
-    jiradef: str,
-    create_parent_ticket: bool,
-    dry_run: bool,
-) -> None:
+    app_path, parent_ticket, jiraboard, jiradef, create_parent_ticket, dry_run
+):
     """Check an app path for checkpoint-related metadata."""
     data = queries.get_app_metadata(app_path)
     settings = queries.get_app_interface_settings()
@@ -4026,13 +3986,8 @@ def sre_checkpoint_metadata(
     required=True,
 )
 def gpg_encrypt(
-    vault_path: str,
-    vault_secret_version: int,
-    file_path: str,
-    openshift_path: str,
-    output: str,
-    for_user: str,
-) -> None:
+    vault_path, vault_secret_version, file_path, openshift_path, output, for_user
+):
     """
     Encrypt the specified secret (local file, vault or openshift) with a
     given users gpg key. This is intended for easily sharing secrets with
@@ -4055,7 +4010,7 @@ def gpg_encrypt(
 @click.option("--channel", help="the channel that state is part of")
 @click.option("--sha", help="the commit sha we want state for")
 @environ(["APP_INTERFACE_STATE_BUCKET"])
-def get_promotion_state(channel: str, sha: str) -> None:
+def get_promotion_state(channel: str, sha: str):
     from tools.saas_promotion_state.saas_promotion_state import (
         SaasPromotionState,
     )
@@ -4080,7 +4035,7 @@ def get_promotion_state(channel: str, sha: str) -> None:
 @click.option("--sha", help="the commit sha we want state for")
 @click.option("--publisher-id", help="the publisher id we want state for")
 @environ(["APP_INTERFACE_STATE_BUCKET"])
-def mark_promotion_state_successful(channel: str, sha: str, publisher_id: str) -> None:
+def mark_promotion_state_successful(channel: str, sha: str, publisher_id: str):
     from tools.saas_promotion_state.saas_promotion_state import (
         SaasPromotionState,
     )
@@ -4104,9 +4059,7 @@ def mark_promotion_state_successful(channel: str, sha: str, publisher_id: str) -> None:
     help="filesystem path to a local app-interface repo",
     default=os.environ.get("APP_INTERFACE_PATH", None),
 )
-def test_change_type(
-    change_type_name: str, role_name: str, app_interface_path: str
-) -> None:
+def test_change_type(change_type_name: str, role_name: str, app_interface_path: str):
    from reconcile.change_owners import tester
 
     # tester.test_change_type(change_type_name, datafile_path)
@@ -4115,7 +4068,7 @@ def test_change_type(
 
 @root.group()
 @click.pass_context
-def sso_client(ctx: click.Context) -> None:
+def sso_client(ctx):
     """SSO client commands"""
 
 
@@ -4151,7 +4104,7 @@ def sso_client(ctx: click.Context) -> None:
 )
 @click.pass_context
 def create(
-    ctx: click.Context,
+    ctx,
     client_name: str,
     contact_email: str,
     keycloak_instance_vault_path: str,
@@ -4185,7 +4138,7 @@ def create(
 @sso_client.command()
 @click.argument("sso-client-vault-secret-path", required=True)
 @click.pass_context
-def remove(ctx: click.Context, sso_client_vault_secret_path: str) -> None:
+def remove(ctx, sso_client_vault_secret_path: str):
     """Remove an existing SSO client"""
     vault_settings = get_app_interface_vault_settings()
     secret_reader = create_secret_reader(use_vault=vault_settings.vault)
@@ -4232,12 +4185,8 @@ def remove(ctx: click.Context, sso_client_vault_secret_path: str) -> None:
 )
 @click.pass_context
 def external_resources(
-    ctx: click.Context,
-    provision_provider: str,
-    provisioner: str,
-    provider: str,
-    identifier: str,
-) -> None:
+    ctx, provision_provider: str, provisioner: str, provider: str, identifier: str
+):
     """External resources commands"""
     ctx.obj["provision_provider"] = provision_provider
     ctx.obj["provisioner"] = provisioner
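The `external_resources` group above only stores its options on `ctx.obj`; subcommands such as `get_input` and `request_reconciliation` below read them back. A minimal click sketch of that group-to-subcommand hand-off (hypothetical names):

    import click

    @click.group()
    @click.pass_context
    def demo_group(ctx):
        ctx.ensure_object(dict)
        ctx.obj["provisioner"] = "example-account"  # hypothetical value

    @demo_group.command()
    @click.pass_context
    def show(ctx):
        # subcommands read what the group stashed on ctx.obj
        click.echo(ctx.obj["provisioner"])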
@@ -4249,7 +4198,7 @@ def external_resources(
 
 @external_resources.command()
 @click.pass_context
-def get_input(ctx: click.Context) -> None:
+def get_input(ctx):
     """Gets the input data for an external resource asset. Input data is what is used
     in the Reconciliation Job to manage the resource."""
     erv2cli = Erv2Cli(
@@ -4264,7 +4213,7 @@ def get_input(ctx: click.Context) -> None:
 
 @external_resources.command()
 @click.pass_context
-def request_reconciliation(ctx: click.Context) -> None:
+def request_reconciliation(ctx):
     """Marks a resource as it needs to get reconciled. The itegration will reconcile the resource at
     its next iteration."""
     erv2cli = Erv2Cli(
@@ -4291,7 +4240,7 @@ def request_reconciliation(ctx: click.Context) -> None:
     default=False,
 )
 @click.pass_context
-def migrate(ctx: click.Context, dry_run: bool, skip_build: bool) -> None:
+def migrate(ctx, dry_run: bool, skip_build: bool) -> None:
     """Migrate an existing external resource managed by terraform-resources to ERv2.
 
 
@@ -4397,7 +4346,7 @@ def migrate(ctx: click.Context, dry_run: bool, skip_build: bool) -> None:
 @external_resources.command()
 @binary(["docker"])
 @click.pass_context
-def debug_shell(ctx: click.Context) -> None:
+def debug_shell(ctx) -> None:
     """Enter an ERv2 debug shell to manually migrate resources."""
     # use a temporary directory in $HOME. The MacOS colima default configuration allows docker mounts from $HOME.
     with tempfile.TemporaryDirectory(dir=Path.home(), prefix="erv2-debug.") as _tempdir:
@@ -4436,7 +4385,7 @@ def debug_shell(ctx: click.Context) -> None:
     prompt=True,
 )
 @click.pass_context
-def force_unlock(ctx: click.Context, lock_id: str) -> None:
+def force_unlock(ctx, lock_id: str) -> None:
     """Manually unlock the ERv2 terraform state."""
     # use a temporary directory in $HOME. The MacOS colima default configuration allows docker mounts from $HOME.
     with tempfile.TemporaryDirectory(
@@ -4477,14 +4426,14 @@ def force_unlock(ctx: click.Context, lock_id: str) -> None:
 @click.option("--include-pattern", help="Only include images that match this pattern")
 @click.pass_context
 def container_images(
-    ctx: click.Context,
-    cluster_name: str,
-    namespace_name: str,
-    thread_pool_size: int,
-    use_jump_host: bool,
-    exclude_pattern: str,
-    include_pattern: str,
-) -> None:
+    ctx,
+    cluster_name,
+    namespace_name,
+    thread_pool_size,
+    use_jump_host,
+    exclude_pattern,
+    include_pattern,
+):
     from tools.cli_commands.container_images_report import get_all_pods_images
 
     results = get_all_pods_images(
@@ -4531,7 +4480,7 @@ You can view the source of this Markdown to extract the JSON data.
 @get.command(help="Get all app tekton pipelines providers roles and users")
 @click.argument("app-name")
 @click.pass_context
-def tekton_roles_and_users(ctx: click.Context, app_name: str) -> None:
+def tekton_roles_and_users(ctx, app_name):
     pp_namespaces = {
         p.namespace.path
         for p in get_tekton_pipeline_providers()
@@ -4558,7 +4507,6 @@ def tekton_roles_and_users(ctx: click.Context, app_name: str) -> None:
         if not seen:
             seen = True
 
-        users: str | list[str]
         if ctx.obj["options"]["output"] == "table":
             users = ", ".join([u.org_username for u in r.users])
         else:
@@ -4578,7 +4526,7 @@ def tekton_roles_and_users(ctx: click.Context, app_name: str) -> None:
 )
 @click.argument("aws-account")
 @click.pass_context
-def log_group_usage(ctx: click.Context, aws_account: str) -> None:
+def log_group_usage(ctx, aws_account):
     accounts = queries.get_aws_accounts(name=aws_account)
     if not accounts:
         print("no aws account found with that name")
@@ -4588,7 +4536,7 @@ def log_group_usage(ctx: click.Context, aws_account: str) -> None:
     settings = queries.get_app_interface_settings()
     secret_reader = SecretReader(settings=settings)
     columns = ["log_group", "stored_bytes", "retention_days"]
-    results
+    results = []
 
     with AWSApi(1, [account], settings, secret_reader) as aws:
         session = aws.get_session(account["name"])