qontract-reconcile 0.10.2.dev50__py3-none-any.whl → 0.10.2.dev52__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of qontract-reconcile might be problematic; consult the package registry's advisory page for more details.

tools/qontract_cli.py CHANGED
@@ -10,6 +10,7 @@ import sys
10
10
  import tempfile
11
11
  import textwrap
12
12
  from collections import defaultdict
13
+ from collections.abc import Callable, Mapping
13
14
  from datetime import (
14
15
  UTC,
15
16
  datetime,
@@ -19,7 +20,7 @@ from operator import itemgetter
19
20
  from pathlib import Path
20
21
  from statistics import median
21
22
  from textwrap import dedent
22
- from typing import Any
23
+ from typing import TYPE_CHECKING, Any, cast
23
24
 
24
25
  import boto3
25
26
  import click
@@ -54,7 +55,6 @@ from reconcile.change_owners.bundle import NoOpFileDiffResolver
54
55
  from reconcile.change_owners.change_log_tracking import (
55
56
  BUNDLE_DIFFS_OBJ,
56
57
  ChangeLog,
57
- ChangeLogItem,
58
58
  )
59
59
  from reconcile.change_owners.change_owners import (
60
60
  fetch_change_type_processors,
@@ -81,6 +81,7 @@ from reconcile.gql_definitions.app_sre_tekton_access_revalidation.roles import (
81
81
  from reconcile.gql_definitions.common.app_interface_vault_settings import (
82
82
  AppInterfaceSettingsV1,
83
83
  )
84
+ from reconcile.gql_definitions.common.clusters import ClusterSpecROSAV1
84
85
  from reconcile.gql_definitions.fragments.aus_organization import AUSOCMOrganization
85
86
  from reconcile.gql_definitions.integrations import integrations as integrations_gql
86
87
  from reconcile.gql_definitions.maintenance import maintenances as maintenances_gql
@@ -152,6 +153,7 @@ from reconcile.utils.oc_map import (
152
153
  init_oc_map_from_clusters,
153
154
  )
154
155
  from reconcile.utils.ocm import OCM_PRODUCT_ROSA, OCMMap
156
+ from reconcile.utils.ocm.upgrades import get_upgrade_policies
155
157
  from reconcile.utils.ocm_base_client import init_ocm_base_client
156
158
  from reconcile.utils.output import print_output
157
159
  from reconcile.utils.saasherder.models import TargetSpec
@@ -184,8 +186,13 @@ from tools.sre_checkpoints import (
184
186
  get_latest_sre_checkpoints,
185
187
  )
186
188
 
189
+ if TYPE_CHECKING:
190
+ from mypy_boto3_s3.type_defs import CopySourceTypeDef
191
+ else:
192
+ CopySourceTypeDef = object
187
193
 
188
- def output(function):
194
+
195
+ def output(function: Callable) -> Callable:
189
196
  function = click.option(
190
197
  "--output",
191
198
  "-o",
@@ -196,14 +203,14 @@ def output(function):
196
203
  return function
197
204
 
198
205
 
199
- def sort(function):
206
+ def sort(function: Callable) -> Callable:
200
207
  function = click.option(
201
208
  "--sort", "-s", help="sort output", default=True, type=bool
202
209
  )(function)
203
210
  return function
204
211
 
205
212
 
206
- def to_string(function):
213
+ def to_string(function: Callable) -> Callable:
207
214
  function = click.option(
208
215
  "--to-string", help="stringify output", default=False, type=bool
209
216
  )(function)
@@ -213,14 +220,14 @@ def to_string(function):
213
220
  @click.group()
214
221
  @config_file
215
222
  @click.pass_context
216
- def root(ctx, configfile):
223
+ def root(ctx: click.Context, configfile: str) -> None:
217
224
  ctx.ensure_object(dict)
218
225
  config.init_from_toml(configfile)
219
226
  gql.init_from_config()
220
227
 
221
228
 
222
229
  @root.result_callback()
223
- def exit_cli(ctx, configfile):
230
+ def exit_cli(ctx: click.Context, configfile: str) -> None:
224
231
  GqlApiSingleton.close()
225
232
 
226
233
 
@@ -229,7 +236,7 @@ def exit_cli(ctx, configfile):
229
236
  @sort
230
237
  @to_string
231
238
  @click.pass_context
232
- def get(ctx, output, sort, to_string):
239
+ def get(ctx: click.Context, output: str, sort: bool, to_string: bool) -> None:
233
240
  ctx.obj["options"] = {
234
241
  "output": output,
235
242
  "sort": sort,
@@ -240,7 +247,7 @@ def get(ctx, output, sort, to_string):
240
247
  @root.group()
241
248
  @output
242
249
  @click.pass_context
243
- def describe(ctx, output):
250
+ def describe(ctx: click.Context, output: str) -> None:
244
251
  ctx.obj["options"] = {
245
252
  "output": output,
246
253
  }
@@ -248,7 +255,7 @@ def describe(ctx, output):
248
255
 
249
256
  @get.command()
250
257
  @click.pass_context
251
- def settings(ctx):
258
+ def settings(ctx: click.Context) -> None:
252
259
  settings = queries.get_app_interface_settings()
253
260
  columns = ["vault", "kubeBinary", "mergeRequestGateway"]
254
261
  print_output(ctx.obj["options"], [settings], columns)
@@ -257,7 +264,7 @@ def settings(ctx):
257
264
  @get.command()
258
265
  @click.argument("name", default="")
259
266
  @click.pass_context
260
- def aws_accounts(ctx, name):
267
+ def aws_accounts(ctx: click.Context, name: str) -> None:
261
268
  accounts = queries.get_aws_accounts(name=name)
262
269
  if not accounts:
263
270
  print("no aws accounts found")
@@ -269,7 +276,7 @@ def aws_accounts(ctx, name):
269
276
  @get.command()
270
277
  @click.argument("name", default="")
271
278
  @click.pass_context
272
- def clusters(ctx, name):
279
+ def clusters(ctx: click.Context, name: str) -> None:
273
280
  clusters = queries.get_clusters()
274
281
  if name:
275
282
  clusters = [c for c in clusters if c["name"] == name]
@@ -286,7 +293,7 @@ def clusters(ctx, name):
286
293
  @get.command()
287
294
  @click.argument("name", default="")
288
295
  @click.pass_context
289
- def cluster_upgrades(ctx, name):
296
+ def cluster_upgrades(ctx: click.Context, name: str) -> None:
290
297
  settings = queries.get_app_interface_settings()
291
298
 
292
299
  clusters = queries.get_clusters()
@@ -317,12 +324,11 @@ def cluster_upgrades(ctx, name):
317
324
  if data.get("upgradePolicy") == "automatic":
318
325
  data["schedule"] = c["upgradePolicy"]["schedule"]
319
326
  ocm = ocm_map.get(c["name"])
320
- if ocm:
321
- upgrade_policy = ocm.get_upgrade_policies(c["name"])
322
- if upgrade_policy and len(upgrade_policy) > 0:
323
- next_run = upgrade_policy[0].get("next_run")
324
- if next_run:
325
- data["next_run"] = next_run
327
+ upgrade_policy = get_upgrade_policies(ocm.ocm_api, c["spec"]["id"])
328
+ if upgrade_policy and len(upgrade_policy) > 0:
329
+ next_run = upgrade_policy[0].get("next_run")
330
+ if next_run:
331
+ data["next_run"] = next_run
326
332
  else:
327
333
  data["upgradePolicy"] = "manual"
328
334
 
@@ -336,7 +342,7 @@ def cluster_upgrades(ctx, name):
336
342
  @get.command()
337
343
  @environ(["APP_INTERFACE_STATE_BUCKET", "APP_INTERFACE_STATE_BUCKET_ACCOUNT"])
338
344
  @click.pass_context
339
- def version_history(ctx):
345
+ def version_history(ctx: click.Context) -> None:
340
346
  import reconcile.aus.ocm_upgrade_scheduler as ous
341
347
 
342
348
  clusters = aus_clusters_query(query_func=gql.get_api().query).clusters or []
@@ -372,11 +378,11 @@ def version_history(ctx):
372
378
 
373
379
  def get_upgrade_policies_data(
374
380
  org_upgrade_specs: list[OrganizationUpgradeSpec],
375
- md_output,
376
- integration,
377
- workload=None,
378
- show_only_soaking_upgrades=False,
379
- by_workload=False,
381
+ md_output: bool,
382
+ integration: str,
383
+ workload: str | None = None,
384
+ show_only_soaking_upgrades: bool = False,
385
+ by_workload: bool = False,
380
386
  ) -> list:
381
387
  if not org_upgrade_specs:
382
388
  return []
@@ -557,12 +563,12 @@ more than 6 hours will be highlighted.
557
563
  )
558
564
  @click.pass_context
559
565
  def cluster_upgrade_policies(
560
- ctx,
561
- cluster=None,
562
- workload=None,
563
- show_only_soaking_upgrades=False,
564
- by_workload=False,
565
- ):
566
+ ctx: click.Context,
567
+ cluster: str | None = None,
568
+ workload: str | None = None,
569
+ show_only_soaking_upgrades: bool = False,
570
+ by_workload: bool = False,
571
+ ) -> None:
566
572
  print(
567
573
  "https://grafana.app-sre.devshift.net/d/ukLXCSwVz/aus-cluster-upgrade-overview"
568
574
  )
@@ -577,9 +583,7 @@ def inherit_version_data_text(org: AUSOCMOrganization) -> str:
577
583
 
578
584
  @get.command()
579
585
  @click.pass_context
580
- def ocm_fleet_upgrade_policies(
581
- ctx,
582
- ):
586
+ def ocm_fleet_upgrade_policies(ctx: click.Context) -> None:
583
587
  from reconcile.aus.ocm_upgrade_scheduler_org import (
584
588
  OCMClusterUpgradeSchedulerOrgIntegration,
585
589
  )
@@ -612,7 +616,12 @@ def ocm_fleet_upgrade_policies(
612
616
  help="Ignore STS clusters",
613
617
  )
614
618
  @click.pass_context
615
- def aus_fleet_upgrade_policies(ctx, ocm_env, ocm_org_ids, ignore_sts_clusters):
619
+ def aus_fleet_upgrade_policies(
620
+ ctx: click.Context,
621
+ ocm_env: str | None,
622
+ ocm_org_ids: str | None,
623
+ ignore_sts_clusters: bool,
624
+ ) -> None:
616
625
  from reconcile.aus.advanced_upgrade_service import AdvancedUpgradeServiceIntegration
617
626
 
618
627
  parsed_ocm_org_ids = set(ocm_org_ids.split(",")) if ocm_org_ids else None
@@ -629,8 +638,8 @@ def aus_fleet_upgrade_policies(ctx, ocm_env, ocm_org_ids, ignore_sts_clusters):
629
638
 
630
639
 
631
640
  def generate_fleet_upgrade_policices_report(
632
- ctx, aus_integration: AdvancedUpgradeSchedulerBaseIntegration
633
- ):
641
+ ctx: click.Context, aus_integration: AdvancedUpgradeSchedulerBaseIntegration
642
+ ) -> None:
634
643
  md_output = ctx.obj["options"]["output"] == "md"
635
644
 
636
645
  org_upgrade_specs: dict[str, OrganizationUpgradeSpec] = {}
@@ -948,7 +957,7 @@ def upgrade_cluster_addon(
948
957
  )
949
958
 
950
959
 
951
- def has_cluster_account_access(cluster: dict[str, Any]):
960
+ def has_cluster_account_access(cluster: dict[str, Any]) -> bool:
952
961
  spec = cluster.get("spec") or {}
953
962
  account = spec.get("account")
954
963
  return account or cluster.get("awsInfrastructureManagementAccounts") is not None
@@ -957,7 +966,7 @@ def has_cluster_account_access(cluster: dict[str, Any]):
957
966
  @get.command()
958
967
  @click.argument("name", default="")
959
968
  @click.pass_context
960
- def clusters_network(ctx, name):
969
+ def clusters_network(ctx: click.Context, name: str) -> None:
961
970
  settings = queries.get_app_interface_settings()
962
971
  clusters = [
963
972
  c
@@ -1007,6 +1016,7 @@ def clusters_network(ctx, name):
1007
1016
  ]
1008
1017
  with AWSApi(1, [account], settings=settings, init_users=False) as aws_api:
1009
1018
  vpc_id, _, _, _ = aws_api.get_cluster_vpc_details(account)
1019
+ assert vpc_id
1010
1020
  cluster["vpc_id"] = vpc_id
1011
1021
  egress_ips = aws_api.get_cluster_nat_gateways_egress_ips(account, vpc_id)
1012
1022
  cluster["egress_ips"] = ", ".join(sorted(egress_ips))
@@ -1019,7 +1029,7 @@ def clusters_network(ctx, name):
1019
1029
 
1020
1030
  @get.command()
1021
1031
  @click.pass_context
1022
- def network_reservations(ctx) -> None:
1032
+ def network_reservations(ctx: click.Context) -> None:
1023
1033
  from reconcile.typed_queries.reserved_networks import get_networks
1024
1034
 
1025
1035
  columns = [
@@ -1032,11 +1042,10 @@ def network_reservations(ctx) -> None:
1032
1042
  ]
1033
1043
  network_table = []
1034
1044
 
1035
- def md_link(url) -> str:
1045
+ def md_link(url: str) -> str:
1036
1046
  if ctx.obj["options"]["output"] == "md":
1037
1047
  return f"[{url}]({url})"
1038
- else:
1039
- return url
1048
+ return url
1040
1049
 
1041
1050
  for network in get_networks():
1042
1051
  parentAddress = "none"
@@ -1077,7 +1086,7 @@ def network_reservations(ctx) -> None:
1077
1086
  default=24,
1078
1087
  )
1079
1088
  @click.pass_context
1080
- def cidr_blocks(ctx, for_cluster: int, mask: int) -> None:
1089
+ def cidr_blocks(ctx: click.Context, for_cluster: int, mask: int) -> None:
1081
1090
  import ipaddress
1082
1091
 
1083
1092
  from reconcile.typed_queries.aws_vpcs import get_aws_vpcs
@@ -1203,7 +1212,7 @@ def ocm_aws_infrastructure_access_switch_role_links_data() -> list[dict]:
1203
1212
 
1204
1213
  @get.command()
1205
1214
  @click.pass_context
1206
- def ocm_aws_infrastructure_access_switch_role_links_flat(ctx):
1215
+ def ocm_aws_infrastructure_access_switch_role_links_flat(ctx: click.Context) -> None:
1207
1216
  results = ocm_aws_infrastructure_access_switch_role_links_data()
1208
1217
  columns = ["cluster", "user_arn", "access_level", "switch_role_link"]
1209
1218
  print_output(ctx.obj["options"], results, columns)
@@ -1211,11 +1220,11 @@ def ocm_aws_infrastructure_access_switch_role_links_flat(ctx):
1211
1220
 
1212
1221
  @get.command()
1213
1222
  @click.pass_context
1214
- def ocm_aws_infrastructure_access_switch_role_links(ctx):
1223
+ def ocm_aws_infrastructure_access_switch_role_links(ctx: click.Context) -> None:
1215
1224
  if ctx.obj["options"]["output"] != "md":
1216
1225
  raise Exception(f"Unupported output: {ctx.obj['options']['output']}")
1217
1226
  results = ocm_aws_infrastructure_access_switch_role_links_data()
1218
- by_user = {}
1227
+ by_user: dict = {}
1219
1228
  for r in results:
1220
1229
  by_user.setdefault(r["user"], []).append(r)
1221
1230
  columns = ["cluster", "source_login", "access_level", "switch_role_link"]
@@ -1229,7 +1238,7 @@ def ocm_aws_infrastructure_access_switch_role_links(ctx):
1229
1238
 
1230
1239
  @get.command()
1231
1240
  @click.pass_context
1232
- def clusters_aws_account_ids(ctx):
1241
+ def clusters_aws_account_ids(ctx: click.Context) -> None:
1233
1242
  settings = queries.get_app_interface_settings()
1234
1243
  clusters = [c for c in queries.get_clusters() if c.get("ocm") is not None]
1235
1244
  ocm_map = OCMMap(clusters=clusters, settings=settings)
@@ -1259,7 +1268,7 @@ def clusters_aws_account_ids(ctx):
1259
1268
  @root.command()
1260
1269
  @click.argument("account_name")
1261
1270
  @click.pass_context
1262
- def user_credentials_migrate_output(ctx, account_name) -> None:
1271
+ def user_credentials_migrate_output(ctx: click.Context, account_name: str) -> None:
1263
1272
  accounts = queries.get_state_aws_accounts()
1264
1273
  state = init_state(integration="account-notifier")
1265
1274
  skip_accounts, appsre_pgp_key, _ = tfu.get_reencrypt_settings()
@@ -1301,7 +1310,7 @@ def user_credentials_migrate_output(ctx, account_name) -> None:
1301
1310
 
1302
1311
  @get.command()
1303
1312
  @click.pass_context
1304
- def aws_route53_zones(ctx):
1313
+ def aws_route53_zones(ctx: click.Context) -> None:
1305
1314
  zones = queries.get_dns_zones()
1306
1315
 
1307
1316
  results = []
@@ -1324,7 +1333,7 @@ def aws_route53_zones(ctx):
1324
1333
  @click.argument("cluster_name")
1325
1334
  @click.option("--cluster-admin/--no-cluster-admin", default=False)
1326
1335
  @click.pass_context
1327
- def bot_login(ctx, cluster_name, cluster_admin):
1336
+ def bot_login(ctx: click.Context, cluster_name: str, cluster_admin: bool) -> None:
1328
1337
  settings = queries.get_app_interface_settings()
1329
1338
  secret_reader = SecretReader(settings=settings)
1330
1339
  clusters = queries.get_clusters()
@@ -1347,7 +1356,7 @@ def bot_login(ctx, cluster_name, cluster_admin):
1347
1356
  )
1348
1357
  @click.argument("org_name")
1349
1358
  @click.pass_context
1350
- def ocm_login(ctx, org_name):
1359
+ def ocm_login(ctx: click.Context, org_name: str) -> None:
1351
1360
  settings = queries.get_app_interface_settings()
1352
1361
  secret_reader = SecretReader(settings=settings)
1353
1362
  ocms = [
@@ -1374,7 +1383,7 @@ def ocm_login(ctx, org_name):
1374
1383
  )
1375
1384
  @click.argument("account_name")
1376
1385
  @click.pass_context
1377
- def aws_creds(ctx, account_name):
1386
+ def aws_creds(ctx: click.Context, account_name: str) -> None:
1378
1387
  settings = queries.get_app_interface_settings()
1379
1388
  secret_reader = SecretReader(settings=settings)
1380
1389
  accounts = queries.get_aws_accounts(name=account_name)
@@ -1417,8 +1426,14 @@ def aws_creds(ctx, account_name):
1417
1426
  )
1418
1427
  @click.pass_context
1419
1428
  def copy_tfstate(
1420
- ctx, source_bucket, source_object_path, account_uid, rename, region, force
1421
- ):
1429
+ ctx: click.Context,
1430
+ source_bucket: str,
1431
+ source_object_path: str,
1432
+ account_uid: str,
1433
+ rename: str | None,
1434
+ region: str | None,
1435
+ force: bool,
1436
+ ) -> None:
1422
1437
  settings = queries.get_app_interface_settings()
1423
1438
  secret_reader = SecretReader(settings=settings)
1424
1439
  accounts = queries.get_aws_accounts(uid=account_uid, terraform_state=True)
@@ -1439,7 +1454,6 @@ def copy_tfstate(
1439
1454
  )
1440
1455
  return
1441
1456
 
1442
- dest_filename = ""
1443
1457
  if rename:
1444
1458
  dest_filename = rename.removesuffix(".tfstate")
1445
1459
  else:
@@ -1451,10 +1465,13 @@ def copy_tfstate(
1451
1465
  with AWSApi(1, accounts, settings, secret_reader) as aws:
1452
1466
  session = aws.get_session(account["name"])
1453
1467
  s3_client = aws.get_session_client(session, "s3", region)
1454
- copy_source = {
1455
- "Bucket": source_bucket,
1456
- "Key": source_object_path,
1457
- }
1468
+ copy_source = cast(
1469
+ CopySourceTypeDef,
1470
+ {
1471
+ "Bucket": source_bucket,
1472
+ "Key": source_object_path,
1473
+ },
1474
+ )
1458
1475
 
1459
1476
  dest_pretty_path = f"s3://{dest_bucket}/{dest_key}"
1460
1477
  # check if dest already exists
@@ -1497,20 +1514,26 @@ def copy_tfstate(
1497
1514
  @get.command(short_help='obtain "rosa create cluster" command by cluster name')
1498
1515
  @click.argument("cluster_name")
1499
1516
  @click.pass_context
1500
- def rosa_create_cluster_command(ctx, cluster_name):
1517
+ def rosa_create_cluster_command(ctx: click.Context, cluster_name: str) -> None:
1501
1518
  clusters = [c for c in get_clusters() if c.name == cluster_name]
1502
- try:
1503
- cluster = clusters[0]
1504
- except IndexError:
1519
+ if not clusters:
1505
1520
  print(f"{cluster_name} not found.")
1506
1521
  sys.exit(1)
1522
+ cluster = clusters[0]
1507
1523
 
1508
- if cluster.spec.product != OCM_PRODUCT_ROSA:
1524
+ if (
1525
+ not cluster.spec
1526
+ or cluster.spec.product != OCM_PRODUCT_ROSA
1527
+ or not isinstance(cluster.spec, ClusterSpecROSAV1)
1528
+ ):
1509
1529
  print("must be a rosa cluster.")
1510
1530
  sys.exit(1)
1511
1531
 
1512
1532
  settings = queries.get_app_interface_settings()
1513
1533
  account = cluster.spec.account
1534
+ if not account:
1535
+ print("account not found.")
1536
+ sys.exit(1)
1514
1537
 
1515
1538
  if account.billing_account:
1516
1539
  billing_account = account.billing_account.uid
@@ -1520,6 +1543,19 @@ def rosa_create_cluster_command(ctx, cluster_name):
1520
1543
  ) as aws_api:
1521
1544
  billing_account = aws_api.get_organization_billing_account(account.name)
1522
1545
 
1546
+ if not cluster.spec.oidc_endpoint_url:
1547
+ print("oidc_endpoint_url not set.")
1548
+ sys.exit(1)
1549
+ if not cluster.spec.subnet_ids:
1550
+ print("subnet_ids not set.")
1551
+ sys.exit(1)
1552
+ if not cluster.network:
1553
+ print("network not set.")
1554
+ sys.exit(1)
1555
+ if not cluster.machine_pools:
1556
+ print("machine_pools not set.")
1557
+ sys.exit(1)
1558
+
1523
1559
  print(
1524
1560
  " ".join([
1525
1561
  "rosa create cluster",
@@ -1573,7 +1609,9 @@ def rosa_create_cluster_command(ctx, cluster_name):
1573
1609
  @click.argument("jumphost_hostname", required=False)
1574
1610
  @click.argument("cluster_name", required=False)
1575
1611
  @click.pass_context
1576
- def sshuttle_command(ctx, jumphost_hostname: str | None, cluster_name: str | None):
1612
+ def sshuttle_command(
1613
+ ctx: click.Context, jumphost_hostname: str | None, cluster_name: str | None
1614
+ ) -> None:
1577
1615
  jumphosts_query_data = queries.get_jumphosts(hostname=jumphost_hostname)
1578
1616
  jumphosts = jumphosts_query_data.jumphosts or []
1579
1617
  for jh in jumphosts:
@@ -1595,7 +1633,9 @@ def sshuttle_command(ctx, jumphost_hostname: str | None, cluster_name: str | Non
1595
1633
  @click.argument("instance_name")
1596
1634
  @click.argument("job_name")
1597
1635
  @click.pass_context
1598
- def jenkins_job_vault_secrets(ctx, instance_name: str, job_name: str) -> None:
1636
+ def jenkins_job_vault_secrets(
1637
+ ctx: click.Context, instance_name: str, job_name: str
1638
+ ) -> None:
1599
1639
  secret_reader = SecretReader(queries.get_secret_reader_settings())
1600
1640
  jjb: JJB = init_jjb(secret_reader, instance_name, config_name=None, print_only=True)
1601
1641
  jobs = jjb.get_all_jobs([job_name], instance_name)[instance_name]
@@ -1620,7 +1660,7 @@ def jenkins_job_vault_secrets(ctx, instance_name: str, job_name: str) -> None:
1620
1660
  @get.command()
1621
1661
  @click.argument("name", default="")
1622
1662
  @click.pass_context
1623
- def namespaces(ctx, name):
1663
+ def namespaces(ctx: click.Context, name: str) -> None:
1624
1664
  namespaces = queries.get_namespaces()
1625
1665
  if name:
1626
1666
  namespaces = [ns for ns in namespaces if ns["name"] == name]
@@ -1632,7 +1672,7 @@ def namespaces(ctx, name):
1632
1672
  print_output(ctx.obj["options"], namespaces, columns)
1633
1673
 
1634
1674
 
1635
- def add_resource(item, resource, columns):
1675
+ def add_resource(item: dict, resource: Mapping, columns: list[str]) -> None:
1636
1676
  provider = resource["provider"]
1637
1677
  if provider not in columns:
1638
1678
  columns.append(provider)
@@ -1643,11 +1683,11 @@ def add_resource(item, resource, columns):
1643
1683
 
1644
1684
  @get.command
1645
1685
  @click.pass_context
1646
- def cluster_openshift_resources(ctx):
1686
+ def cluster_openshift_resources(ctx: click.Context) -> None:
1647
1687
  gqlapi = gql.get_api()
1648
1688
  namespaces = gqlapi.query(orb.NAMESPACES_QUERY)["namespaces"]
1649
1689
  columns = ["name", "total"]
1650
- results = {}
1690
+ results: dict = {}
1651
1691
  for ns_info in namespaces:
1652
1692
  cluster_name = ns_info["cluster"]["name"]
1653
1693
  item = {"name": cluster_name, "total": 0}
@@ -1668,10 +1708,10 @@ def cluster_openshift_resources(ctx):
1668
1708
 
1669
1709
  @get.command
1670
1710
  @click.pass_context
1671
- def aws_terraform_resources(ctx):
1711
+ def aws_terraform_resources(ctx: click.Context) -> None:
1672
1712
  namespaces = tfr.get_namespaces()
1673
1713
  columns = ["name", "total"]
1674
- results = {}
1714
+ results: dict = {}
1675
1715
  for ns_info in namespaces:
1676
1716
  specs = (
1677
1717
  get_external_resource_specs(
@@ -1723,7 +1763,7 @@ def rds_region(
1723
1763
 
1724
1764
  @get.command
1725
1765
  @click.pass_context
1726
- def rds(ctx):
1766
+ def rds(ctx: click.Context) -> None:
1727
1767
  namespaces = tfr.get_namespaces()
1728
1768
  accounts = {a["name"]: a for a in queries.get_aws_accounts()}
1729
1769
  results = []
@@ -1801,7 +1841,7 @@ You can view the source of this Markdown to extract the JSON data.
1801
1841
 
1802
1842
  @get.command
1803
1843
  @click.pass_context
1804
- def rds_recommendations(ctx):
1844
+ def rds_recommendations(ctx: click.Context) -> None:
1805
1845
  IGNORED_STATUSES = ("resolved",)
1806
1846
  IGNORED_SEVERITIES = ("informational",)
1807
1847
 
@@ -1850,23 +1890,23 @@ def rds_recommendations(ctx):
1850
1890
  with AWSApi(1, [account], settings=settings, init_users=False) as aws:
1851
1891
  try:
1852
1892
  data = aws.describe_rds_recommendations(account_name, region)
1853
- recommendations = data.get("DBRecommendations", [])
1893
+ db_recommendations = data.get("DBRecommendations", [])
1854
1894
  except Exception as e:
1855
1895
  logging.error(f"Error describing RDS recommendations: {e}")
1856
1896
  continue
1857
1897
 
1858
1898
  # Add field ResourceName infered from ResourceArn
1859
1899
  recommendations = [
1860
- {**rec, "ResourceName": rec["ResourceArn"].split(":")[-1]}
1861
- for rec in recommendations
1900
+ {
1901
+ **rec,
1902
+ "ResourceName": rec["ResourceArn"].split(":")[-1],
1903
+ # The Description field has \n that are causing issues with the markdown table
1904
+ "Description": rec["Description"].replace("\n", " "),
1905
+ }
1906
+ for rec in db_recommendations
1862
1907
  if rec.get("Status") not in IGNORED_STATUSES
1863
1908
  and rec.get("Severity") not in IGNORED_SEVERITIES
1864
1909
  ]
1865
- # The Description field has \n that are causing issues with the markdown table
1866
- recommendations = [
1867
- {**rec, "Description": rec["Description"].replace("\n", " ")}
1868
- for rec in recommendations
1869
- ]
1870
1910
  # If we have no recommendations to show, skip
1871
1911
  if not recommendations:
1872
1912
  continue
@@ -1880,7 +1920,7 @@ def rds_recommendations(ctx):
1880
1920
 
1881
1921
  @get.command()
1882
1922
  @click.pass_context
1883
- def products(ctx):
1923
+ def products(ctx: click.Context) -> None:
1884
1924
  products = queries.get_products()
1885
1925
  columns = ["name", "description"]
1886
1926
  print_output(ctx.obj["options"], products, columns)
@@ -1889,7 +1929,7 @@ def products(ctx):
1889
1929
  @describe.command()
1890
1930
  @click.argument("name")
1891
1931
  @click.pass_context
1892
- def product(ctx, name):
1932
+ def product(ctx: click.Context, name: str) -> None:
1893
1933
  products = queries.get_products()
1894
1934
  products = [p for p in products if p["name"].lower() == name.lower()]
1895
1935
  if len(products) != 1:
@@ -1904,7 +1944,7 @@ def product(ctx, name):
1904
1944
 
1905
1945
  @get.command()
1906
1946
  @click.pass_context
1907
- def environments(ctx):
1947
+ def environments(ctx: click.Context) -> None:
1908
1948
  environments = queries.get_environments()
1909
1949
  columns = ["name", "description", "product.name"]
1910
1950
  # TODO(mafriedm): fix this
@@ -1916,7 +1956,7 @@ def environments(ctx):
1916
1956
  @describe.command()
1917
1957
  @click.argument("name")
1918
1958
  @click.pass_context
1919
- def environment(ctx, name):
1959
+ def environment(ctx: click.Context, name: str) -> None:
1920
1960
  environments = queries.get_environments()
1921
1961
  environments = [e for e in environments if e["name"].lower() == name.lower()]
1922
1962
  if len(environments) != 1:
@@ -1934,7 +1974,7 @@ def environment(ctx, name):
1934
1974
 
1935
1975
  @get.command()
1936
1976
  @click.pass_context
1937
- def services(ctx):
1977
+ def services(ctx: click.Context) -> None:
1938
1978
  apps = queries.get_apps()
1939
1979
  columns = ["name", "path", "onboardingStatus"]
1940
1980
  print_output(ctx.obj["options"], apps, columns)
@@ -1942,17 +1982,15 @@ def services(ctx):
1942
1982
 
1943
1983
  @get.command()
1944
1984
  @click.pass_context
1945
- def repos(ctx):
1985
+ def repos(ctx: click.Context) -> None:
1946
1986
  repos = queries.get_repos()
1947
- repos = [{"url": r} for r in repos]
1948
- columns = ["url"]
1949
- print_output(ctx.obj["options"], repos, columns)
1987
+ print_output(ctx.obj["options"], [{"url": r} for r in repos], ["url"])
1950
1988
 
1951
1989
 
1952
1990
  @get.command()
1953
1991
  @click.argument("org_username")
1954
1992
  @click.pass_context
1955
- def roles(ctx, org_username):
1993
+ def roles(ctx: click.Context, org_username: str) -> None:
1956
1994
  users = queries.get_roles()
1957
1995
  users = [u for u in users if u["org_username"] == org_username]
1958
1996
 
@@ -1963,7 +2001,7 @@ def roles(ctx, org_username):
1963
2001
  user = users[0]
1964
2002
 
1965
2003
  # type, name, resource, [ref]
1966
- roles: dict[(str, str, str), set] = defaultdict(set)
2004
+ roles: dict[tuple[str, str, str], set[str]] = defaultdict(set)
1967
2005
 
1968
2006
  for role in user["roles"]:
1969
2007
  role_name = role["path"]
@@ -2017,7 +2055,7 @@ def roles(ctx, org_username):
2017
2055
  @get.command()
2018
2056
  @click.argument("org_username", default="")
2019
2057
  @click.pass_context
2020
- def users(ctx, org_username):
2058
+ def users(ctx: click.Context, org_username: str) -> None:
2021
2059
  users = queries.get_users()
2022
2060
  if org_username:
2023
2061
  users = [u for u in users if u["org_username"] == org_username]
@@ -2028,7 +2066,7 @@ def users(ctx, org_username):
2028
2066
 
2029
2067
  @get.command()
2030
2068
  @click.pass_context
2031
- def integrations(ctx):
2069
+ def integrations(ctx: click.Context) -> None:
2032
2070
  environments = queries.get_integrations()
2033
2071
  columns = ["name", "description"]
2034
2072
  print_output(ctx.obj["options"], environments, columns)
@@ -2036,7 +2074,7 @@ def integrations(ctx):
2036
2074
 
2037
2075
  @get.command()
2038
2076
  @click.pass_context
2039
- def quay_mirrors(ctx):
2077
+ def quay_mirrors(ctx: click.Context) -> None:
2040
2078
  apps = queries.get_quay_repos()
2041
2079
 
2042
2080
  mirrors = []
@@ -2074,7 +2112,9 @@ def quay_mirrors(ctx):
2074
2112
  @click.argument("kind")
2075
2113
  @click.argument("name")
2076
2114
  @click.pass_context
2077
- def root_owner(ctx, cluster, namespace, kind, name):
2115
+ def root_owner(
2116
+ ctx: click.Context, cluster: str, namespace: str, kind: str, name: str
2117
+ ) -> None:
2078
2118
  settings = queries.get_app_interface_settings()
2079
2119
  clusters = [c for c in queries.get_clusters(minimal=True) if c["name"] == cluster]
2080
2120
  oc_map = OC_Map(
@@ -2104,7 +2144,9 @@ def root_owner(ctx, cluster, namespace, kind, name):
2104
2144
  @click.argument("aws_account")
2105
2145
  @click.argument("identifier")
2106
2146
  @click.pass_context
2107
- def service_owners_for_rds_instance(ctx, aws_account, identifier):
2147
+ def service_owners_for_rds_instance(
2148
+ ctx: click.Context, aws_account: str, identifier: str
2149
+ ) -> None:
2108
2150
  namespaces = queries.get_namespaces()
2109
2151
  service_owners = []
2110
2152
  for namespace_info in namespaces:
@@ -2126,7 +2168,7 @@ def service_owners_for_rds_instance(ctx, aws_account, identifier):
2126
2168
 
2127
2169
  @get.command()
2128
2170
  @click.pass_context
2129
- def sre_checkpoints(ctx):
2171
+ def sre_checkpoints(ctx: click.Context) -> None:
2130
2172
  apps = queries.get_apps()
2131
2173
 
2132
2174
  parent_apps = {app["parentApp"]["path"] for app in apps if app.get("parentApp")}
@@ -2150,13 +2192,14 @@ def sre_checkpoints(ctx):
2150
2192
 
2151
2193
  @get.command()
2152
2194
  @click.pass_context
2153
- def app_interface_merge_queue(ctx):
2195
+ def app_interface_merge_queue(ctx: click.Context) -> None:
2154
2196
  import reconcile.gitlab_housekeeping as glhk
2155
2197
 
2156
2198
  settings = queries.get_app_interface_settings()
2157
2199
  instance = queries.get_gitlab_instance()
2158
2200
  gl = GitLabApi(instance, project_url=settings["repoUrl"], settings=settings)
2159
- merge_requests = glhk.get_merge_requests(True, gl, state=None)
2201
+ state = init_state(integration=glhk.QONTRACT_INTEGRATION)
2202
+ merge_requests = glhk.get_merge_requests(True, gl, state=state)
2160
2203
 
2161
2204
  columns = [
2162
2205
  "id",
@@ -2191,7 +2234,7 @@ def app_interface_merge_queue(ctx):
2191
2234
 
2192
2235
  @get.command()
2193
2236
  @click.pass_context
2194
- def app_interface_review_queue(ctx) -> None:
2237
+ def app_interface_review_queue(ctx: click.Context) -> None:
2195
2238
  import reconcile.gitlab_housekeeping as glhk
2196
2239
 
2197
2240
  settings = queries.get_app_interface_settings()
@@ -2208,7 +2251,7 @@ def app_interface_review_queue(ctx) -> None:
2208
2251
  "labels",
2209
2252
  ]
2210
2253
 
2211
- def get_mrs(repo, url) -> list[dict[str, str]]:
2254
+ def get_mrs(repo: str, url: str) -> list[dict[str, str]]:
2212
2255
  gl = GitLabApi(instance, project_url=url, settings=settings)
2213
2256
  merge_requests = gl.get_merge_requests(state=MRState.OPENED)
2214
2257
  try:
@@ -2229,7 +2272,7 @@ def app_interface_review_queue(ctx) -> None:
2229
2272
  }:
2230
2273
  continue
2231
2274
 
2232
- labels = mr.attributes.get("labels")
2275
+ labels = mr.attributes.get("labels") or []
2233
2276
  if glhk.is_good_to_merge(labels):
2234
2277
  continue
2235
2278
  if "stale" in labels:
@@ -2303,7 +2346,7 @@ def app_interface_review_queue(ctx) -> None:
2303
2346
 
2304
2347
  @get.command()
2305
2348
  @click.pass_context
2306
- def app_interface_open_selfserviceable_mr_queue(ctx):
2349
+ def app_interface_open_selfserviceable_mr_queue(ctx: click.Context) -> None:
2307
2350
  settings = queries.get_app_interface_settings()
2308
2351
  instance = queries.get_gitlab_instance()
2309
2352
  gl = GitLabApi(instance, project_url=settings["repoUrl"], settings=settings)
@@ -2324,7 +2367,7 @@ def app_interface_open_selfserviceable_mr_queue(ctx):
2324
2367
  continue
2325
2368
 
2326
2369
  # skip stale or non self serviceable MRs
2327
- labels = mr.attributes.get("labels")
2370
+ labels = mr.attributes.get("labels", [])
2328
2371
  if "stale" in labels:
2329
2372
  continue
2330
2373
  if SELF_SERVICEABLE not in labels and SAAS_FILE_UPDATE not in labels:
@@ -2366,7 +2409,7 @@ def app_interface_open_selfserviceable_mr_queue(ctx):
2366
2409
 
2367
2410
  @get.command()
2368
2411
  @click.pass_context
2369
- def change_types(ctx) -> None:
2412
+ def change_types(ctx: click.Context) -> None:
2370
2413
  """List all change types."""
2371
2414
  change_types = fetch_change_type_processors(gql.get_api(), NoOpFileDiffResolver())
2372
2415
 
@@ -2391,7 +2434,7 @@ def change_types(ctx) -> None:
2391
2434
 
2392
2435
  @get.command()
2393
2436
  @click.pass_context
2394
- def app_interface_merge_history(ctx):
2437
+ def app_interface_merge_history(ctx: click.Context) -> None:
2395
2438
  settings = queries.get_app_interface_settings()
2396
2439
  instance = queries.get_gitlab_instance()
2397
2440
  gl = GitLabApi(instance, project_url=settings["repoUrl"], settings=settings)
@@ -2413,7 +2456,7 @@ def app_interface_merge_history(ctx):
2413
2456
  "id": f"[{mr.iid}]({mr.web_url})",
2414
2457
  "title": mr.title,
2415
2458
  "merged_at": mr.merged_at,
2416
- "labels": ", ".join(mr.attributes.get("labels")),
2459
+ "labels": ", ".join(mr.attributes.get("labels", [])),
2417
2460
  }
2418
2461
  merge_queue_data.append(item)
2419
2462
 
@@ -2428,7 +2471,7 @@ def app_interface_merge_history(ctx):
2428
2471
  )
2429
2472
  @use_jump_host()
2430
2473
  @click.pass_context
2431
- def selectorsyncset_managed_resources(ctx, use_jump_host):
2474
+ def selectorsyncset_managed_resources(ctx: click.Context, use_jump_host: bool) -> None:
2432
2475
  vault_settings = get_app_interface_vault_settings()
2433
2476
  secret_reader = create_secret_reader(use_vault=vault_settings.vault)
2434
2477
  clusters = get_clusters()
@@ -2486,7 +2529,9 @@ def selectorsyncset_managed_resources(ctx, use_jump_host):
2486
2529
  )
2487
2530
  @use_jump_host()
2488
2531
  @click.pass_context
2489
- def selectorsyncset_managed_hypershift_resources(ctx, use_jump_host):
2532
+ def selectorsyncset_managed_hypershift_resources(
2533
+ ctx: click.Context, use_jump_host: bool
2534
+ ) -> None:
2490
2535
  vault_settings = get_app_interface_vault_settings()
2491
2536
  secret_reader = create_secret_reader(use_vault=vault_settings.vault)
2492
2537
  clusters = get_clusters()
@@ -2564,7 +2609,12 @@ def selectorsyncset_managed_hypershift_resources(ctx, use_jump_host):
2564
2609
  default=os.environ.get("QONTRACT_CLI_EC2_JENKINS_WORKER_AWS_REGION", "us-east-1"),
2565
2610
  )
2566
2611
  @click.pass_context
2567
- def ec2_jenkins_workers(ctx, aws_access_key_id, aws_secret_access_key, aws_region):
2612
+ def ec2_jenkins_workers(
2613
+ ctx: click.Context,
2614
+ aws_access_key_id: str,
2615
+ aws_secret_access_key: str,
2616
+ aws_region: str,
2617
+ ) -> None:
2568
2618
  """Prints a list of jenkins workers and their status."""
2569
2619
  if not aws_access_key_id or not aws_secret_access_key:
2570
2620
  raise click.ClickException(
@@ -2611,9 +2661,9 @@ def ec2_jenkins_workers(ctx, aws_access_key_id, aws_secret_access_key, aws_regio
2611
2661
  url = ""
2612
2662
  for t in instance.tags:
2613
2663
  if t.get("Key") == "os":
2614
- os = t.get("Value")
2664
+ os = t["Value"]
2615
2665
  if t.get("Key") == "jenkins_controller":
2616
- url = f"https://{t.get('Value').replace('-', '.')}.devshift.net/computer/{instance.id}"
2666
+ url = f"https://{t['Value'].replace('-', '.')}.devshift.net/computer/{instance.id}"
2617
2667
  image = ec2.Image(instance.image_id)
2618
2668
  commit_url = ""
2619
2669
  for t in image.tags:
@@ -2640,7 +2690,7 @@ def ec2_jenkins_workers(ctx, aws_access_key_id, aws_secret_access_key, aws_regio
2640
2690
  @get.command()
2641
2691
  @click.argument("status-board-instance")
2642
2692
  @click.pass_context
2643
- def slo_document_services(ctx, status_board_instance):
2693
+ def slo_document_services(ctx: click.Context, status_board_instance: str) -> None:
2644
2694
  """Print SLO Documents Services"""
2645
2695
  columns = [
2646
2696
  "slo_doc_name",
@@ -2669,7 +2719,7 @@ def slo_document_services(ctx, status_board_instance):
2669
2719
  slodocs = []
2670
2720
  for slodoc in get_slo_documents():
2671
2721
  products = [ns.namespace.environment.product.name for ns in slodoc.namespaces]
2672
- for slo in slodoc.slos:
2722
+ for slo in slodoc.slos or []:
2673
2723
  for product in products:
2674
2724
  if slodoc.app.parent_app:
2675
2725
  app = f"{slodoc.app.parent_app.name}-{slodoc.app.name}"
@@ -2695,7 +2745,7 @@ def slo_document_services(ctx, status_board_instance):
2695
2745
  "target_unit": slo.slo_target_unit,
2696
2746
  "window": slo.slo_parameters.window,
2697
2747
  "statusBoardService": f"{product}/{slodoc.app.name}/{slo.name}",
2698
- "statusBoardEnabled": "statusBoard" in slodoc.labels,
2748
+ "statusBoardEnabled": "statusBoard" in (slodoc.labels or {}),
2699
2749
  }
2700
2750
  slodocs.append(item)
2701
2751
 
@@ -2705,7 +2755,7 @@ def slo_document_services(ctx, status_board_instance):
2705
2755
  @get.command()
2706
2756
  @click.argument("file_path")
2707
2757
  @click.pass_context
2708
- def alerts(ctx, file_path):
2758
+ def alerts(ctx: click.Context, file_path: str) -> None:
2709
2759
  BIG_NUMBER = 10
2710
2760
 
2711
2761
  def sort_by_threshold(item: dict[str, str]) -> int:
@@ -2779,7 +2829,7 @@ def alerts(ctx, file_path):
2779
2829
  @get.command()
2780
2830
  @click.pass_context
2781
2831
  @thread_pool_size(default=5)
2782
- def aws_cost_report(ctx, thread_pool_size):
2832
+ def aws_cost_report(ctx: click.Context, thread_pool_size: int) -> None:
2783
2833
  command = AwsCostReportCommand.create(thread_pool_size=thread_pool_size)
2784
2834
  print(command.execute())
2785
2835
 
@@ -2787,7 +2837,7 @@ def aws_cost_report(ctx, thread_pool_size):
2787
2837
  @get.command()
2788
2838
  @click.pass_context
2789
2839
  @thread_pool_size(default=5)
2790
- def openshift_cost_report(ctx, thread_pool_size):
2840
+ def openshift_cost_report(ctx: click.Context, thread_pool_size: int) -> None:
2791
2841
  command = OpenShiftCostReportCommand.create(thread_pool_size=thread_pool_size)
2792
2842
  print(command.execute())
2793
2843
 
@@ -2795,7 +2845,9 @@ def openshift_cost_report(ctx, thread_pool_size):
2795
2845
  @get.command()
2796
2846
  @click.pass_context
2797
2847
  @thread_pool_size(default=5)
2798
- def openshift_cost_optimization_report(ctx, thread_pool_size):
2848
+ def openshift_cost_optimization_report(
2849
+ ctx: click.Context, thread_pool_size: int
2850
+ ) -> None:
2799
2851
  command = OpenShiftCostOptimizationReportCommand.create(
2800
2852
  thread_pool_size=thread_pool_size
2801
2853
  )
@@ -2804,7 +2856,7 @@ def openshift_cost_optimization_report(ctx, thread_pool_size):
2804
2856
 
2805
2857
  @get.command()
2806
2858
  @click.pass_context
2807
- def osd_component_versions(ctx):
2859
+ def osd_component_versions(ctx: click.Context) -> None:
2808
2860
  osd_environments = [
2809
2861
  e["name"] for e in queries.get_environments() if e["product"]["name"] == "OSDv4"
2810
2862
  ]
@@ -2840,7 +2892,7 @@ def osd_component_versions(ctx):
2840
2892
 
2841
2893
  @get.command()
2842
2894
  @click.pass_context
2843
- def maintenances(ctx):
2895
+ def maintenances(ctx: click.Context) -> None:
2844
2896
  now = datetime.now(UTC)
2845
2897
  maintenances = maintenances_gql.query(gql.get_api().query).maintenances or []
2846
2898
  data = [
@@ -2903,7 +2955,7 @@ class MigrationStatusCount:
2903
2955
 
2904
2956
  @get.command()
2905
2957
  @click.pass_context
2906
- def hcp_migration_status(ctx):
2958
+ def hcp_migration_status(ctx: click.Context) -> None:
2907
2959
  counts: dict[str, MigrationStatusCount] = {}
2908
2960
  total_count = MigrationStatusCount("total")
2909
2961
  saas_files = get_saas_files()
@@ -2924,6 +2976,7 @@ def hcp_migration_status(ctx):
2924
2976
  continue
2925
2977
  if t.delete:
2926
2978
  continue
2979
+ assert t.namespace.cluster.labels
2927
2980
  if hcp_migration := t.namespace.cluster.labels.get("hcp_migration"):
2928
2981
  app = sf.app.parent_app.name if sf.app.parent_app else sf.app.name
2929
2982
  counts.setdefault(app, MigrationStatusCount(app))
@@ -2940,7 +2993,7 @@ def hcp_migration_status(ctx):
2940
2993
 
2941
2994
  @get.command()
2942
2995
  @click.pass_context
2943
- def systems_and_tools(ctx):
2996
+ def systems_and_tools(ctx: click.Context) -> None:
2944
2997
  print(
2945
2998
  f"This report is obtained from app-interface Graphql endpoint available at: {config.get_config()['graphql']['server']}"
2946
2999
  )
@@ -2954,7 +3007,7 @@ def systems_and_tools(ctx):
2954
3007
  "--environment_name", default="production", help="environment to get logs from"
2955
3008
  )
2956
3009
  @click.pass_context
2957
- def logs(ctx, integration_name: str, environment_name: str):
3010
+ def logs(ctx: click.Context, integration_name: str, environment_name: str) -> None:
2958
3011
  integrations = [
2959
3012
  i
2960
3013
  for i in integrations_gql.query(query_func=gql.get_api().query).integrations
@@ -2993,7 +3046,7 @@ def logs(ctx, integration_name: str, environment_name: str):
2993
3046
 
2994
3047
  @get.command
2995
3048
  @click.pass_context
2996
- def jenkins_jobs(ctx):
3049
+ def jenkins_jobs(ctx: click.Context) -> None:
2997
3050
  jenkins_configs = queries.get_jenkins_configs()
2998
3051
 
2999
3052
  # stats dicts
@@ -3063,9 +3116,9 @@ You can view the source of this Markdown to extract the JSON data.
3063
3116
 
3064
3117
  @get.command
3065
3118
  @click.pass_context
3066
- def container_image_details(ctx):
3119
+ def container_image_details(ctx: click.Context) -> None:
3067
3120
  apps = get_apps_quay_repos_escalation_policies()
3068
- data: list[dict[str, str]] = []
3121
+ data: list[dict[str, str | list[str]]] = []
3069
3122
  for app in apps:
3070
3123
  app_name = f"{app.parent_app.name}/{app.name}" if app.parent_app else app.name
3071
3124
  ep_channels = app.escalation_policy.channels
@@ -3077,7 +3130,7 @@ def container_image_details(ctx):
3077
3130
  if repo.mirror:
3078
3131
  continue
3079
3132
  repository = f"quay.io/{org_name}/{repo.name}"
3080
- item = {
3133
+ item: dict[str, str | list[str]] = {
3081
3134
  "app": app_name,
3082
3135
  "repository": repository,
3083
3136
  "email": email,
@@ -3090,27 +3143,25 @@ def container_image_details(ctx):
3090
3143
 
3091
3144
  @get.command
3092
3145
  @click.pass_context
3093
- def change_log_tracking(ctx):
3146
+ def change_log_tracking(ctx: click.Context) -> None:
3094
3147
  repo_url = get_app_interface_repo_url()
3095
3148
  change_types = fetch_change_type_processors(gql.get_api(), NoOpFileDiffResolver())
3096
3149
  state = init_state(integration=cl.QONTRACT_INTEGRATION)
3097
3150
  change_log = ChangeLog(**state.get(BUNDLE_DIFFS_OBJ))
3098
3151
  data: list[dict[str, str]] = []
3099
- for item in change_log.items:
3100
- change_log_item = ChangeLogItem(**item)
3152
+ for change_log_item in change_log.items:
3101
3153
  commit = change_log_item.commit
3102
3154
  covered_change_types_descriptions = [
3103
3155
  ct.description
3104
3156
  for ct in change_types
3105
3157
  if ct.name in change_log_item.change_types
3106
3158
  ]
3107
- item = {
3159
+ data.append({
3108
3160
  "commit": f"[{commit[:7]}]({repo_url}/commit/{commit})",
3109
3161
  "merged_at": change_log_item.merged_at,
3110
3162
  "apps": ", ".join(change_log_item.apps),
3111
3163
  "changes": ", ".join(covered_change_types_descriptions),
3112
- }
3113
- data.append(item)
3164
+ })
3114
3165
 
3115
3166
  # TODO(mafriedm): Fix this
3116
3167
  ctx.obj["options"]["sort"] = False
@@ -3121,7 +3172,7 @@ def change_log_tracking(ctx):
3121
3172
  @root.group(name="set")
3122
3173
  @output
3123
3174
  @click.pass_context
3124
- def set_command(ctx, output):
3175
+ def set_command(ctx: click.Context, output: str) -> None:
3125
3176
  ctx.obj["output"] = output
3126
3177
 
3127
3178
 
@@ -3130,7 +3181,9 @@ def set_command(ctx, output):
3130
3181
  @click.argument("usergroup")
3131
3182
  @click.argument("username")
3132
3183
  @click.pass_context
3133
- def slack_usergroup(ctx, workspace, usergroup, username):
3184
+ def slack_usergroup(
3185
+ ctx: click.Context, workspace: str, usergroup: str, username: str
3186
+ ) -> None:
3134
3187
  """Update users in a slack usergroup.
3135
3188
  Use an org_username as the username.
3136
3189
  To empty a slack usergroup, pass '' (empty string) as the username.
@@ -3138,6 +3191,8 @@ def slack_usergroup(ctx, workspace, usergroup, username):
3138
3191
  settings = queries.get_app_interface_settings()
3139
3192
  slack = slackapi_from_queries("qontract-cli")
3140
3193
  ugid = slack.get_usergroup_id(usergroup)
3194
+ if not ugid:
3195
+ raise click.ClickException(f"Usergroup {usergroup} not found.")
3141
3196
  if username:
3142
3197
  mail_address = settings["smtp"]["mailAddress"]
3143
3198
  users = [slack.get_user_id_by_name(username, mail_address)]
@@ -3146,33 +3201,17 @@ def slack_usergroup(ctx, workspace, usergroup, username):
3146
3201
  slack.update_usergroup_users(ugid, users)
3147
3202
 
3148
3203
 
3149
- @set_command.command()
3150
- @click.argument("org_name")
3151
- @click.argument("cluster_name")
3152
- @click.pass_context
3153
- def cluster_admin(ctx, org_name, cluster_name):
3154
- settings = queries.get_app_interface_settings()
3155
- ocms = [
3156
- o for o in queries.get_openshift_cluster_managers() if o["name"] == org_name
3157
- ]
3158
- ocm_map = OCMMap(ocms=ocms, settings=settings)
3159
- ocm = ocm_map[org_name]
3160
- enabled = ocm.is_cluster_admin_enabled(cluster_name)
3161
- if not enabled:
3162
- ocm.enable_cluster_admin(cluster_name)
3163
-
3164
-
3165
3204
  @root.group()
3166
3205
  @environ(["APP_INTERFACE_STATE_BUCKET"])
3167
3206
  @click.pass_context
3168
- def state(ctx):
3207
+ def state(ctx: click.Context) -> None:
3169
3208
  pass
3170
3209
 
3171
3210
 
3172
3211
  @state.command()
3173
3212
  @click.argument("integration", default="")
3174
3213
  @click.pass_context
3175
- def ls(ctx, integration):
3214
+ def ls(ctx: click.Context, integration: str) -> None:
3176
3215
  state = init_state(integration=integration)
3177
3216
  keys = state.ls()
3178
3217
  # if integration in not defined the 2th token will be the integration name
@@ -3193,7 +3232,7 @@ def ls(ctx, integration):
3193
3232
  @click.argument("integration")
3194
3233
  @click.argument("key")
3195
3234
  @click.pass_context
3196
- def state_get(ctx, integration, key):
3235
+ def state_get(ctx: click.Context, integration: str, key: str) -> None:
3197
3236
  state = init_state(integration=integration)
3198
3237
  value = state.get(key)
3199
3238
  print(value)
@@ -3203,7 +3242,7 @@ def state_get(ctx, integration, key):
3203
3242
  @click.argument("integration")
3204
3243
  @click.argument("key")
3205
3244
  @click.pass_context
3206
- def add(ctx, integration, key):
3245
+ def add(ctx: click.Context, integration: str, key: str) -> None:
3207
3246
  state = init_state(integration=integration)
3208
3247
  state.add(key)
3209
3248
 
@@ -3213,7 +3252,7 @@ def add(ctx, integration, key):
3213
3252
  @click.argument("key")
3214
3253
  @click.argument("value")
3215
3254
  @click.pass_context
3216
- def state_set(ctx, integration, key, value):
3255
+ def state_set(ctx: click.Context, integration: str, key: str, value: str) -> None:
3217
3256
  state = init_state(integration=integration)
3218
3257
  state.add(key, value=value, force=True)
3219
3258
 
@@ -3222,7 +3261,7 @@ def state_set(ctx, integration, key, value):
3222
3261
  @click.argument("integration")
3223
3262
  @click.argument("key")
3224
3263
  @click.pass_context
3225
- def rm(ctx, integration, key):
3264
+ def rm(ctx: click.Context, integration: str, key: str) -> None:
3226
3265
  state = init_state(integration=integration)
3227
3266
  state.rm(key)
3228
3267
 
@@ -3230,7 +3269,7 @@ def rm(ctx, integration, key):
3230
3269
  @root.group()
3231
3270
  @environ(["APP_INTERFACE_STATE_BUCKET"])
3232
3271
  @click.pass_context
3233
- def early_exit_cache(ctx):
3272
+ def early_exit_cache(ctx: click.Context) -> None:
3234
3273
  pass
3235
3274
 
3236
3275
 
@@ -3266,13 +3305,13 @@ def early_exit_cache(ctx):
3266
3305
  )
3267
3306
  @click.pass_context
3268
3307
  def early_exit_cache_head(
3269
- ctx,
3270
- integration,
3271
- integration_version,
3272
- dry_run,
3273
- cache_source,
3274
- shard,
3275
- ):
3308
+ ctx: click.Context,
3309
+ integration: str,
3310
+ integration_version: str,
3311
+ dry_run: bool,
3312
+ cache_source: str,
3313
+ shard: str,
3314
+ ) -> None:
3276
3315
  with EarlyExitCache.build() as cache:
3277
3316
  cache_key = CacheKey(
3278
3317
  integration=integration,
@@ -3318,13 +3357,13 @@ def early_exit_cache_head(
3318
3357
  )
3319
3358
  @click.pass_context
3320
3359
  def early_exit_cache_get(
3321
- ctx,
3322
- integration,
3323
- integration_version,
3324
- dry_run,
3325
- cache_source,
3326
- shard,
3327
- ):
3360
+ ctx: click.Context,
3361
+ integration: str,
3362
+ integration_version: str,
3363
+ dry_run: bool,
3364
+ cache_source: str,
3365
+ shard: str,
3366
+ ) -> None:
3328
3367
  with EarlyExitCache.build() as cache:
3329
3368
  cache_key = CacheKey(
3330
3369
  integration=integration,
@@ -3401,18 +3440,18 @@ def early_exit_cache_get(
3401
3440
  )
3402
3441
  @click.pass_context
3403
3442
  def early_exit_cache_set(
3404
- ctx,
3405
- integration,
3406
- integration_version,
3407
- dry_run,
3408
- cache_source,
3409
- shard,
3410
- payload,
3411
- log_output,
3412
- applied_count,
3413
- ttl,
3414
- latest_cache_source_digest,
3415
- ):
3443
+ ctx: click.Context,
3444
+ integration: str,
3445
+ integration_version: str,
3446
+ dry_run: bool,
3447
+ cache_source: str,
3448
+ shard: str,
3449
+ payload: str,
3450
+ log_output: str,
3451
+ applied_count: int,
3452
+ ttl: int,
3453
+ latest_cache_source_digest: str,
3454
+ ) -> None:
3416
3455
  with EarlyExitCache.build() as cache:
3417
3456
  cache_key = CacheKey(
3418
3457
  integration=integration,
@@ -3461,13 +3500,13 @@ def early_exit_cache_set(
3461
3500
  )
3462
3501
  @click.pass_context
3463
3502
  def early_exit_cache_delete(
3464
- ctx,
3465
- integration,
3466
- integration_version,
3467
- dry_run,
3468
- cache_source_digest,
3469
- shard,
3470
- ):
3503
+ ctx: click.Context,
3504
+ integration: str,
3505
+ integration_version: str,
3506
+ dry_run: bool,
3507
+ cache_source_digest: str,
3508
+ shard: str,
3509
+ ) -> None:
3471
3510
  with EarlyExitCache.build() as cache:
3472
3511
  cache_key_with_digest = CacheKeyWithDigest(
3473
3512
  integration=integration,
@@ -3498,25 +3537,33 @@ def early_exit_cache_delete(
3498
3537
  type=click.Choice(["config", "vault"]),
3499
3538
  )
3500
3539
  @click.pass_context
3501
- def template(ctx, cluster, namespace, kind, name, path, secret_reader):
3540
+ def template(
3541
+ ctx: click.Context,
3542
+ cluster: str,
3543
+ namespace: str,
3544
+ kind: str,
3545
+ name: str,
3546
+ path: str,
3547
+ secret_reader: str,
3548
+ ) -> None:
3502
3549
  gqlapi = gql.get_api()
3503
3550
  namespaces = gqlapi.query(orb.NAMESPACES_QUERY)["namespaces"]
3504
- namespace_info = [
3551
+ namespaces_info = [
3505
3552
  n
3506
3553
  for n in namespaces
3507
3554
  if n["cluster"]["name"] == cluster and n["name"] == namespace
3508
3555
  ]
3509
- if len(namespace_info) != 1:
3556
+ if len(namespaces_info) != 1:
3510
3557
  print(f"{cluster}/{namespace} error")
3511
3558
  sys.exit(1)
3512
3559
 
3560
+ namespace_info = namespaces_info[0]
3513
3561
  settings = queries.get_app_interface_settings()
3514
3562
  settings["vault"] = secret_reader == "vault"
3515
3563
 
3516
3564
  if path and path.startswith("resources"):
3517
3565
  path = path.replace("resources", "", 1)
3518
3566
 
3519
- [namespace_info] = namespace_info
3520
3567
  ob.aggregate_shared_resources(namespace_info, "openshiftResources")
3521
3568
  openshift_resources = namespace_info.get("openshiftResources")
3522
3569
  for r in openshift_resources:
@@ -3557,7 +3604,9 @@ def template(ctx, cluster, namespace, kind, name, path, secret_reader):
3557
3604
  type=click.Choice(["config", "vault"]),
3558
3605
  )
3559
3606
  @click.pass_context
3560
- def run_prometheus_test(ctx, path, cluster, namespace, secret_reader):
3607
+ def run_prometheus_test(
3608
+ ctx: click.Context, path: str, cluster: str, namespace: str, secret_reader: str
3609
+ ) -> None:
3561
3610
  """Run prometheus tests for the rule associated with the test in the PATH from given
3562
3611
  CLUSTER/NAMESPACE"""
3563
3612
 
@@ -3643,17 +3692,17 @@ def run_prometheus_test(ctx, path, cluster, namespace, secret_reader):
3643
3692
  )
3644
3693
  @click.pass_context
3645
3694
  def alert_to_receiver(
3646
- ctx,
3647
- cluster,
3648
- namespace,
3649
- rules_path,
3650
- alert_name,
3651
- alertmanager_secret_path,
3652
- alertmanager_namespace,
3653
- alertmanager_secret_key,
3654
- secret_reader,
3655
- additional_label,
3656
- ):
3695
+ ctx: click.Context,
3696
+ cluster: str,
3697
+ namespace: str,
3698
+ rules_path: str,
3699
+ alert_name: str,
3700
+ alertmanager_secret_path: str,
3701
+ alertmanager_namespace: str,
3702
+ alertmanager_secret_key: str,
3703
+ secret_reader: str,
3704
+ additional_label: list[str],
3705
+ ) -> None:
3657
3706
  additional_labels = {}
3658
3707
  for al in additional_label:
3659
3708
  try:
@@ -3745,12 +3794,12 @@ def alert_to_receiver(
3745
3794
  print(f"Cannot find alert {alert_name} in rules {rules_path}")
3746
3795
  sys.exit(1)
3747
3796
 
3748
- for al in alert_labels:
3749
- result = amtool.config_routes_test(am_config, al)
3797
+ for label in alert_labels:
3798
+ result = amtool.config_routes_test(am_config, label)
3750
3799
  if not result:
3751
3800
  print(f"Error running amtool: {result}")
3752
3801
  sys.exit(1)
3753
- print("|".join([al["alertname"], str(result)]))
3802
+ print("|".join([label["alertname"], str(result)]))
3754
3803
 
3755
3804
 
3756
3805
  @root.command()
@@ -3758,7 +3807,12 @@ def alert_to_receiver(
3758
3807
  @click.option("--saas-file-name", default=None, help="saas-file to act on.")
3759
3808
  @click.option("--env-name", default=None, help="environment to use for parameters.")
3760
3809
  @click.pass_context
3761
- def saas_dev(ctx, app_name=None, saas_file_name=None, env_name=None) -> None:
3810
+ def saas_dev(
3811
+ ctx: click.Context,
3812
+ app_name: str | None = None,
3813
+ saas_file_name: str | None = None,
3814
+ env_name: str | None = None,
3815
+ ) -> None:
3762
3816
  if not env_name:
3763
3817
  print("env-name must be defined")
3764
3818
  return
@@ -3806,7 +3860,7 @@ def saas_dev(ctx, app_name=None, saas_file_name=None, env_name=None) -> None:
3806
3860
  @click.option("--app-name", default=None, help="app to act on.")
3807
3861
  @click.pass_context
3808
3862
  def saas_targets(
3809
- ctx, saas_file_name: str | None = None, app_name: str | None = None
3863
+ ctx: click.Context, saas_file_name: str | None = None, app_name: str | None = None
3810
3864
  ) -> None:
3811
3865
  """Resolve namespaceSelectors and print all resulting targets of a saas file."""
3812
3866
  console = Console()
@@ -3870,7 +3924,7 @@ def saas_targets(
3870
3924
  default="json",
3871
3925
  type=click.Choice(["json", "yaml"]),
3872
3926
  )
3873
- def query(output, query):
3927
+ def query(output: str, query: str) -> None:
3874
3928
  """Run a raw GraphQL query"""
3875
3929
  gqlapi = gql.get_api()
3876
3930
  result = gqlapi.query(query)
@@ -3884,7 +3938,7 @@ def query(output, query):
3884
3938
  @root.command()
3885
3939
  @click.argument("cluster")
3886
3940
  @click.argument("query")
3887
- def promquery(cluster, query):
3941
+ def promquery(cluster: str, query: str) -> None:
3888
3942
  """Run a PromQL query"""
3889
3943
  config_data = config.get_config()
3890
3944
  auth = {"path": config_data["promql-auth"]["secret_path"], "field": "token"}
@@ -3935,8 +3989,13 @@ def promquery(cluster, query):
3935
3989
  default=False,
3936
3990
  )
3937
3991
  def sre_checkpoint_metadata(
3938
- app_path, parent_ticket, jiraboard, jiradef, create_parent_ticket, dry_run
3939
- ):
3992
+ app_path: str,
3993
+ parent_ticket: str,
3994
+ jiraboard: str,
3995
+ jiradef: str,
3996
+ create_parent_ticket: bool,
3997
+ dry_run: bool,
3998
+ ) -> None:
3940
3999
  """Check an app path for checkpoint-related metadata."""
3941
4000
  data = queries.get_app_metadata(app_path)
3942
4001
  settings = queries.get_app_interface_settings()
@@ -3975,8 +4034,13 @@ def sre_checkpoint_metadata(
3975
4034
  required=True,
3976
4035
  )
3977
4036
  def gpg_encrypt(
3978
- vault_path, vault_secret_version, file_path, openshift_path, output, for_user
3979
- ):
4037
+ vault_path: str,
4038
+ vault_secret_version: str,
4039
+ file_path: str,
4040
+ openshift_path: str,
4041
+ output: str,
4042
+ for_user: str,
4043
+ ) -> None:
3980
4044
  """
3981
4045
  Encrypt the specified secret (local file, vault or openshift) with a
3982
4046
  given users gpg key. This is intended for easily sharing secrets with
@@ -3999,7 +4063,7 @@ def gpg_encrypt(
3999
4063
  @click.option("--channel", help="the channel that state is part of")
4000
4064
  @click.option("--sha", help="the commit sha we want state for")
4001
4065
  @environ(["APP_INTERFACE_STATE_BUCKET"])
4002
- def get_promotion_state(channel: str, sha: str):
4066
+ def get_promotion_state(channel: str, sha: str) -> None:
4003
4067
  from tools.saas_promotion_state.saas_promotion_state import (
4004
4068
  SaasPromotionState,
4005
4069
  )
@@ -4024,7 +4088,7 @@ def get_promotion_state(channel: str, sha: str):
4024
4088
  @click.option("--sha", help="the commit sha we want state for")
4025
4089
  @click.option("--publisher-id", help="the publisher id we want state for")
4026
4090
  @environ(["APP_INTERFACE_STATE_BUCKET"])
4027
- def mark_promotion_state_successful(channel: str, sha: str, publisher_id: str):
4091
+ def mark_promotion_state_successful(channel: str, sha: str, publisher_id: str) -> None:
4028
4092
  from tools.saas_promotion_state.saas_promotion_state import (
4029
4093
  SaasPromotionState,
4030
4094
  )
@@ -4048,7 +4112,9 @@ def mark_promotion_state_successful(channel: str, sha: str, publisher_id: str):
4048
4112
  help="filesystem path to a local app-interface repo",
4049
4113
  default=os.environ.get("APP_INTERFACE_PATH", None),
4050
4114
  )
4051
- def test_change_type(change_type_name: str, role_name: str, app_interface_path: str):
4115
+ def test_change_type(
4116
+ change_type_name: str, role_name: str, app_interface_path: str
4117
+ ) -> None:
4052
4118
  from reconcile.change_owners import tester
4053
4119
 
4054
4120
  # tester.test_change_type(change_type_name, datafile_path)
@@ -4057,7 +4123,7 @@ def test_change_type(change_type_name: str, role_name: str, app_interface_path:
4057
4123
 
4058
4124
  @root.group()
4059
4125
  @click.pass_context
4060
- def sso_client(ctx):
4126
+ def sso_client(ctx: click.Context) -> None:
4061
4127
  """SSO client commands"""
4062
4128
 
4063
4129
 
@@ -4093,7 +4159,7 @@ def sso_client(ctx):
4093
4159
  )
4094
4160
  @click.pass_context
4095
4161
  def create(
4096
- ctx,
4162
+ ctx: click.Context,
4097
4163
  client_name: str,
4098
4164
  contact_email: str,
4099
4165
  keycloak_instance_vault_path: str,
@@ -4127,7 +4193,7 @@ def create(
4127
4193
  @sso_client.command()
4128
4194
  @click.argument("sso-client-vault-secret-path", required=True)
4129
4195
  @click.pass_context
4130
- def remove(ctx, sso_client_vault_secret_path: str):
4196
+ def remove(ctx: click.Context, sso_client_vault_secret_path: str) -> None:
4131
4197
  """Remove an existing SSO client"""
4132
4198
  vault_settings = get_app_interface_vault_settings()
4133
4199
  secret_reader = create_secret_reader(use_vault=vault_settings.vault)
@@ -4174,8 +4240,12 @@ def remove(ctx, sso_client_vault_secret_path: str):
4174
4240
  )
4175
4241
  @click.pass_context
4176
4242
  def external_resources(
4177
- ctx, provision_provider: str, provisioner: str, provider: str, identifier: str
4178
- ):
4243
+ ctx: click.Context,
4244
+ provision_provider: str,
4245
+ provisioner: str,
4246
+ provider: str,
4247
+ identifier: str,
4248
+ ) -> None:
4179
4249
  """External resources commands"""
4180
4250
  ctx.obj["provision_provider"] = provision_provider
4181
4251
  ctx.obj["provisioner"] = provisioner
@@ -4187,7 +4257,7 @@ def external_resources(
4187
4257
 
4188
4258
  @external_resources.command()
4189
4259
  @click.pass_context
4190
- def get_input(ctx):
4260
+ def get_input(ctx: click.Context) -> None:
4191
4261
  """Gets the input data for an external resource asset. Input data is what is used
4192
4262
  in the Reconciliation Job to manage the resource."""
4193
4263
  erv2cli = Erv2Cli(
@@ -4202,7 +4272,7 @@ def get_input(ctx):
4202
4272
 
4203
4273
  @external_resources.command()
4204
4274
  @click.pass_context
4205
- def request_reconciliation(ctx):
4275
+ def request_reconciliation(ctx: click.Context) -> None:
4206
4276
  """Marks a resource as it needs to get reconciled. The itegration will reconcile the resource at
4207
4277
  its next iteration."""
4208
4278
  erv2cli = Erv2Cli(
@@ -4229,7 +4299,7 @@ def request_reconciliation(ctx):
4229
4299
  default=False,
4230
4300
  )
4231
4301
  @click.pass_context
4232
- def migrate(ctx, dry_run: bool, skip_build: bool) -> None:
4302
+ def migrate(ctx: click.Context, dry_run: bool, skip_build: bool) -> None:
4233
4303
  """Migrate an existing external resource managed by terraform-resources to ERv2.
4234
4304
 
4235
4305
 
@@ -4335,7 +4405,7 @@ def migrate(ctx, dry_run: bool, skip_build: bool) -> None:
4335
4405
  @external_resources.command()
4336
4406
  @binary(["docker"])
4337
4407
  @click.pass_context
4338
- def debug_shell(ctx) -> None:
4408
+ def debug_shell(ctx: click.Context) -> None:
4339
4409
  """Enter an ERv2 debug shell to manually migrate resources."""
4340
4410
  # use a temporary directory in $HOME. The MacOS colima default configuration allows docker mounts from $HOME.
4341
4411
  with tempfile.TemporaryDirectory(dir=Path.home(), prefix="erv2-debug.") as _tempdir:
@@ -4374,7 +4444,7 @@ def debug_shell(ctx) -> None:
4374
4444
  prompt=True,
4375
4445
  )
4376
4446
  @click.pass_context
4377
- def force_unlock(ctx, lock_id: str) -> None:
4447
+ def force_unlock(ctx: click.Context, lock_id: str) -> None:
4378
4448
  """Manually unlock the ERv2 terraform state."""
4379
4449
  # use a temporary directory in $HOME. The MacOS colima default configuration allows docker mounts from $HOME.
4380
4450
  with tempfile.TemporaryDirectory(
@@ -4415,14 +4485,14 @@ def force_unlock(ctx, lock_id: str) -> None:
4415
4485
  @click.option("--include-pattern", help="Only include images that match this pattern")
4416
4486
  @click.pass_context
4417
4487
  def container_images(
4418
- ctx,
4419
- cluster_name,
4420
- namespace_name,
4421
- thread_pool_size,
4422
- use_jump_host,
4423
- exclude_pattern,
4424
- include_pattern,
4425
- ):
4488
+ ctx: click.Context,
4489
+ cluster_name: str,
4490
+ namespace_name: str,
4491
+ thread_pool_size: int,
4492
+ use_jump_host: bool,
4493
+ exclude_pattern: str,
4494
+ include_pattern: str,
4495
+ ) -> None:
4426
4496
  from tools.cli_commands.container_images_report import get_all_pods_images
4427
4497
 
4428
4498
  results = get_all_pods_images(
@@ -4469,7 +4539,7 @@ You can view the source of this Markdown to extract the JSON data.
4469
4539
  @get.command(help="Get all app tekton pipelines providers roles and users")
4470
4540
  @click.argument("app-name")
4471
4541
  @click.pass_context
4472
- def tekton_roles_and_users(ctx, app_name):
4542
+ def tekton_roles_and_users(ctx: click.Context, app_name: str) -> None:
4473
4543
  pp_namespaces = {
4474
4544
  p.namespace.path
4475
4545
  for p in get_tekton_pipeline_providers()
@@ -4496,6 +4566,7 @@ def tekton_roles_and_users(ctx, app_name):
4496
4566
  if not seen:
4497
4567
  seen = True
4498
4568
 
4569
+ users: str | list[str]
4499
4570
  if ctx.obj["options"]["output"] == "table":
4500
4571
  users = ", ".join([u.org_username for u in r.users])
4501
4572
  else:
@@ -4515,7 +4586,7 @@ def tekton_roles_and_users(ctx, app_name):
4515
4586
  )
4516
4587
  @click.argument("aws-account")
4517
4588
  @click.pass_context
4518
- def log_group_usage(ctx, aws_account):
4589
+ def log_group_usage(ctx: click.Context, aws_account: str) -> None:
4519
4590
  accounts = queries.get_aws_accounts(name=aws_account)
4520
4591
  if not accounts:
4521
4592
  print("no aws account found with that name")
@@ -4525,7 +4596,7 @@ def log_group_usage(ctx, aws_account):
4525
4596
  settings = queries.get_app_interface_settings()
4526
4597
  secret_reader = SecretReader(settings=settings)
4527
4598
  columns = ["log_group", "stored_bytes", "retention_days"]
4528
- results = []
4599
+ results: list[dict[str, str | int]] = []
4529
4600
 
4530
4601
  with AWSApi(1, [account], settings, secret_reader) as aws:
4531
4602
  session = aws.get_session(account["name"])