qontract-reconcile 0.10.2.dev50__py3-none-any.whl → 0.10.2.dev51__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of qontract-reconcile might be problematic. See the registry's advisory page for this release for more details.

tools/qontract_cli.py CHANGED
@@ -10,6 +10,7 @@ import sys
10
10
  import tempfile
11
11
  import textwrap
12
12
  from collections import defaultdict
13
+ from collections.abc import Callable, Mapping
13
14
  from datetime import (
14
15
  UTC,
15
16
  datetime,
@@ -54,7 +55,6 @@ from reconcile.change_owners.bundle import NoOpFileDiffResolver
54
55
  from reconcile.change_owners.change_log_tracking import (
55
56
  BUNDLE_DIFFS_OBJ,
56
57
  ChangeLog,
57
- ChangeLogItem,
58
58
  )
59
59
  from reconcile.change_owners.change_owners import (
60
60
  fetch_change_type_processors,
@@ -81,6 +81,7 @@ from reconcile.gql_definitions.app_sre_tekton_access_revalidation.roles import (
81
81
  from reconcile.gql_definitions.common.app_interface_vault_settings import (
82
82
  AppInterfaceSettingsV1,
83
83
  )
84
+ from reconcile.gql_definitions.common.clusters import ClusterSpecROSAV1
84
85
  from reconcile.gql_definitions.fragments.aus_organization import AUSOCMOrganization
85
86
  from reconcile.gql_definitions.integrations import integrations as integrations_gql
86
87
  from reconcile.gql_definitions.maintenance import maintenances as maintenances_gql
@@ -152,6 +153,7 @@ from reconcile.utils.oc_map import (
152
153
  init_oc_map_from_clusters,
153
154
  )
154
155
  from reconcile.utils.ocm import OCM_PRODUCT_ROSA, OCMMap
156
+ from reconcile.utils.ocm.upgrades import get_upgrade_policies
155
157
  from reconcile.utils.ocm_base_client import init_ocm_base_client
156
158
  from reconcile.utils.output import print_output
157
159
  from reconcile.utils.saasherder.models import TargetSpec
@@ -185,7 +187,7 @@ from tools.sre_checkpoints import (
185
187
  )
186
188
 
187
189
 
188
- def output(function):
190
+ def output(function: Callable) -> Callable:
189
191
  function = click.option(
190
192
  "--output",
191
193
  "-o",
@@ -196,14 +198,14 @@ def output(function):
196
198
  return function
197
199
 
198
200
 
199
- def sort(function):
201
+ def sort(function: Callable) -> Callable:
200
202
  function = click.option(
201
203
  "--sort", "-s", help="sort output", default=True, type=bool
202
204
  )(function)
203
205
  return function
204
206
 
205
207
 
206
- def to_string(function):
208
+ def to_string(function: Callable) -> Callable:
207
209
  function = click.option(
208
210
  "--to-string", help="stringify output", default=False, type=bool
209
211
  )(function)
@@ -213,14 +215,14 @@ def to_string(function):
213
215
  @click.group()
214
216
  @config_file
215
217
  @click.pass_context
216
- def root(ctx, configfile):
218
+ def root(ctx: click.Context, configfile: str) -> None:
217
219
  ctx.ensure_object(dict)
218
220
  config.init_from_toml(configfile)
219
221
  gql.init_from_config()
220
222
 
221
223
 
222
224
  @root.result_callback()
223
- def exit_cli(ctx, configfile):
225
+ def exit_cli(ctx: click.Context, configfile: str) -> None:
224
226
  GqlApiSingleton.close()
225
227
 
226
228
 
@@ -229,7 +231,7 @@ def exit_cli(ctx, configfile):
229
231
  @sort
230
232
  @to_string
231
233
  @click.pass_context
232
- def get(ctx, output, sort, to_string):
234
+ def get(ctx: click.Context, output: str, sort: bool, to_string: bool) -> None:
233
235
  ctx.obj["options"] = {
234
236
  "output": output,
235
237
  "sort": sort,
@@ -240,7 +242,7 @@ def get(ctx, output, sort, to_string):
240
242
  @root.group()
241
243
  @output
242
244
  @click.pass_context
243
- def describe(ctx, output):
245
+ def describe(ctx: click.Context, output: str) -> None:
244
246
  ctx.obj["options"] = {
245
247
  "output": output,
246
248
  }
@@ -248,7 +250,7 @@ def describe(ctx, output):
248
250
 
249
251
  @get.command()
250
252
  @click.pass_context
251
- def settings(ctx):
253
+ def settings(ctx: click.Context) -> None:
252
254
  settings = queries.get_app_interface_settings()
253
255
  columns = ["vault", "kubeBinary", "mergeRequestGateway"]
254
256
  print_output(ctx.obj["options"], [settings], columns)
@@ -257,7 +259,7 @@ def settings(ctx):
257
259
  @get.command()
258
260
  @click.argument("name", default="")
259
261
  @click.pass_context
260
- def aws_accounts(ctx, name):
262
+ def aws_accounts(ctx: click.Context, name: str) -> None:
261
263
  accounts = queries.get_aws_accounts(name=name)
262
264
  if not accounts:
263
265
  print("no aws accounts found")
@@ -269,7 +271,7 @@ def aws_accounts(ctx, name):
269
271
  @get.command()
270
272
  @click.argument("name", default="")
271
273
  @click.pass_context
272
- def clusters(ctx, name):
274
+ def clusters(ctx: click.Context, name: str) -> None:
273
275
  clusters = queries.get_clusters()
274
276
  if name:
275
277
  clusters = [c for c in clusters if c["name"] == name]
@@ -286,7 +288,7 @@ def clusters(ctx, name):
286
288
  @get.command()
287
289
  @click.argument("name", default="")
288
290
  @click.pass_context
289
- def cluster_upgrades(ctx, name):
291
+ def cluster_upgrades(ctx: click.Context, name: str) -> None:
290
292
  settings = queries.get_app_interface_settings()
291
293
 
292
294
  clusters = queries.get_clusters()
@@ -317,12 +319,11 @@ def cluster_upgrades(ctx, name):
317
319
  if data.get("upgradePolicy") == "automatic":
318
320
  data["schedule"] = c["upgradePolicy"]["schedule"]
319
321
  ocm = ocm_map.get(c["name"])
320
- if ocm:
321
- upgrade_policy = ocm.get_upgrade_policies(c["name"])
322
- if upgrade_policy and len(upgrade_policy) > 0:
323
- next_run = upgrade_policy[0].get("next_run")
324
- if next_run:
325
- data["next_run"] = next_run
322
+ upgrade_policy = get_upgrade_policies(ocm.ocm_api, c["spec"]["id"])
323
+ if upgrade_policy and len(upgrade_policy) > 0:
324
+ next_run = upgrade_policy[0].get("next_run")
325
+ if next_run:
326
+ data["next_run"] = next_run
326
327
  else:
327
328
  data["upgradePolicy"] = "manual"
328
329
 
@@ -336,7 +337,7 @@ def cluster_upgrades(ctx, name):
336
337
  @get.command()
337
338
  @environ(["APP_INTERFACE_STATE_BUCKET", "APP_INTERFACE_STATE_BUCKET_ACCOUNT"])
338
339
  @click.pass_context
339
- def version_history(ctx):
340
+ def version_history(ctx: click.Context) -> None:
340
341
  import reconcile.aus.ocm_upgrade_scheduler as ous
341
342
 
342
343
  clusters = aus_clusters_query(query_func=gql.get_api().query).clusters or []
@@ -372,11 +373,11 @@ def version_history(ctx):
372
373
 
373
374
  def get_upgrade_policies_data(
374
375
  org_upgrade_specs: list[OrganizationUpgradeSpec],
375
- md_output,
376
- integration,
377
- workload=None,
378
- show_only_soaking_upgrades=False,
379
- by_workload=False,
376
+ md_output: bool,
377
+ integration: str,
378
+ workload: str | None = None,
379
+ show_only_soaking_upgrades: bool = False,
380
+ by_workload: bool = False,
380
381
  ) -> list:
381
382
  if not org_upgrade_specs:
382
383
  return []
@@ -557,12 +558,12 @@ more than 6 hours will be highlighted.
557
558
  )
558
559
  @click.pass_context
559
560
  def cluster_upgrade_policies(
560
- ctx,
561
- cluster=None,
562
- workload=None,
563
- show_only_soaking_upgrades=False,
564
- by_workload=False,
565
- ):
561
+ ctx: click.Context,
562
+ cluster: str | None = None,
563
+ workload: str | None = None,
564
+ show_only_soaking_upgrades: bool = False,
565
+ by_workload: bool = False,
566
+ ) -> None:
566
567
  print(
567
568
  "https://grafana.app-sre.devshift.net/d/ukLXCSwVz/aus-cluster-upgrade-overview"
568
569
  )
@@ -577,9 +578,7 @@ def inherit_version_data_text(org: AUSOCMOrganization) -> str:
577
578
 
578
579
  @get.command()
579
580
  @click.pass_context
580
- def ocm_fleet_upgrade_policies(
581
- ctx,
582
- ):
581
+ def ocm_fleet_upgrade_policies(ctx: click.Context) -> None:
583
582
  from reconcile.aus.ocm_upgrade_scheduler_org import (
584
583
  OCMClusterUpgradeSchedulerOrgIntegration,
585
584
  )
@@ -612,7 +611,12 @@ def ocm_fleet_upgrade_policies(
612
611
  help="Ignore STS clusters",
613
612
  )
614
613
  @click.pass_context
615
- def aus_fleet_upgrade_policies(ctx, ocm_env, ocm_org_ids, ignore_sts_clusters):
614
+ def aus_fleet_upgrade_policies(
615
+ ctx: click.Context,
616
+ ocm_env: str | None,
617
+ ocm_org_ids: str | None,
618
+ ignore_sts_clusters: bool,
619
+ ) -> None:
616
620
  from reconcile.aus.advanced_upgrade_service import AdvancedUpgradeServiceIntegration
617
621
 
618
622
  parsed_ocm_org_ids = set(ocm_org_ids.split(",")) if ocm_org_ids else None
@@ -629,8 +633,8 @@ def aus_fleet_upgrade_policies(ctx, ocm_env, ocm_org_ids, ignore_sts_clusters):
629
633
 
630
634
 
631
635
  def generate_fleet_upgrade_policices_report(
632
- ctx, aus_integration: AdvancedUpgradeSchedulerBaseIntegration
633
- ):
636
+ ctx: click.Context, aus_integration: AdvancedUpgradeSchedulerBaseIntegration
637
+ ) -> None:
634
638
  md_output = ctx.obj["options"]["output"] == "md"
635
639
 
636
640
  org_upgrade_specs: dict[str, OrganizationUpgradeSpec] = {}
@@ -948,7 +952,7 @@ def upgrade_cluster_addon(
948
952
  )
949
953
 
950
954
 
951
- def has_cluster_account_access(cluster: dict[str, Any]):
955
+ def has_cluster_account_access(cluster: dict[str, Any]) -> bool:
952
956
  spec = cluster.get("spec") or {}
953
957
  account = spec.get("account")
954
958
  return account or cluster.get("awsInfrastructureManagementAccounts") is not None
@@ -957,7 +961,7 @@ def has_cluster_account_access(cluster: dict[str, Any]):
957
961
  @get.command()
958
962
  @click.argument("name", default="")
959
963
  @click.pass_context
960
- def clusters_network(ctx, name):
964
+ def clusters_network(ctx: click.Context, name: str) -> None:
961
965
  settings = queries.get_app_interface_settings()
962
966
  clusters = [
963
967
  c
@@ -1019,7 +1023,7 @@ def clusters_network(ctx, name):
1019
1023
 
1020
1024
  @get.command()
1021
1025
  @click.pass_context
1022
- def network_reservations(ctx) -> None:
1026
+ def network_reservations(ctx: click.Context) -> None:
1023
1027
  from reconcile.typed_queries.reserved_networks import get_networks
1024
1028
 
1025
1029
  columns = [
@@ -1032,11 +1036,10 @@ def network_reservations(ctx) -> None:
1032
1036
  ]
1033
1037
  network_table = []
1034
1038
 
1035
- def md_link(url) -> str:
1039
+ def md_link(url: str) -> str:
1036
1040
  if ctx.obj["options"]["output"] == "md":
1037
1041
  return f"[{url}]({url})"
1038
- else:
1039
- return url
1042
+ return url
1040
1043
 
1041
1044
  for network in get_networks():
1042
1045
  parentAddress = "none"
@@ -1077,7 +1080,7 @@ def network_reservations(ctx) -> None:
1077
1080
  default=24,
1078
1081
  )
1079
1082
  @click.pass_context
1080
- def cidr_blocks(ctx, for_cluster: int, mask: int) -> None:
1083
+ def cidr_blocks(ctx: click.Context, for_cluster: int, mask: int) -> None:
1081
1084
  import ipaddress
1082
1085
 
1083
1086
  from reconcile.typed_queries.aws_vpcs import get_aws_vpcs
@@ -1203,7 +1206,7 @@ def ocm_aws_infrastructure_access_switch_role_links_data() -> list[dict]:
1203
1206
 
1204
1207
  @get.command()
1205
1208
  @click.pass_context
1206
- def ocm_aws_infrastructure_access_switch_role_links_flat(ctx):
1209
+ def ocm_aws_infrastructure_access_switch_role_links_flat(ctx: click.Context) -> None:
1207
1210
  results = ocm_aws_infrastructure_access_switch_role_links_data()
1208
1211
  columns = ["cluster", "user_arn", "access_level", "switch_role_link"]
1209
1212
  print_output(ctx.obj["options"], results, columns)
@@ -1211,11 +1214,11 @@ def ocm_aws_infrastructure_access_switch_role_links_flat(ctx):
1211
1214
 
1212
1215
  @get.command()
1213
1216
  @click.pass_context
1214
- def ocm_aws_infrastructure_access_switch_role_links(ctx):
1217
+ def ocm_aws_infrastructure_access_switch_role_links(ctx: click.Context) -> None:
1215
1218
  if ctx.obj["options"]["output"] != "md":
1216
1219
  raise Exception(f"Unupported output: {ctx.obj['options']['output']}")
1217
1220
  results = ocm_aws_infrastructure_access_switch_role_links_data()
1218
- by_user = {}
1221
+ by_user: dict = {}
1219
1222
  for r in results:
1220
1223
  by_user.setdefault(r["user"], []).append(r)
1221
1224
  columns = ["cluster", "source_login", "access_level", "switch_role_link"]
@@ -1229,7 +1232,7 @@ def ocm_aws_infrastructure_access_switch_role_links(ctx):
1229
1232
 
1230
1233
  @get.command()
1231
1234
  @click.pass_context
1232
- def clusters_aws_account_ids(ctx):
1235
+ def clusters_aws_account_ids(ctx: click.Context) -> None:
1233
1236
  settings = queries.get_app_interface_settings()
1234
1237
  clusters = [c for c in queries.get_clusters() if c.get("ocm") is not None]
1235
1238
  ocm_map = OCMMap(clusters=clusters, settings=settings)
@@ -1259,7 +1262,7 @@ def clusters_aws_account_ids(ctx):
1259
1262
  @root.command()
1260
1263
  @click.argument("account_name")
1261
1264
  @click.pass_context
1262
- def user_credentials_migrate_output(ctx, account_name) -> None:
1265
+ def user_credentials_migrate_output(ctx: click.Context, account_name: str) -> None:
1263
1266
  accounts = queries.get_state_aws_accounts()
1264
1267
  state = init_state(integration="account-notifier")
1265
1268
  skip_accounts, appsre_pgp_key, _ = tfu.get_reencrypt_settings()
@@ -1301,7 +1304,7 @@ def user_credentials_migrate_output(ctx, account_name) -> None:
1301
1304
 
1302
1305
  @get.command()
1303
1306
  @click.pass_context
1304
- def aws_route53_zones(ctx):
1307
+ def aws_route53_zones(ctx: click.Context) -> None:
1305
1308
  zones = queries.get_dns_zones()
1306
1309
 
1307
1310
  results = []
@@ -1324,7 +1327,7 @@ def aws_route53_zones(ctx):
1324
1327
  @click.argument("cluster_name")
1325
1328
  @click.option("--cluster-admin/--no-cluster-admin", default=False)
1326
1329
  @click.pass_context
1327
- def bot_login(ctx, cluster_name, cluster_admin):
1330
+ def bot_login(ctx: click.Context, cluster_name: str, cluster_admin: bool) -> None:
1328
1331
  settings = queries.get_app_interface_settings()
1329
1332
  secret_reader = SecretReader(settings=settings)
1330
1333
  clusters = queries.get_clusters()
@@ -1347,7 +1350,7 @@ def bot_login(ctx, cluster_name, cluster_admin):
1347
1350
  )
1348
1351
  @click.argument("org_name")
1349
1352
  @click.pass_context
1350
- def ocm_login(ctx, org_name):
1353
+ def ocm_login(ctx: click.Context, org_name: str) -> None:
1351
1354
  settings = queries.get_app_interface_settings()
1352
1355
  secret_reader = SecretReader(settings=settings)
1353
1356
  ocms = [
@@ -1374,7 +1377,7 @@ def ocm_login(ctx, org_name):
1374
1377
  )
1375
1378
  @click.argument("account_name")
1376
1379
  @click.pass_context
1377
- def aws_creds(ctx, account_name):
1380
+ def aws_creds(ctx: click.Context, account_name: str) -> None:
1378
1381
  settings = queries.get_app_interface_settings()
1379
1382
  secret_reader = SecretReader(settings=settings)
1380
1383
  accounts = queries.get_aws_accounts(name=account_name)
@@ -1417,8 +1420,14 @@ def aws_creds(ctx, account_name):
1417
1420
  )
1418
1421
  @click.pass_context
1419
1422
  def copy_tfstate(
1420
- ctx, source_bucket, source_object_path, account_uid, rename, region, force
1421
- ):
1423
+ ctx: click.Context,
1424
+ source_bucket: str,
1425
+ source_object_path: str,
1426
+ account_uid: str,
1427
+ rename: str | None,
1428
+ region: str | None,
1429
+ force: bool,
1430
+ ) -> None:
1422
1431
  settings = queries.get_app_interface_settings()
1423
1432
  secret_reader = SecretReader(settings=settings)
1424
1433
  accounts = queries.get_aws_accounts(uid=account_uid, terraform_state=True)
@@ -1439,7 +1448,6 @@ def copy_tfstate(
1439
1448
  )
1440
1449
  return
1441
1450
 
1442
- dest_filename = ""
1443
1451
  if rename:
1444
1452
  dest_filename = rename.removesuffix(".tfstate")
1445
1453
  else:
@@ -1497,20 +1505,26 @@ def copy_tfstate(
1497
1505
  @get.command(short_help='obtain "rosa create cluster" command by cluster name')
1498
1506
  @click.argument("cluster_name")
1499
1507
  @click.pass_context
1500
- def rosa_create_cluster_command(ctx, cluster_name):
1508
+ def rosa_create_cluster_command(ctx: click.Context, cluster_name: str) -> None:
1501
1509
  clusters = [c for c in get_clusters() if c.name == cluster_name]
1502
- try:
1503
- cluster = clusters[0]
1504
- except IndexError:
1510
+ if not clusters:
1505
1511
  print(f"{cluster_name} not found.")
1506
1512
  sys.exit(1)
1513
+ cluster = clusters[0]
1507
1514
 
1508
- if cluster.spec.product != OCM_PRODUCT_ROSA:
1515
+ if (
1516
+ not cluster.spec
1517
+ or cluster.spec.product != OCM_PRODUCT_ROSA
1518
+ or not isinstance(cluster.spec, ClusterSpecROSAV1)
1519
+ ):
1509
1520
  print("must be a rosa cluster.")
1510
1521
  sys.exit(1)
1511
1522
 
1512
1523
  settings = queries.get_app_interface_settings()
1513
1524
  account = cluster.spec.account
1525
+ if not account:
1526
+ print("account not found.")
1527
+ sys.exit(1)
1514
1528
 
1515
1529
  if account.billing_account:
1516
1530
  billing_account = account.billing_account.uid
@@ -1520,6 +1534,19 @@ def rosa_create_cluster_command(ctx, cluster_name):
1520
1534
  ) as aws_api:
1521
1535
  billing_account = aws_api.get_organization_billing_account(account.name)
1522
1536
 
1537
+ if not cluster.spec.oidc_endpoint_url:
1538
+ print("oidc_endpoint_url not set.")
1539
+ sys.exit(1)
1540
+ if not cluster.spec.subnet_ids:
1541
+ print("subnet_ids not set.")
1542
+ sys.exit(1)
1543
+ if not cluster.network:
1544
+ print("network not set.")
1545
+ sys.exit(1)
1546
+ if not cluster.machine_pools:
1547
+ print("machine_pools not set.")
1548
+ sys.exit(1)
1549
+
1523
1550
  print(
1524
1551
  " ".join([
1525
1552
  "rosa create cluster",
@@ -1573,7 +1600,9 @@ def rosa_create_cluster_command(ctx, cluster_name):
1573
1600
  @click.argument("jumphost_hostname", required=False)
1574
1601
  @click.argument("cluster_name", required=False)
1575
1602
  @click.pass_context
1576
- def sshuttle_command(ctx, jumphost_hostname: str | None, cluster_name: str | None):
1603
+ def sshuttle_command(
1604
+ ctx: click.Context, jumphost_hostname: str | None, cluster_name: str | None
1605
+ ) -> None:
1577
1606
  jumphosts_query_data = queries.get_jumphosts(hostname=jumphost_hostname)
1578
1607
  jumphosts = jumphosts_query_data.jumphosts or []
1579
1608
  for jh in jumphosts:
@@ -1595,7 +1624,9 @@ def sshuttle_command(ctx, jumphost_hostname: str | None, cluster_name: str | Non
1595
1624
  @click.argument("instance_name")
1596
1625
  @click.argument("job_name")
1597
1626
  @click.pass_context
1598
- def jenkins_job_vault_secrets(ctx, instance_name: str, job_name: str) -> None:
1627
+ def jenkins_job_vault_secrets(
1628
+ ctx: click.Context, instance_name: str, job_name: str
1629
+ ) -> None:
1599
1630
  secret_reader = SecretReader(queries.get_secret_reader_settings())
1600
1631
  jjb: JJB = init_jjb(secret_reader, instance_name, config_name=None, print_only=True)
1601
1632
  jobs = jjb.get_all_jobs([job_name], instance_name)[instance_name]
@@ -1620,7 +1651,7 @@ def jenkins_job_vault_secrets(ctx, instance_name: str, job_name: str) -> None:
1620
1651
  @get.command()
1621
1652
  @click.argument("name", default="")
1622
1653
  @click.pass_context
1623
- def namespaces(ctx, name):
1654
+ def namespaces(ctx: click.Context, name: str) -> None:
1624
1655
  namespaces = queries.get_namespaces()
1625
1656
  if name:
1626
1657
  namespaces = [ns for ns in namespaces if ns["name"] == name]
@@ -1632,7 +1663,7 @@ def namespaces(ctx, name):
1632
1663
  print_output(ctx.obj["options"], namespaces, columns)
1633
1664
 
1634
1665
 
1635
- def add_resource(item, resource, columns):
1666
+ def add_resource(item: dict, resource: Mapping, columns: list[str]) -> None:
1636
1667
  provider = resource["provider"]
1637
1668
  if provider not in columns:
1638
1669
  columns.append(provider)
@@ -1643,11 +1674,11 @@ def add_resource(item, resource, columns):
1643
1674
 
1644
1675
  @get.command
1645
1676
  @click.pass_context
1646
- def cluster_openshift_resources(ctx):
1677
+ def cluster_openshift_resources(ctx: click.Context) -> None:
1647
1678
  gqlapi = gql.get_api()
1648
1679
  namespaces = gqlapi.query(orb.NAMESPACES_QUERY)["namespaces"]
1649
1680
  columns = ["name", "total"]
1650
- results = {}
1681
+ results: dict = {}
1651
1682
  for ns_info in namespaces:
1652
1683
  cluster_name = ns_info["cluster"]["name"]
1653
1684
  item = {"name": cluster_name, "total": 0}
@@ -1668,10 +1699,10 @@ def cluster_openshift_resources(ctx):
1668
1699
 
1669
1700
  @get.command
1670
1701
  @click.pass_context
1671
- def aws_terraform_resources(ctx):
1702
+ def aws_terraform_resources(ctx: click.Context) -> None:
1672
1703
  namespaces = tfr.get_namespaces()
1673
1704
  columns = ["name", "total"]
1674
- results = {}
1705
+ results: dict = {}
1675
1706
  for ns_info in namespaces:
1676
1707
  specs = (
1677
1708
  get_external_resource_specs(
@@ -1723,7 +1754,7 @@ def rds_region(
1723
1754
 
1724
1755
  @get.command
1725
1756
  @click.pass_context
1726
- def rds(ctx):
1757
+ def rds(ctx: click.Context) -> None:
1727
1758
  namespaces = tfr.get_namespaces()
1728
1759
  accounts = {a["name"]: a for a in queries.get_aws_accounts()}
1729
1760
  results = []
@@ -1801,7 +1832,7 @@ You can view the source of this Markdown to extract the JSON data.
1801
1832
 
1802
1833
  @get.command
1803
1834
  @click.pass_context
1804
- def rds_recommendations(ctx):
1835
+ def rds_recommendations(ctx: click.Context) -> None:
1805
1836
  IGNORED_STATUSES = ("resolved",)
1806
1837
  IGNORED_SEVERITIES = ("informational",)
1807
1838
 
@@ -1880,7 +1911,7 @@ def rds_recommendations(ctx):
1880
1911
 
1881
1912
  @get.command()
1882
1913
  @click.pass_context
1883
- def products(ctx):
1914
+ def products(ctx: click.Context) -> None:
1884
1915
  products = queries.get_products()
1885
1916
  columns = ["name", "description"]
1886
1917
  print_output(ctx.obj["options"], products, columns)
@@ -1889,7 +1920,7 @@ def products(ctx):
1889
1920
  @describe.command()
1890
1921
  @click.argument("name")
1891
1922
  @click.pass_context
1892
- def product(ctx, name):
1923
+ def product(ctx: click.Context, name: str) -> None:
1893
1924
  products = queries.get_products()
1894
1925
  products = [p for p in products if p["name"].lower() == name.lower()]
1895
1926
  if len(products) != 1:
@@ -1904,7 +1935,7 @@ def product(ctx, name):
1904
1935
 
1905
1936
  @get.command()
1906
1937
  @click.pass_context
1907
- def environments(ctx):
1938
+ def environments(ctx: click.Context) -> None:
1908
1939
  environments = queries.get_environments()
1909
1940
  columns = ["name", "description", "product.name"]
1910
1941
  # TODO(mafriedm): fix this
@@ -1916,7 +1947,7 @@ def environments(ctx):
1916
1947
  @describe.command()
1917
1948
  @click.argument("name")
1918
1949
  @click.pass_context
1919
- def environment(ctx, name):
1950
+ def environment(ctx: click.Context, name: str) -> None:
1920
1951
  environments = queries.get_environments()
1921
1952
  environments = [e for e in environments if e["name"].lower() == name.lower()]
1922
1953
  if len(environments) != 1:
@@ -1934,7 +1965,7 @@ def environment(ctx, name):
1934
1965
 
1935
1966
  @get.command()
1936
1967
  @click.pass_context
1937
- def services(ctx):
1968
+ def services(ctx: click.Context) -> None:
1938
1969
  apps = queries.get_apps()
1939
1970
  columns = ["name", "path", "onboardingStatus"]
1940
1971
  print_output(ctx.obj["options"], apps, columns)
@@ -1942,17 +1973,15 @@ def services(ctx):
1942
1973
 
1943
1974
  @get.command()
1944
1975
  @click.pass_context
1945
- def repos(ctx):
1976
+ def repos(ctx: click.Context) -> None:
1946
1977
  repos = queries.get_repos()
1947
- repos = [{"url": r} for r in repos]
1948
- columns = ["url"]
1949
- print_output(ctx.obj["options"], repos, columns)
1978
+ print_output(ctx.obj["options"], [{"url": r} for r in repos], ["url"])
1950
1979
 
1951
1980
 
1952
1981
  @get.command()
1953
1982
  @click.argument("org_username")
1954
1983
  @click.pass_context
1955
- def roles(ctx, org_username):
1984
+ def roles(ctx: click.Context, org_username: str) -> None:
1956
1985
  users = queries.get_roles()
1957
1986
  users = [u for u in users if u["org_username"] == org_username]
1958
1987
 
@@ -1963,7 +1992,7 @@ def roles(ctx, org_username):
1963
1992
  user = users[0]
1964
1993
 
1965
1994
  # type, name, resource, [ref]
1966
- roles: dict[(str, str, str), set] = defaultdict(set)
1995
+ roles: dict[tuple[str, str, str], set[str]] = defaultdict(set)
1967
1996
 
1968
1997
  for role in user["roles"]:
1969
1998
  role_name = role["path"]
@@ -2017,7 +2046,7 @@ def roles(ctx, org_username):
2017
2046
  @get.command()
2018
2047
  @click.argument("org_username", default="")
2019
2048
  @click.pass_context
2020
- def users(ctx, org_username):
2049
+ def users(ctx: click.Context, org_username: str) -> None:
2021
2050
  users = queries.get_users()
2022
2051
  if org_username:
2023
2052
  users = [u for u in users if u["org_username"] == org_username]
@@ -2028,7 +2057,7 @@ def users(ctx, org_username):
2028
2057
 
2029
2058
  @get.command()
2030
2059
  @click.pass_context
2031
- def integrations(ctx):
2060
+ def integrations(ctx: click.Context) -> None:
2032
2061
  environments = queries.get_integrations()
2033
2062
  columns = ["name", "description"]
2034
2063
  print_output(ctx.obj["options"], environments, columns)
@@ -2036,7 +2065,7 @@ def integrations(ctx):
2036
2065
 
2037
2066
  @get.command()
2038
2067
  @click.pass_context
2039
- def quay_mirrors(ctx):
2068
+ def quay_mirrors(ctx: click.Context) -> None:
2040
2069
  apps = queries.get_quay_repos()
2041
2070
 
2042
2071
  mirrors = []
@@ -2074,7 +2103,9 @@ def quay_mirrors(ctx):
2074
2103
  @click.argument("kind")
2075
2104
  @click.argument("name")
2076
2105
  @click.pass_context
2077
- def root_owner(ctx, cluster, namespace, kind, name):
2106
+ def root_owner(
2107
+ ctx: click.Context, cluster: str, namespace: str, kind: str, name: str
2108
+ ) -> None:
2078
2109
  settings = queries.get_app_interface_settings()
2079
2110
  clusters = [c for c in queries.get_clusters(minimal=True) if c["name"] == cluster]
2080
2111
  oc_map = OC_Map(
@@ -2104,7 +2135,9 @@ def root_owner(ctx, cluster, namespace, kind, name):
2104
2135
  @click.argument("aws_account")
2105
2136
  @click.argument("identifier")
2106
2137
  @click.pass_context
2107
- def service_owners_for_rds_instance(ctx, aws_account, identifier):
2138
+ def service_owners_for_rds_instance(
2139
+ ctx: click.Context, aws_account: str, identifier: str
2140
+ ) -> None:
2108
2141
  namespaces = queries.get_namespaces()
2109
2142
  service_owners = []
2110
2143
  for namespace_info in namespaces:
@@ -2126,7 +2159,7 @@ def service_owners_for_rds_instance(ctx, aws_account, identifier):
2126
2159
 
2127
2160
  @get.command()
2128
2161
  @click.pass_context
2129
- def sre_checkpoints(ctx):
2162
+ def sre_checkpoints(ctx: click.Context) -> None:
2130
2163
  apps = queries.get_apps()
2131
2164
 
2132
2165
  parent_apps = {app["parentApp"]["path"] for app in apps if app.get("parentApp")}
@@ -2150,13 +2183,14 @@ def sre_checkpoints(ctx):
2150
2183
 
2151
2184
  @get.command()
2152
2185
  @click.pass_context
2153
- def app_interface_merge_queue(ctx):
2186
+ def app_interface_merge_queue(ctx: click.Context) -> None:
2154
2187
  import reconcile.gitlab_housekeeping as glhk
2155
2188
 
2156
2189
  settings = queries.get_app_interface_settings()
2157
2190
  instance = queries.get_gitlab_instance()
2158
2191
  gl = GitLabApi(instance, project_url=settings["repoUrl"], settings=settings)
2159
- merge_requests = glhk.get_merge_requests(True, gl, state=None)
2192
+ state = init_state(integration=glhk.QONTRACT_INTEGRATION)
2193
+ merge_requests = glhk.get_merge_requests(True, gl, state=state)
2160
2194
 
2161
2195
  columns = [
2162
2196
  "id",
@@ -2191,7 +2225,7 @@ def app_interface_merge_queue(ctx):
2191
2225
 
2192
2226
  @get.command()
2193
2227
  @click.pass_context
2194
- def app_interface_review_queue(ctx) -> None:
2228
+ def app_interface_review_queue(ctx: click.Context) -> None:
2195
2229
  import reconcile.gitlab_housekeeping as glhk
2196
2230
 
2197
2231
  settings = queries.get_app_interface_settings()
@@ -2208,7 +2242,7 @@ def app_interface_review_queue(ctx) -> None:
2208
2242
  "labels",
2209
2243
  ]
2210
2244
 
2211
- def get_mrs(repo, url) -> list[dict[str, str]]:
2245
+ def get_mrs(repo: str, url: str) -> list[dict[str, str]]:
2212
2246
  gl = GitLabApi(instance, project_url=url, settings=settings)
2213
2247
  merge_requests = gl.get_merge_requests(state=MRState.OPENED)
2214
2248
  try:
@@ -2303,7 +2337,7 @@ def app_interface_review_queue(ctx) -> None:
2303
2337
 
2304
2338
  @get.command()
2305
2339
  @click.pass_context
2306
- def app_interface_open_selfserviceable_mr_queue(ctx):
2340
+ def app_interface_open_selfserviceable_mr_queue(ctx: click.Context) -> None:
2307
2341
  settings = queries.get_app_interface_settings()
2308
2342
  instance = queries.get_gitlab_instance()
2309
2343
  gl = GitLabApi(instance, project_url=settings["repoUrl"], settings=settings)
@@ -2366,7 +2400,7 @@ def app_interface_open_selfserviceable_mr_queue(ctx):
2366
2400
 
2367
2401
  @get.command()
2368
2402
  @click.pass_context
2369
- def change_types(ctx) -> None:
2403
+ def change_types(ctx: click.Context) -> None:
2370
2404
  """List all change types."""
2371
2405
  change_types = fetch_change_type_processors(gql.get_api(), NoOpFileDiffResolver())
2372
2406
 
@@ -2391,7 +2425,7 @@ def change_types(ctx) -> None:
2391
2425
 
2392
2426
  @get.command()
2393
2427
  @click.pass_context
2394
- def app_interface_merge_history(ctx):
2428
+ def app_interface_merge_history(ctx: click.Context) -> None:
2395
2429
  settings = queries.get_app_interface_settings()
2396
2430
  instance = queries.get_gitlab_instance()
2397
2431
  gl = GitLabApi(instance, project_url=settings["repoUrl"], settings=settings)
@@ -2428,7 +2462,7 @@ def app_interface_merge_history(ctx):
2428
2462
  )
2429
2463
  @use_jump_host()
2430
2464
  @click.pass_context
2431
- def selectorsyncset_managed_resources(ctx, use_jump_host):
2465
+ def selectorsyncset_managed_resources(ctx: click.Context, use_jump_host: bool) -> None:
2432
2466
  vault_settings = get_app_interface_vault_settings()
2433
2467
  secret_reader = create_secret_reader(use_vault=vault_settings.vault)
2434
2468
  clusters = get_clusters()
@@ -2486,7 +2520,9 @@ def selectorsyncset_managed_resources(ctx, use_jump_host):
2486
2520
  )
2487
2521
  @use_jump_host()
2488
2522
  @click.pass_context
2489
- def selectorsyncset_managed_hypershift_resources(ctx, use_jump_host):
2523
+ def selectorsyncset_managed_hypershift_resources(
2524
+ ctx: click.Context, use_jump_host: bool
2525
+ ) -> None:
2490
2526
  vault_settings = get_app_interface_vault_settings()
2491
2527
  secret_reader = create_secret_reader(use_vault=vault_settings.vault)
2492
2528
  clusters = get_clusters()
@@ -2564,7 +2600,12 @@ def selectorsyncset_managed_hypershift_resources(ctx, use_jump_host):
2564
2600
  default=os.environ.get("QONTRACT_CLI_EC2_JENKINS_WORKER_AWS_REGION", "us-east-1"),
2565
2601
  )
2566
2602
  @click.pass_context
2567
- def ec2_jenkins_workers(ctx, aws_access_key_id, aws_secret_access_key, aws_region):
2603
+ def ec2_jenkins_workers(
2604
+ ctx: click.Context,
2605
+ aws_access_key_id: str,
2606
+ aws_secret_access_key: str,
2607
+ aws_region: str,
2608
+ ) -> None:
2568
2609
  """Prints a list of jenkins workers and their status."""
2569
2610
  if not aws_access_key_id or not aws_secret_access_key:
2570
2611
  raise click.ClickException(
@@ -2611,9 +2652,9 @@ def ec2_jenkins_workers(ctx, aws_access_key_id, aws_secret_access_key, aws_regio
2611
2652
  url = ""
2612
2653
  for t in instance.tags:
2613
2654
  if t.get("Key") == "os":
2614
- os = t.get("Value")
2655
+ os = t["Value"]
2615
2656
  if t.get("Key") == "jenkins_controller":
2616
- url = f"https://{t.get('Value').replace('-', '.')}.devshift.net/computer/{instance.id}"
2657
+ url = f"https://{t['Value'].replace('-', '.')}.devshift.net/computer/{instance.id}"
2617
2658
  image = ec2.Image(instance.image_id)
2618
2659
  commit_url = ""
2619
2660
  for t in image.tags:
@@ -2640,7 +2681,7 @@ def ec2_jenkins_workers(ctx, aws_access_key_id, aws_secret_access_key, aws_regio
2640
2681
  @get.command()
2641
2682
  @click.argument("status-board-instance")
2642
2683
  @click.pass_context
2643
- def slo_document_services(ctx, status_board_instance):
2684
+ def slo_document_services(ctx: click.Context, status_board_instance: str) -> None:
2644
2685
  """Print SLO Documents Services"""
2645
2686
  columns = [
2646
2687
  "slo_doc_name",
@@ -2669,7 +2710,7 @@ def slo_document_services(ctx, status_board_instance):
2669
2710
  slodocs = []
2670
2711
  for slodoc in get_slo_documents():
2671
2712
  products = [ns.namespace.environment.product.name for ns in slodoc.namespaces]
2672
- for slo in slodoc.slos:
2713
+ for slo in slodoc.slos or []:
2673
2714
  for product in products:
2674
2715
  if slodoc.app.parent_app:
2675
2716
  app = f"{slodoc.app.parent_app.name}-{slodoc.app.name}"
@@ -2695,7 +2736,7 @@ def slo_document_services(ctx, status_board_instance):
2695
2736
  "target_unit": slo.slo_target_unit,
2696
2737
  "window": slo.slo_parameters.window,
2697
2738
  "statusBoardService": f"{product}/{slodoc.app.name}/{slo.name}",
2698
- "statusBoardEnabled": "statusBoard" in slodoc.labels,
2739
+ "statusBoardEnabled": "statusBoard" in (slodoc.labels or {}),
2699
2740
  }
2700
2741
  slodocs.append(item)
2701
2742
 
@@ -2705,7 +2746,7 @@ def slo_document_services(ctx, status_board_instance):
2705
2746
  @get.command()
2706
2747
  @click.argument("file_path")
2707
2748
  @click.pass_context
2708
- def alerts(ctx, file_path):
2749
+ def alerts(ctx: click.Context, file_path: str) -> None:
2709
2750
  BIG_NUMBER = 10
2710
2751
 
2711
2752
  def sort_by_threshold(item: dict[str, str]) -> int:
@@ -2779,7 +2820,7 @@ def alerts(ctx, file_path):
2779
2820
  @get.command()
2780
2821
  @click.pass_context
2781
2822
  @thread_pool_size(default=5)
2782
- def aws_cost_report(ctx, thread_pool_size):
2823
+ def aws_cost_report(ctx: click.Context, thread_pool_size: int) -> None:
2783
2824
  command = AwsCostReportCommand.create(thread_pool_size=thread_pool_size)
2784
2825
  print(command.execute())
2785
2826
 
@@ -2787,7 +2828,7 @@ def aws_cost_report(ctx, thread_pool_size):
2787
2828
  @get.command()
2788
2829
  @click.pass_context
2789
2830
  @thread_pool_size(default=5)
2790
- def openshift_cost_report(ctx, thread_pool_size):
2831
+ def openshift_cost_report(ctx: click.Context, thread_pool_size: int) -> None:
2791
2832
  command = OpenShiftCostReportCommand.create(thread_pool_size=thread_pool_size)
2792
2833
  print(command.execute())
2793
2834
 
@@ -2795,7 +2836,9 @@ def openshift_cost_report(ctx, thread_pool_size):
2795
2836
  @get.command()
2796
2837
  @click.pass_context
2797
2838
  @thread_pool_size(default=5)
2798
- def openshift_cost_optimization_report(ctx, thread_pool_size):
2839
+ def openshift_cost_optimization_report(
2840
+ ctx: click.Context, thread_pool_size: int
2841
+ ) -> None:
2799
2842
  command = OpenShiftCostOptimizationReportCommand.create(
2800
2843
  thread_pool_size=thread_pool_size
2801
2844
  )
@@ -2804,7 +2847,7 @@ def openshift_cost_optimization_report(ctx, thread_pool_size):
2804
2847
 
2805
2848
  @get.command()
2806
2849
  @click.pass_context
2807
- def osd_component_versions(ctx):
2850
+ def osd_component_versions(ctx: click.Context) -> None:
2808
2851
  osd_environments = [
2809
2852
  e["name"] for e in queries.get_environments() if e["product"]["name"] == "OSDv4"
2810
2853
  ]
@@ -2840,7 +2883,7 @@ def osd_component_versions(ctx):
2840
2883
 
2841
2884
  @get.command()
2842
2885
  @click.pass_context
2843
- def maintenances(ctx):
2886
+ def maintenances(ctx: click.Context) -> None:
2844
2887
  now = datetime.now(UTC)
2845
2888
  maintenances = maintenances_gql.query(gql.get_api().query).maintenances or []
2846
2889
  data = [
@@ -2903,7 +2946,7 @@ class MigrationStatusCount:
2903
2946
 
2904
2947
  @get.command()
2905
2948
  @click.pass_context
2906
- def hcp_migration_status(ctx):
2949
+ def hcp_migration_status(ctx: click.Context) -> None:
2907
2950
  counts: dict[str, MigrationStatusCount] = {}
2908
2951
  total_count = MigrationStatusCount("total")
2909
2952
  saas_files = get_saas_files()
@@ -2924,6 +2967,7 @@ def hcp_migration_status(ctx):
2924
2967
  continue
2925
2968
  if t.delete:
2926
2969
  continue
2970
+ assert t.namespace.cluster.labels
2927
2971
  if hcp_migration := t.namespace.cluster.labels.get("hcp_migration"):
2928
2972
  app = sf.app.parent_app.name if sf.app.parent_app else sf.app.name
2929
2973
  counts.setdefault(app, MigrationStatusCount(app))
@@ -2940,7 +2984,7 @@ def hcp_migration_status(ctx):
2940
2984
 
2941
2985
  @get.command()
2942
2986
  @click.pass_context
2943
- def systems_and_tools(ctx):
2987
+ def systems_and_tools(ctx: click.Context) -> None:
2944
2988
  print(
2945
2989
  f"This report is obtained from app-interface Graphql endpoint available at: {config.get_config()['graphql']['server']}"
2946
2990
  )
@@ -2954,7 +2998,7 @@ def systems_and_tools(ctx):
2954
2998
  "--environment_name", default="production", help="environment to get logs from"
2955
2999
  )
2956
3000
  @click.pass_context
2957
- def logs(ctx, integration_name: str, environment_name: str):
3001
+ def logs(ctx: click.Context, integration_name: str, environment_name: str) -> None:
2958
3002
  integrations = [
2959
3003
  i
2960
3004
  for i in integrations_gql.query(query_func=gql.get_api().query).integrations
@@ -2993,7 +3037,7 @@ def logs(ctx, integration_name: str, environment_name: str):
2993
3037
 
2994
3038
  @get.command
2995
3039
  @click.pass_context
2996
- def jenkins_jobs(ctx):
3040
+ def jenkins_jobs(ctx: click.Context) -> None:
2997
3041
  jenkins_configs = queries.get_jenkins_configs()
2998
3042
 
2999
3043
  # stats dicts
@@ -3063,9 +3107,9 @@ You can view the source of this Markdown to extract the JSON data.
3063
3107
 
3064
3108
  @get.command
3065
3109
  @click.pass_context
3066
- def container_image_details(ctx):
3110
+ def container_image_details(ctx: click.Context) -> None:
3067
3111
  apps = get_apps_quay_repos_escalation_policies()
3068
- data: list[dict[str, str]] = []
3112
+ data: list[dict[str, str | list[str]]] = []
3069
3113
  for app in apps:
3070
3114
  app_name = f"{app.parent_app.name}/{app.name}" if app.parent_app else app.name
3071
3115
  ep_channels = app.escalation_policy.channels
@@ -3077,7 +3121,7 @@ def container_image_details(ctx):
3077
3121
  if repo.mirror:
3078
3122
  continue
3079
3123
  repository = f"quay.io/{org_name}/{repo.name}"
3080
- item = {
3124
+ item: dict[str, str | list[str]] = {
3081
3125
  "app": app_name,
3082
3126
  "repository": repository,
3083
3127
  "email": email,
@@ -3090,27 +3134,25 @@ def container_image_details(ctx):
3090
3134
 
3091
3135
  @get.command
3092
3136
  @click.pass_context
3093
- def change_log_tracking(ctx):
3137
+ def change_log_tracking(ctx: click.Context) -> None:
3094
3138
  repo_url = get_app_interface_repo_url()
3095
3139
  change_types = fetch_change_type_processors(gql.get_api(), NoOpFileDiffResolver())
3096
3140
  state = init_state(integration=cl.QONTRACT_INTEGRATION)
3097
3141
  change_log = ChangeLog(**state.get(BUNDLE_DIFFS_OBJ))
3098
3142
  data: list[dict[str, str]] = []
3099
- for item in change_log.items:
3100
- change_log_item = ChangeLogItem(**item)
3143
+ for change_log_item in change_log.items:
3101
3144
  commit = change_log_item.commit
3102
3145
  covered_change_types_descriptions = [
3103
3146
  ct.description
3104
3147
  for ct in change_types
3105
3148
  if ct.name in change_log_item.change_types
3106
3149
  ]
3107
- item = {
3150
+ data.append({
3108
3151
  "commit": f"[{commit[:7]}]({repo_url}/commit/{commit})",
3109
3152
  "merged_at": change_log_item.merged_at,
3110
3153
  "apps": ", ".join(change_log_item.apps),
3111
3154
  "changes": ", ".join(covered_change_types_descriptions),
3112
- }
3113
- data.append(item)
3155
+ })
3114
3156
 
3115
3157
  # TODO(mafriedm): Fix this
3116
3158
  ctx.obj["options"]["sort"] = False
@@ -3121,7 +3163,7 @@ def change_log_tracking(ctx):
3121
3163
  @root.group(name="set")
3122
3164
  @output
3123
3165
  @click.pass_context
3124
- def set_command(ctx, output):
3166
+ def set_command(ctx: click.Context, output: str) -> None:
3125
3167
  ctx.obj["output"] = output
3126
3168
 
3127
3169
 
@@ -3130,7 +3172,9 @@ def set_command(ctx, output):
3130
3172
  @click.argument("usergroup")
3131
3173
  @click.argument("username")
3132
3174
  @click.pass_context
3133
- def slack_usergroup(ctx, workspace, usergroup, username):
3175
+ def slack_usergroup(
3176
+ ctx: click.Context, workspace: str, usergroup: str, username: str
3177
+ ) -> None:
3134
3178
  """Update users in a slack usergroup.
3135
3179
  Use an org_username as the username.
3136
3180
  To empty a slack usergroup, pass '' (empty string) as the username.
@@ -3138,6 +3182,8 @@ def slack_usergroup(ctx, workspace, usergroup, username):
3138
3182
  settings = queries.get_app_interface_settings()
3139
3183
  slack = slackapi_from_queries("qontract-cli")
3140
3184
  ugid = slack.get_usergroup_id(usergroup)
3185
+ if not ugid:
3186
+ raise click.ClickException(f"Usergroup {usergroup} not found.")
3141
3187
  if username:
3142
3188
  mail_address = settings["smtp"]["mailAddress"]
3143
3189
  users = [slack.get_user_id_by_name(username, mail_address)]
@@ -3146,33 +3192,17 @@ def slack_usergroup(ctx, workspace, usergroup, username):
3146
3192
  slack.update_usergroup_users(ugid, users)
3147
3193
 
3148
3194
 
3149
- @set_command.command()
3150
- @click.argument("org_name")
3151
- @click.argument("cluster_name")
3152
- @click.pass_context
3153
- def cluster_admin(ctx, org_name, cluster_name):
3154
- settings = queries.get_app_interface_settings()
3155
- ocms = [
3156
- o for o in queries.get_openshift_cluster_managers() if o["name"] == org_name
3157
- ]
3158
- ocm_map = OCMMap(ocms=ocms, settings=settings)
3159
- ocm = ocm_map[org_name]
3160
- enabled = ocm.is_cluster_admin_enabled(cluster_name)
3161
- if not enabled:
3162
- ocm.enable_cluster_admin(cluster_name)
3163
-
3164
-
3165
3195
  @root.group()
3166
3196
  @environ(["APP_INTERFACE_STATE_BUCKET"])
3167
3197
  @click.pass_context
3168
- def state(ctx):
3198
+ def state(ctx: click.Context) -> None:
3169
3199
  pass
3170
3200
 
3171
3201
 
3172
3202
  @state.command()
3173
3203
  @click.argument("integration", default="")
3174
3204
  @click.pass_context
3175
- def ls(ctx, integration):
3205
+ def ls(ctx: click.Context, integration: str) -> None:
3176
3206
  state = init_state(integration=integration)
3177
3207
  keys = state.ls()
3178
3208
  # if integration in not defined the 2th token will be the integration name
@@ -3193,7 +3223,7 @@ def ls(ctx, integration):
3193
3223
  @click.argument("integration")
3194
3224
  @click.argument("key")
3195
3225
  @click.pass_context
3196
- def state_get(ctx, integration, key):
3226
+ def state_get(ctx: click.Context, integration: str, key: str) -> None:
3197
3227
  state = init_state(integration=integration)
3198
3228
  value = state.get(key)
3199
3229
  print(value)
@@ -3203,7 +3233,7 @@ def state_get(ctx, integration, key):
3203
3233
  @click.argument("integration")
3204
3234
  @click.argument("key")
3205
3235
  @click.pass_context
3206
- def add(ctx, integration, key):
3236
+ def add(ctx: click.Context, integration: str, key: str) -> None:
3207
3237
  state = init_state(integration=integration)
3208
3238
  state.add(key)
3209
3239
 
@@ -3213,7 +3243,7 @@ def add(ctx, integration, key):
3213
3243
  @click.argument("key")
3214
3244
  @click.argument("value")
3215
3245
  @click.pass_context
3216
- def state_set(ctx, integration, key, value):
3246
+ def state_set(ctx: click.Context, integration: str, key: str, value: str) -> None:
3217
3247
  state = init_state(integration=integration)
3218
3248
  state.add(key, value=value, force=True)
3219
3249
 
@@ -3222,7 +3252,7 @@ def state_set(ctx, integration, key, value):
3222
3252
  @click.argument("integration")
3223
3253
  @click.argument("key")
3224
3254
  @click.pass_context
3225
- def rm(ctx, integration, key):
3255
+ def rm(ctx: click.Context, integration: str, key: str) -> None:
3226
3256
  state = init_state(integration=integration)
3227
3257
  state.rm(key)
3228
3258
 
@@ -3230,7 +3260,7 @@ def rm(ctx, integration, key):
3230
3260
  @root.group()
3231
3261
  @environ(["APP_INTERFACE_STATE_BUCKET"])
3232
3262
  @click.pass_context
3233
- def early_exit_cache(ctx):
3263
+ def early_exit_cache(ctx: click.Context) -> None:
3234
3264
  pass
3235
3265
 
3236
3266
 
@@ -3266,13 +3296,13 @@ def early_exit_cache(ctx):
3266
3296
  )
3267
3297
  @click.pass_context
3268
3298
  def early_exit_cache_head(
3269
- ctx,
3270
- integration,
3271
- integration_version,
3272
- dry_run,
3273
- cache_source,
3274
- shard,
3275
- ):
3299
+ ctx: click.Context,
3300
+ integration: str,
3301
+ integration_version: str,
3302
+ dry_run: bool,
3303
+ cache_source: str,
3304
+ shard: str,
3305
+ ) -> None:
3276
3306
  with EarlyExitCache.build() as cache:
3277
3307
  cache_key = CacheKey(
3278
3308
  integration=integration,
@@ -3318,13 +3348,13 @@ def early_exit_cache_head(
3318
3348
  )
3319
3349
  @click.pass_context
3320
3350
  def early_exit_cache_get(
3321
- ctx,
3322
- integration,
3323
- integration_version,
3324
- dry_run,
3325
- cache_source,
3326
- shard,
3327
- ):
3351
+ ctx: click.Context,
3352
+ integration: str,
3353
+ integration_version: str,
3354
+ dry_run: bool,
3355
+ cache_source: str,
3356
+ shard: str,
3357
+ ) -> None:
3328
3358
  with EarlyExitCache.build() as cache:
3329
3359
  cache_key = CacheKey(
3330
3360
  integration=integration,
@@ -3401,18 +3431,18 @@ def early_exit_cache_get(
3401
3431
  )
3402
3432
  @click.pass_context
3403
3433
  def early_exit_cache_set(
3404
- ctx,
3405
- integration,
3406
- integration_version,
3407
- dry_run,
3408
- cache_source,
3409
- shard,
3410
- payload,
3411
- log_output,
3412
- applied_count,
3413
- ttl,
3414
- latest_cache_source_digest,
3415
- ):
3434
+ ctx: click.Context,
3435
+ integration: str,
3436
+ integration_version: str,
3437
+ dry_run: bool,
3438
+ cache_source: str,
3439
+ shard: str,
3440
+ payload: str,
3441
+ log_output: str,
3442
+ applied_count: int,
3443
+ ttl: int,
3444
+ latest_cache_source_digest: str,
3445
+ ) -> None:
3416
3446
  with EarlyExitCache.build() as cache:
3417
3447
  cache_key = CacheKey(
3418
3448
  integration=integration,
@@ -3461,13 +3491,13 @@ def early_exit_cache_set(
3461
3491
  )
3462
3492
  @click.pass_context
3463
3493
  def early_exit_cache_delete(
3464
- ctx,
3465
- integration,
3466
- integration_version,
3467
- dry_run,
3468
- cache_source_digest,
3469
- shard,
3470
- ):
3494
+ ctx: click.Context,
3495
+ integration: str,
3496
+ integration_version: str,
3497
+ dry_run: bool,
3498
+ cache_source_digest: str,
3499
+ shard: str,
3500
+ ) -> None:
3471
3501
  with EarlyExitCache.build() as cache:
3472
3502
  cache_key_with_digest = CacheKeyWithDigest(
3473
3503
  integration=integration,
@@ -3498,25 +3528,33 @@ def early_exit_cache_delete(
3498
3528
  type=click.Choice(["config", "vault"]),
3499
3529
  )
3500
3530
  @click.pass_context
3501
- def template(ctx, cluster, namespace, kind, name, path, secret_reader):
3531
+ def template(
3532
+ ctx: click.Context,
3533
+ cluster: str,
3534
+ namespace: str,
3535
+ kind: str,
3536
+ name: str,
3537
+ path: str,
3538
+ secret_reader: str,
3539
+ ) -> None:
3502
3540
  gqlapi = gql.get_api()
3503
3541
  namespaces = gqlapi.query(orb.NAMESPACES_QUERY)["namespaces"]
3504
- namespace_info = [
3542
+ namespaces_info = [
3505
3543
  n
3506
3544
  for n in namespaces
3507
3545
  if n["cluster"]["name"] == cluster and n["name"] == namespace
3508
3546
  ]
3509
- if len(namespace_info) != 1:
3547
+ if len(namespaces_info) != 1:
3510
3548
  print(f"{cluster}/{namespace} error")
3511
3549
  sys.exit(1)
3512
3550
 
3551
+ namespace_info = namespaces_info[0]
3513
3552
  settings = queries.get_app_interface_settings()
3514
3553
  settings["vault"] = secret_reader == "vault"
3515
3554
 
3516
3555
  if path and path.startswith("resources"):
3517
3556
  path = path.replace("resources", "", 1)
3518
3557
 
3519
- [namespace_info] = namespace_info
3520
3558
  ob.aggregate_shared_resources(namespace_info, "openshiftResources")
3521
3559
  openshift_resources = namespace_info.get("openshiftResources")
3522
3560
  for r in openshift_resources:
@@ -3557,7 +3595,9 @@ def template(ctx, cluster, namespace, kind, name, path, secret_reader):
3557
3595
  type=click.Choice(["config", "vault"]),
3558
3596
  )
3559
3597
  @click.pass_context
3560
- def run_prometheus_test(ctx, path, cluster, namespace, secret_reader):
3598
+ def run_prometheus_test(
3599
+ ctx: click.Context, path: str, cluster: str, namespace: str, secret_reader: str
3600
+ ) -> None:
3561
3601
  """Run prometheus tests for the rule associated with the test in the PATH from given
3562
3602
  CLUSTER/NAMESPACE"""
3563
3603
 
@@ -3643,17 +3683,17 @@ def run_prometheus_test(ctx, path, cluster, namespace, secret_reader):
3643
3683
  )
3644
3684
  @click.pass_context
3645
3685
  def alert_to_receiver(
3646
- ctx,
3647
- cluster,
3648
- namespace,
3649
- rules_path,
3650
- alert_name,
3651
- alertmanager_secret_path,
3652
- alertmanager_namespace,
3653
- alertmanager_secret_key,
3654
- secret_reader,
3655
- additional_label,
3656
- ):
3686
+ ctx: click.Context,
3687
+ cluster: str,
3688
+ namespace: str,
3689
+ rules_path: str,
3690
+ alert_name: str,
3691
+ alertmanager_secret_path: str,
3692
+ alertmanager_namespace: str,
3693
+ alertmanager_secret_key: str,
3694
+ secret_reader: str,
3695
+ additional_label: list[str],
3696
+ ) -> None:
3657
3697
  additional_labels = {}
3658
3698
  for al in additional_label:
3659
3699
  try:
@@ -3745,12 +3785,12 @@ def alert_to_receiver(
3745
3785
  print(f"Cannot find alert {alert_name} in rules {rules_path}")
3746
3786
  sys.exit(1)
3747
3787
 
3748
- for al in alert_labels:
3749
- result = amtool.config_routes_test(am_config, al)
3788
+ for label in alert_labels:
3789
+ result = amtool.config_routes_test(am_config, label)
3750
3790
  if not result:
3751
3791
  print(f"Error running amtool: {result}")
3752
3792
  sys.exit(1)
3753
- print("|".join([al["alertname"], str(result)]))
3793
+ print("|".join([label["alertname"], str(result)]))
3754
3794
 
3755
3795
 
3756
3796
  @root.command()
@@ -3758,7 +3798,12 @@ def alert_to_receiver(
3758
3798
  @click.option("--saas-file-name", default=None, help="saas-file to act on.")
3759
3799
  @click.option("--env-name", default=None, help="environment to use for parameters.")
3760
3800
  @click.pass_context
3761
- def saas_dev(ctx, app_name=None, saas_file_name=None, env_name=None) -> None:
3801
+ def saas_dev(
3802
+ ctx: click.Context,
3803
+ app_name: str | None = None,
3804
+ saas_file_name: str | None = None,
3805
+ env_name: str | None = None,
3806
+ ) -> None:
3762
3807
  if not env_name:
3763
3808
  print("env-name must be defined")
3764
3809
  return
@@ -3806,7 +3851,7 @@ def saas_dev(ctx, app_name=None, saas_file_name=None, env_name=None) -> None:
3806
3851
  @click.option("--app-name", default=None, help="app to act on.")
3807
3852
  @click.pass_context
3808
3853
  def saas_targets(
3809
- ctx, saas_file_name: str | None = None, app_name: str | None = None
3854
+ ctx: click.Context, saas_file_name: str | None = None, app_name: str | None = None
3810
3855
  ) -> None:
3811
3856
  """Resolve namespaceSelectors and print all resulting targets of a saas file."""
3812
3857
  console = Console()
@@ -3870,7 +3915,7 @@ def saas_targets(
3870
3915
  default="json",
3871
3916
  type=click.Choice(["json", "yaml"]),
3872
3917
  )
3873
- def query(output, query):
3918
+ def query(output: str, query: str) -> None:
3874
3919
  """Run a raw GraphQL query"""
3875
3920
  gqlapi = gql.get_api()
3876
3921
  result = gqlapi.query(query)
@@ -3884,7 +3929,7 @@ def query(output, query):
3884
3929
  @root.command()
3885
3930
  @click.argument("cluster")
3886
3931
  @click.argument("query")
3887
- def promquery(cluster, query):
3932
+ def promquery(cluster: str, query: str) -> None:
3888
3933
  """Run a PromQL query"""
3889
3934
  config_data = config.get_config()
3890
3935
  auth = {"path": config_data["promql-auth"]["secret_path"], "field": "token"}
@@ -3935,8 +3980,13 @@ def promquery(cluster, query):
3935
3980
  default=False,
3936
3981
  )
3937
3982
  def sre_checkpoint_metadata(
3938
- app_path, parent_ticket, jiraboard, jiradef, create_parent_ticket, dry_run
3939
- ):
3983
+ app_path: str,
3984
+ parent_ticket: str,
3985
+ jiraboard: str,
3986
+ jiradef: str,
3987
+ create_parent_ticket: bool,
3988
+ dry_run: bool,
3989
+ ) -> None:
3940
3990
  """Check an app path for checkpoint-related metadata."""
3941
3991
  data = queries.get_app_metadata(app_path)
3942
3992
  settings = queries.get_app_interface_settings()
@@ -3975,8 +4025,13 @@ def sre_checkpoint_metadata(
3975
4025
  required=True,
3976
4026
  )
3977
4027
  def gpg_encrypt(
3978
- vault_path, vault_secret_version, file_path, openshift_path, output, for_user
3979
- ):
4028
+ vault_path: str,
4029
+ vault_secret_version: str,
4030
+ file_path: str,
4031
+ openshift_path: str,
4032
+ output: str,
4033
+ for_user: str,
4034
+ ) -> None:
3980
4035
  """
3981
4036
  Encrypt the specified secret (local file, vault or openshift) with a
3982
4037
  given users gpg key. This is intended for easily sharing secrets with
@@ -3999,7 +4054,7 @@ def gpg_encrypt(
3999
4054
  @click.option("--channel", help="the channel that state is part of")
4000
4055
  @click.option("--sha", help="the commit sha we want state for")
4001
4056
  @environ(["APP_INTERFACE_STATE_BUCKET"])
4002
- def get_promotion_state(channel: str, sha: str):
4057
+ def get_promotion_state(channel: str, sha: str) -> None:
4003
4058
  from tools.saas_promotion_state.saas_promotion_state import (
4004
4059
  SaasPromotionState,
4005
4060
  )
@@ -4024,7 +4079,7 @@ def get_promotion_state(channel: str, sha: str):
4024
4079
  @click.option("--sha", help="the commit sha we want state for")
4025
4080
  @click.option("--publisher-id", help="the publisher id we want state for")
4026
4081
  @environ(["APP_INTERFACE_STATE_BUCKET"])
4027
- def mark_promotion_state_successful(channel: str, sha: str, publisher_id: str):
4082
+ def mark_promotion_state_successful(channel: str, sha: str, publisher_id: str) -> None:
4028
4083
  from tools.saas_promotion_state.saas_promotion_state import (
4029
4084
  SaasPromotionState,
4030
4085
  )
@@ -4048,7 +4103,9 @@ def mark_promotion_state_successful(channel: str, sha: str, publisher_id: str):
4048
4103
  help="filesystem path to a local app-interface repo",
4049
4104
  default=os.environ.get("APP_INTERFACE_PATH", None),
4050
4105
  )
4051
- def test_change_type(change_type_name: str, role_name: str, app_interface_path: str):
4106
+ def test_change_type(
4107
+ change_type_name: str, role_name: str, app_interface_path: str
4108
+ ) -> None:
4052
4109
  from reconcile.change_owners import tester
4053
4110
 
4054
4111
  # tester.test_change_type(change_type_name, datafile_path)
@@ -4057,7 +4114,7 @@ def test_change_type(change_type_name: str, role_name: str, app_interface_path:
4057
4114
 
4058
4115
  @root.group()
4059
4116
  @click.pass_context
4060
- def sso_client(ctx):
4117
+ def sso_client(ctx: click.Context) -> None:
4061
4118
  """SSO client commands"""
4062
4119
 
4063
4120
 
@@ -4093,7 +4150,7 @@ def sso_client(ctx):
4093
4150
  )
4094
4151
  @click.pass_context
4095
4152
  def create(
4096
- ctx,
4153
+ ctx: click.Context,
4097
4154
  client_name: str,
4098
4155
  contact_email: str,
4099
4156
  keycloak_instance_vault_path: str,
@@ -4127,7 +4184,7 @@ def create(
4127
4184
  @sso_client.command()
4128
4185
  @click.argument("sso-client-vault-secret-path", required=True)
4129
4186
  @click.pass_context
4130
- def remove(ctx, sso_client_vault_secret_path: str):
4187
+ def remove(ctx: click.Context, sso_client_vault_secret_path: str) -> None:
4131
4188
  """Remove an existing SSO client"""
4132
4189
  vault_settings = get_app_interface_vault_settings()
4133
4190
  secret_reader = create_secret_reader(use_vault=vault_settings.vault)
@@ -4174,8 +4231,12 @@ def remove(ctx, sso_client_vault_secret_path: str):
4174
4231
  )
4175
4232
  @click.pass_context
4176
4233
  def external_resources(
4177
- ctx, provision_provider: str, provisioner: str, provider: str, identifier: str
4178
- ):
4234
+ ctx: click.Context,
4235
+ provision_provider: str,
4236
+ provisioner: str,
4237
+ provider: str,
4238
+ identifier: str,
4239
+ ) -> None:
4179
4240
  """External resources commands"""
4180
4241
  ctx.obj["provision_provider"] = provision_provider
4181
4242
  ctx.obj["provisioner"] = provisioner
@@ -4187,7 +4248,7 @@ def external_resources(
4187
4248
 
4188
4249
  @external_resources.command()
4189
4250
  @click.pass_context
4190
- def get_input(ctx):
4251
+ def get_input(ctx: click.Context) -> None:
4191
4252
  """Gets the input data for an external resource asset. Input data is what is used
4192
4253
  in the Reconciliation Job to manage the resource."""
4193
4254
  erv2cli = Erv2Cli(
@@ -4202,7 +4263,7 @@ def get_input(ctx):
4202
4263
 
4203
4264
  @external_resources.command()
4204
4265
  @click.pass_context
4205
- def request_reconciliation(ctx):
4266
+ def request_reconciliation(ctx: click.Context) -> None:
4206
4267
  """Marks a resource as it needs to get reconciled. The itegration will reconcile the resource at
4207
4268
  its next iteration."""
4208
4269
  erv2cli = Erv2Cli(
@@ -4229,7 +4290,7 @@ def request_reconciliation(ctx):
4229
4290
  default=False,
4230
4291
  )
4231
4292
  @click.pass_context
4232
- def migrate(ctx, dry_run: bool, skip_build: bool) -> None:
4293
+ def migrate(ctx: click.Context, dry_run: bool, skip_build: bool) -> None:
4233
4294
  """Migrate an existing external resource managed by terraform-resources to ERv2.
4234
4295
 
4235
4296
 
@@ -4335,7 +4396,7 @@ def migrate(ctx, dry_run: bool, skip_build: bool) -> None:
4335
4396
  @external_resources.command()
4336
4397
  @binary(["docker"])
4337
4398
  @click.pass_context
4338
- def debug_shell(ctx) -> None:
4399
+ def debug_shell(ctx: click.Context) -> None:
4339
4400
  """Enter an ERv2 debug shell to manually migrate resources."""
4340
4401
  # use a temporary directory in $HOME. The MacOS colima default configuration allows docker mounts from $HOME.
4341
4402
  with tempfile.TemporaryDirectory(dir=Path.home(), prefix="erv2-debug.") as _tempdir:
@@ -4374,7 +4435,7 @@ def debug_shell(ctx) -> None:
4374
4435
  prompt=True,
4375
4436
  )
4376
4437
  @click.pass_context
4377
- def force_unlock(ctx, lock_id: str) -> None:
4438
+ def force_unlock(ctx: click.Context, lock_id: str) -> None:
4378
4439
  """Manually unlock the ERv2 terraform state."""
4379
4440
  # use a temporary directory in $HOME. The MacOS colima default configuration allows docker mounts from $HOME.
4380
4441
  with tempfile.TemporaryDirectory(
@@ -4415,14 +4476,14 @@ def force_unlock(ctx, lock_id: str) -> None:
4415
4476
  @click.option("--include-pattern", help="Only include images that match this pattern")
4416
4477
  @click.pass_context
4417
4478
  def container_images(
4418
- ctx,
4419
- cluster_name,
4420
- namespace_name,
4421
- thread_pool_size,
4422
- use_jump_host,
4423
- exclude_pattern,
4424
- include_pattern,
4425
- ):
4479
+ ctx: click.Context,
4480
+ cluster_name: str,
4481
+ namespace_name: str,
4482
+ thread_pool_size: int,
4483
+ use_jump_host: bool,
4484
+ exclude_pattern: str,
4485
+ include_pattern: str,
4486
+ ) -> None:
4426
4487
  from tools.cli_commands.container_images_report import get_all_pods_images
4427
4488
 
4428
4489
  results = get_all_pods_images(
@@ -4469,7 +4530,7 @@ You can view the source of this Markdown to extract the JSON data.
4469
4530
  @get.command(help="Get all app tekton pipelines providers roles and users")
4470
4531
  @click.argument("app-name")
4471
4532
  @click.pass_context
4472
- def tekton_roles_and_users(ctx, app_name):
4533
+ def tekton_roles_and_users(ctx: click.Context, app_name: str) -> None:
4473
4534
  pp_namespaces = {
4474
4535
  p.namespace.path
4475
4536
  for p in get_tekton_pipeline_providers()
@@ -4496,6 +4557,7 @@ def tekton_roles_and_users(ctx, app_name):
4496
4557
  if not seen:
4497
4558
  seen = True
4498
4559
 
4560
+ users: str | list[str]
4499
4561
  if ctx.obj["options"]["output"] == "table":
4500
4562
  users = ", ".join([u.org_username for u in r.users])
4501
4563
  else:
@@ -4515,7 +4577,7 @@ def tekton_roles_and_users(ctx, app_name):
4515
4577
  )
4516
4578
  @click.argument("aws-account")
4517
4579
  @click.pass_context
4518
- def log_group_usage(ctx, aws_account):
4580
+ def log_group_usage(ctx: click.Context, aws_account: str) -> None:
4519
4581
  accounts = queries.get_aws_accounts(name=aws_account)
4520
4582
  if not accounts:
4521
4583
  print("no aws account found with that name")
@@ -4525,7 +4587,7 @@ def log_group_usage(ctx, aws_account):
4525
4587
  settings = queries.get_app_interface_settings()
4526
4588
  secret_reader = SecretReader(settings=settings)
4527
4589
  columns = ["log_group", "stored_bytes", "retention_days"]
4528
- results = []
4590
+ results: list[dict[str, str]] = []
4529
4591
 
4530
4592
  with AWSApi(1, [account], settings, secret_reader) as aws:
4531
4593
  session = aws.get_session(account["name"])