qontract-reconcile 0.10.2.dev414__py3-none-any.whl → 0.10.2.dev456__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. {qontract_reconcile-0.10.2.dev414.dist-info → qontract_reconcile-0.10.2.dev456.dist-info}/METADATA +2 -2
  2. {qontract_reconcile-0.10.2.dev414.dist-info → qontract_reconcile-0.10.2.dev456.dist-info}/RECORD +55 -53
  3. {qontract_reconcile-0.10.2.dev414.dist-info → qontract_reconcile-0.10.2.dev456.dist-info}/WHEEL +1 -1
  4. reconcile/aus/advanced_upgrade_service.py +3 -0
  5. reconcile/aus/aus_sts_gate_handler.py +59 -0
  6. reconcile/aus/base.py +115 -8
  7. reconcile/aus/models.py +2 -0
  8. reconcile/aus/ocm_addons_upgrade_scheduler_org.py +1 -0
  9. reconcile/aus/ocm_upgrade_scheduler.py +8 -1
  10. reconcile/aus/ocm_upgrade_scheduler_org.py +20 -5
  11. reconcile/aus/version_gate_approver.py +1 -16
  12. reconcile/aus/version_gates/sts_version_gate_handler.py +5 -72
  13. reconcile/automated_actions/config/integration.py +1 -1
  14. reconcile/aws_ecr_image_pull_secrets.py +1 -1
  15. reconcile/change_owners/change_owners.py +100 -34
  16. reconcile/cli.py +63 -5
  17. reconcile/external_resources/manager.py +7 -18
  18. reconcile/external_resources/model.py +8 -8
  19. reconcile/external_resources/secrets_sync.py +2 -3
  20. reconcile/external_resources/state.py +1 -34
  21. reconcile/gql_definitions/common/aws_vpc_requests.py +3 -0
  22. reconcile/gql_definitions/common/clusters.py +2 -0
  23. reconcile/gql_definitions/external_resources/external_resources_namespaces.py +3 -1
  24. reconcile/gql_definitions/fragments/aws_vpc_request.py +5 -0
  25. reconcile/gql_definitions/introspection.json +48 -0
  26. reconcile/gql_definitions/rhcs/certs.py +20 -74
  27. reconcile/gql_definitions/rhcs/openshift_resource_rhcs_cert.py +43 -0
  28. reconcile/gql_definitions/terraform_resources/terraform_resources_namespaces.py +5 -1
  29. reconcile/gql_definitions/vpc_peerings_validator/vpc_peerings_validator.py +3 -0
  30. reconcile/gql_definitions/vpc_peerings_validator/vpc_peerings_validator_peered_cluster_fragment.py +1 -0
  31. reconcile/ocm_machine_pools.py +12 -6
  32. reconcile/openshift_base.py +60 -2
  33. reconcile/openshift_namespaces.py +3 -4
  34. reconcile/openshift_rhcs_certs.py +71 -34
  35. reconcile/rhidp/sso_client/base.py +15 -4
  36. reconcile/templates/rosa-classic-cluster-creation.sh.j2 +1 -1
  37. reconcile/templates/rosa-hcp-cluster-creation.sh.j2 +1 -1
  38. reconcile/terraform_vpc_resources/integration.py +10 -7
  39. reconcile/typed_queries/saas_files.py +9 -4
  40. reconcile/utils/binary.py +7 -12
  41. reconcile/utils/environ.py +5 -0
  42. reconcile/utils/gitlab_api.py +12 -0
  43. reconcile/utils/glitchtip/client.py +2 -2
  44. reconcile/utils/jjb_client.py +19 -3
  45. reconcile/utils/jobcontroller/controller.py +1 -1
  46. reconcile/utils/json.py +5 -1
  47. reconcile/utils/oc.py +144 -113
  48. reconcile/utils/rhcsv2_certs.py +87 -21
  49. reconcile/utils/rosa/session.py +16 -0
  50. reconcile/utils/saasherder/saasherder.py +20 -7
  51. reconcile/utils/terrascript_aws_client.py +140 -50
  52. reconcile/utils/vault.py +1 -1
  53. reconcile/vpc_peerings_validator.py +13 -0
  54. tools/cli_commands/erv2.py +1 -3
  55. {qontract_reconcile-0.10.2.dev414.dist-info → qontract_reconcile-0.10.2.dev456.dist-info}/entry_points.txt +0 -0
reconcile/aus/base.py CHANGED
@@ -20,6 +20,11 @@ from pydantic import BaseModel
 from requests.exceptions import HTTPError
 from semver import VersionInfo
 
+from reconcile.aus.aus_sts_gate_handler import (
+    AUS_VERSION_GATE_APPROVALS_LABEL,
+    STS_GATE_LABEL,
+    AUSSTSGateHandler,
+)
 from reconcile.aus.cluster_version_data import (
     VersionData,
     VersionDataMap,
@@ -79,7 +84,9 @@ from reconcile.utils.datetime_util import (
 from reconcile.utils.defer import defer
 from reconcile.utils.disabled_integrations import integration_is_enabled
 from reconcile.utils.filtering import remove_none_values_from_dict
+from reconcile.utils.jobcontroller.controller import build_job_controller
 from reconcile.utils.ocm.addons import AddonService, AddonServiceV1, AddonServiceV2
+from reconcile.utils.ocm.base import LabelContainer
 from reconcile.utils.ocm.clusters import (
     OCMCluster,
 )
@@ -102,6 +109,7 @@ from reconcile.utils.runtime.integration import (
     PydanticRunParams,
     QontractReconcileIntegration,
 )
+from reconcile.utils.secret_reader import SecretReaderBase
 from reconcile.utils.semver_helper import (
     get_version_prefix,
     parse_semver,
@@ -112,11 +120,22 @@ from reconcile.utils.state import init_state
 MIN_DELTA_MINUTES = 6
 
 
+class RosaRoleUpgradeHandlerParams(PydanticRunParams):
+    job_controller_cluster: str
+    job_controller_namespace: str
+    rosa_job_service_account: str
+    rosa_role: str
+    rosa_job_image: str | None = None
+    integration_name: str
+    integration_version: str
+
+
 class AdvancedUpgradeSchedulerBaseIntegrationParams(PydanticRunParams):
     ocm_environment: str | None = None
     ocm_organization_ids: set[str] | None = None
     excluded_ocm_organization_ids: set[str] | None = None
     ignore_sts_clusters: bool = False
+    rosa_role_upgrade_handler_params: RosaRoleUpgradeHandlerParams | None = None
 
 
 class ReconcileError(Exception):
@@ -412,7 +431,12 @@ class AbstractUpgradePolicy(ABC, BaseModel):
     state: str | None = None
 
     @abstractmethod
-    def create(self, ocm_api: OCMBaseClient) -> None:
+    def create(
+        self,
+        ocm_api: OCMBaseClient,
+        rosa_role_upgrade_handler_params: RosaRoleUpgradeHandlerParams | None = None,
+        secret_reader: SecretReaderBase | None = None,
+    ) -> None:
         pass
 
     @abstractmethod
@@ -436,7 +460,15 @@ class AddonUpgradePolicy(AbstractUpgradePolicy, arbitrary_types_allowed=True):
     addon_id: str
     addon_service: AddonService
 
-    def create(self, ocm_api: OCMBaseClient) -> None:
+    class Config:
+        arbitrary_types_allowed = True
+
+    def create(
+        self,
+        ocm_api: OCMBaseClient,
+        rosa_role_upgrade_handler_params: RosaRoleUpgradeHandlerParams | None = None,
+        secret_reader: SecretReaderBase | None = None,
+    ) -> None:
         self.addon_service.create_addon_upgrade_policy(
             ocm_api=ocm_api,
             cluster_id=self.cluster.id,
@@ -469,14 +501,64 @@ class AddonUpgradePolicy(AbstractUpgradePolicy, arbitrary_types_allowed=True):
 class ClusterUpgradePolicy(AbstractUpgradePolicy):
     """Class to create ClusterUpgradePolicies in OCM"""
 
-    def create(self, ocm_api: OCMBaseClient) -> None:
+    organization_id: str
+    cluster_labels: LabelContainer
+
+    def create(
+        self,
+        ocm_api: OCMBaseClient,
+        rosa_role_upgrade_handler_params: RosaRoleUpgradeHandlerParams | None = None,
+        secret_reader: SecretReaderBase | None = None,
+    ) -> None:
         policy = {
             "version": self.version,
             "schedule_type": "manual",
             "next_run": self.next_run,
         }
+        if (
+            rosa_role_upgrade_handler_params
+            and secret_reader
+            and self.should_upgrade_roles()
+        ):
+            logging.info(f"Updating account and operator roles for {self.cluster.name}")
+            aus_sts_gate_handler = AUSSTSGateHandler(
+                job_controller=build_job_controller(
+                    integration=rosa_role_upgrade_handler_params.integration_name,
+                    integration_version=rosa_role_upgrade_handler_params.integration_version,
+                    cluster=rosa_role_upgrade_handler_params.job_controller_cluster,
+                    namespace=rosa_role_upgrade_handler_params.job_controller_namespace,
+                    secret_reader=secret_reader,
+                    dry_run=False,
+                ),
+                aws_iam_role=rosa_role_upgrade_handler_params.rosa_role,
+                rosa_job_service_account=rosa_role_upgrade_handler_params.rosa_job_service_account,
+                rosa_job_image=rosa_role_upgrade_handler_params.rosa_job_image,
+            )
+            if not aus_sts_gate_handler.upgrade_rosa_roles(
+                ocm_api=ocm_api,
+                cluster=self.cluster,
+                dry_run=False,
+                upgrade_version=self.version,
+                ocm_org_id=self.organization_id,
+            ):
+                logging.error(
+                    f"Failed to update account and operator roles for {self.cluster.name}"
+                )
         create_upgrade_policy(ocm_api, self.cluster.id, policy)
 
+    def should_upgrade_roles(self) -> bool:
+        handler_csv = self.cluster_labels.get_label_value(
+            AUS_VERSION_GATE_APPROVALS_LABEL
+        )
+        if not handler_csv:
+            return False
+
+        return (
+            self.cluster.is_sts()
+            and self.cluster.is_rosa_classic()
+            and STS_GATE_LABEL in set(handler_csv.split(","))
+        )
+
     def delete(self, ocm_api: OCMBaseClient) -> None:
         raise NotImplementedError("ClusterUpgradePolicy.delete() not implemented")
 
@@ -494,7 +576,12 @@ class ClusterUpgradePolicy(AbstractUpgradePolicy):
 class ControlPlaneUpgradePolicy(AbstractUpgradePolicy):
     """Class to create and delete ControlPlanUpgradePolicies in OCM"""
 
-    def create(self, ocm_api: OCMBaseClient) -> None:
+    def create(
+        self,
+        ocm_api: OCMBaseClient,
+        rosa_role_upgrade_handler_params: RosaRoleUpgradeHandlerParams | None = None,
+        secret_reader: SecretReaderBase | None = None,
+    ) -> None:
         policy = {
             "version": self.version,
             "schedule_type": "manual",
@@ -522,7 +609,12 @@ class NodePoolUpgradePolicy(AbstractUpgradePolicy):
 
     node_pool: str
 
-    def create(self, ocm_api: OCMBaseClient) -> None:
+    def create(
+        self,
+        ocm_api: OCMBaseClient,
+        rosa_role_upgrade_handler_params: RosaRoleUpgradeHandlerParams | None = None,
+        secret_reader: SecretReaderBase | None = None,
+    ) -> None:
         policy = {
             "version": self.version,
             "schedule_type": "manual",
@@ -554,7 +646,13 @@ class UpgradePolicyHandler(BaseModel, extra="forbid"):
     action: str
     policy: AbstractUpgradePolicy
 
-    def act(self, dry_run: bool, ocm_api: OCMBaseClient) -> None:
+    def act(
+        self,
+        dry_run: bool,
+        ocm_api: OCMBaseClient,
+        rosa_role_upgrade_handler_params: RosaRoleUpgradeHandlerParams | None = None,
+        secret_reader: SecretReaderBase | None = None,
+    ) -> None:
         logging.info(f"{self.action} {self.policy.summarize()}")
         if dry_run:
             return
@@ -564,7 +662,7 @@ class UpgradePolicyHandler(BaseModel, extra="forbid"):
         elif self.action == "delete":
             self.policy.delete(ocm_api)
         elif self.action == "create":
-            self.policy.create(ocm_api)
+            self.policy.create(ocm_api, rosa_role_upgrade_handler_params, secret_reader)
 
 
 def fetch_current_state(
@@ -582,6 +680,7 @@ def fetch_current_state(
         )
         current_state.extend(
             AddonUpgradePolicy(
+                organization_id=spec.org.org_id,
                 id=addon_upgrade_policy.id,
                 addon_id=addon_spec.addon.addon.id,
                 cluster=spec.cluster,
@@ -618,6 +717,8 @@ def fetch_current_state(
     for upgrade_policy in upgrade_policies:
         policy = upgrade_policy | {
             "cluster": spec.cluster,
+            "organization_id": spec.org.org_id,
+            "cluster_labels": spec.cluster_labels,
         }
         current_state.append(ClusterUpgradePolicy(**policy))
 
@@ -1016,6 +1117,8 @@ def _create_upgrade_policy(
     )
     return ClusterUpgradePolicy(
         cluster=spec.cluster,
+        organization_id=spec.org.org_id,
+        cluster_labels=spec.cluster_labels,
         version=version,
         schedule_type="manual",
         next_run=next_schedule,
@@ -1122,6 +1225,8 @@ def calculate_diff(
             UpgradePolicyHandler(
                 action="create",
                 policy=AddonUpgradePolicy(
+                    action="create",
+                    organization_id=spec.org.org_id,
                     cluster=spec.cluster,
                     version=version,
                     schedule_type="manual",
@@ -1186,6 +1291,8 @@ def act(
     dry_run: bool,
     diffs: list[UpgradePolicyHandler],
     ocm_api: OCMBaseClient,
+    rosa_role_upgrade_handler_params: RosaRoleUpgradeHandlerParams | None = None,
+    secret_reader: SecretReaderBase | None = None,
     addon_id: str | None = None,
 ) -> None:
     diffs.sort(key=sort_diffs)
@@ -1198,7 +1305,7 @@ def act(
         ):
             continue
         try:
-            diff.act(dry_run, ocm_api)
+            diff.act(dry_run, ocm_api, rosa_role_upgrade_handler_params, secret_reader)
         except HTTPError as e:
             logging.error(f"{policy.cluster.name}: {e}: {e.response.text}")
 
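Taken together, these hunks gate the new ROSA role upgrade behind a cluster label: ClusterUpgradePolicy.create() only builds an AUSSTSGateHandler when should_upgrade_roles() returns true. The standalone Python sketch below is not part of the package diff; it only mirrors that decision. The stub class and the two label values are illustrative placeholders, while the real LabelContainer type and the AUS_VERSION_GATE_APPROVALS_LABEL / STS_GATE_LABEL constants live in the new reconcile/aus/aus_sts_gate_handler.py module.

# Illustrative sketch only -- FakeLabels stands in for
# reconcile.utils.ocm.base.LabelContainer, and the two constants are
# placeholders for the real label key/value defined in aus_sts_gate_handler.py.
AUS_VERSION_GATE_APPROVALS_LABEL = "example/version-gate-approvals"  # placeholder key
STS_GATE_LABEL = "example-gate-sts"  # placeholder value


class FakeLabels:
    def __init__(self, labels: dict[str, str]) -> None:
        self._labels = labels

    def get_label_value(self, key: str) -> str | None:
        return self._labels.get(key)


def should_upgrade_roles(is_sts: bool, is_rosa_classic: bool, labels: FakeLabels) -> bool:
    # Same decision as ClusterUpgradePolicy.should_upgrade_roles() above: the label
    # holds a CSV of approved gate handlers, and roles are only upgraded for
    # ROSA classic STS clusters whose CSV lists the STS gate.
    handler_csv = labels.get_label_value(AUS_VERSION_GATE_APPROVALS_LABEL)
    if not handler_csv:
        return False
    return is_sts and is_rosa_classic and STS_GATE_LABEL in set(handler_csv.split(","))


labels = FakeLabels({AUS_VERSION_GATE_APPROVALS_LABEL: f"{STS_GATE_LABEL},other-gate"})
assert should_upgrade_roles(True, True, labels) is True        # opted-in ROSA classic STS cluster
assert should_upgrade_roles(True, False, labels) is False      # not ROSA classic: skip role upgrade
assert should_upgrade_roles(True, True, FakeLabels({})) is False  # no approval label: skip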
reconcile/aus/models.py CHANGED
@@ -14,6 +14,7 @@ from reconcile.aus.healthchecks import AUSClusterHealth
 from reconcile.gql_definitions.fragments.aus_organization import AUSOCMOrganization
 from reconcile.gql_definitions.fragments.upgrade_policy import ClusterUpgradePolicyV1
 from reconcile.utils.ocm.addons import OCMAddonInstallation
+from reconcile.utils.ocm.base import LabelContainer
 from reconcile.utils.ocm.clusters import OCMCluster
 from reconcile.utils.semver_helper import parse_semver
 
@@ -33,6 +34,7 @@ class ClusterUpgradeSpec(BaseModel):
 
     org: AUSOCMOrganization
     cluster: OCMCluster
+    cluster_labels: LabelContainer | None = None
     upgrade_policy: ClusterUpgradePolicyV1 = Field(..., alias="upgradePolicy")
     health: AUSClusterHealth
     node_pools: list[NodePoolSpec] = Field(default_factory=list, alias="nodePools")
reconcile/aus/ocm_addons_upgrade_scheduler_org.py CHANGED
@@ -260,6 +260,7 @@ def calculate_diff(
             aus.UpgradePolicyHandler(
                 action="delete",
                 policy=aus.AddonUpgradePolicy(
+                    organization_id=org_upgrade_spec.org.org_id,
                     cluster=current.cluster,
                     version=current.schedule_type,
                     id=current.id,
reconcile/aus/ocm_upgrade_scheduler.py CHANGED
@@ -82,7 +82,14 @@ class OCMClusterUpgradeSchedulerIntegration(
             version_data,
             integration=self.name,
         )
-        aus.act(dry_run, diffs, ocm_api)
+
+        aus.act(
+            dry_run,
+            diffs,
+            ocm_api,
+            self.params.rosa_role_upgrade_handler_params,
+            self.secret_reader,
+        )
 
     def expose_version_data_metrics(
         self,
reconcile/aus/ocm_upgrade_scheduler_org.py CHANGED
@@ -1,4 +1,5 @@
 from collections import defaultdict
+from dataclasses import dataclass
 
 from reconcile.aus.healthchecks import (
     AUSClusterHealthCheckProvider,
@@ -13,6 +14,7 @@ from reconcile.aus.node_pool_spec import get_node_pool_specs_by_org_cluster
 from reconcile.aus.ocm_upgrade_scheduler import OCMClusterUpgradeSchedulerIntegration
 from reconcile.gql_definitions.fragments.aus_organization import AUSOCMOrganization
 from reconcile.gql_definitions.fragments.ocm_environment import OCMEnvironment
+from reconcile.utils.ocm.base import LabelContainer
 from reconcile.utils.ocm.clusters import (
     OCMCluster,
     discover_clusters_for_organizations,
@@ -22,6 +24,12 @@ from reconcile.utils.ocm_base_client import init_ocm_base_client
 QONTRACT_INTEGRATION = "ocm-upgrade-scheduler-org"
 
 
+@dataclass
+class ClusterUpgradeSpecWithLabels:
+    cluster: OCMCluster
+    cluster_labels: LabelContainer
+
+
 class OCMClusterUpgradeSchedulerOrgIntegration(OCMClusterUpgradeSchedulerIntegration):
     @property
     def name(self) -> str:
@@ -60,7 +68,10 @@ class OCMClusterUpgradeSchedulerOrgIntegration(OCMClusterUpgradeSchedulerIntegra
                 specs=self._build_cluster_upgrade_specs(
                     org=org,
                     clusters_by_name={
-                        c.ocm_cluster.name: c.ocm_cluster
+                        c.ocm_cluster.name: ClusterUpgradeSpecWithLabels(
+                            cluster=c.ocm_cluster,
+                            cluster_labels=c.labels,
+                        )
                         for c in clusters_by_org[org.org_id]
                     },
                     cluster_health_provider=build_cluster_health_providers_for_organization(
@@ -79,7 +90,7 @@ class OCMClusterUpgradeSchedulerOrgIntegration(OCMClusterUpgradeSchedulerIntegra
     def _build_cluster_upgrade_specs(
         self,
         org: AUSOCMOrganization,
-        clusters_by_name: dict[str, OCMCluster],
+        clusters_by_name: dict[str, ClusterUpgradeSpecWithLabels],
        cluster_health_provider: AUSClusterHealthCheckProvider,
         node_pool_specs_by_cluster_id: dict[str, list[NodePoolSpec]],
     ) -> list[ClusterUpgradeSpec]:
@@ -87,12 +98,16 @@ class OCMClusterUpgradeSchedulerOrgIntegration(OCMClusterUpgradeSchedulerIntegra
             ClusterUpgradeSpec(
                 org=org,
                 upgradePolicy=cluster.upgrade_policy,
-                cluster=clusters_by_name[cluster.name],
+                cluster=clusters_by_name[cluster.name].cluster,
+                cluster_labels=clusters_by_name[cluster.name].cluster_labels,
                 health=cluster_health_provider.cluster_health(
-                    cluster_external_id=clusters_by_name[cluster.name].external_id,
+                    cluster_external_id=clusters_by_name[
+                        cluster.name
+                    ].cluster.external_id,
                     org_id=org.org_id,
                 ),
-                nodePools=node_pool_specs_by_cluster_id.get(ocm_cluster.id) or [],
+                nodePools=node_pool_specs_by_cluster_id.get(ocm_cluster.cluster.id)
+                or [],
             )
             for cluster in org.upgrade_policy_clusters or []
             # clusters that are not in the UUID dict will be ignored because
reconcile/aus/version_gate_approver.py CHANGED
@@ -14,9 +14,6 @@ from reconcile.gql_definitions.common.ocm_environments import (
 )
 from reconcile.utils import gql
 from reconcile.utils.grouping import group_by
-from reconcile.utils.jobcontroller.controller import (
-    build_job_controller,
-)
 from reconcile.utils.ocm.base import (
     ClusterDetails,
     LabelContainer,
@@ -63,19 +60,7 @@ class VersionGateApprover(QontractReconcileIntegration[VersionGateApproverParams
 
     def initialize_handlers(self, query_func: Callable) -> None:
         self.handlers: dict[str, GateHandler] = {
-            sts_version_gate_handler.GATE_LABEL: sts_version_gate_handler.STSGateHandler(
-                job_controller=build_job_controller(
-                    integration=QONTRACT_INTEGRATION,
-                    integration_version=QONTRACT_INTEGRATION_VERSION,
-                    cluster=self.params.job_controller_cluster,
-                    namespace=self.params.job_controller_namespace,
-                    secret_reader=self.secret_reader,
-                    dry_run=False,
-                ),
-                aws_iam_role=self.params.rosa_role,
-                rosa_job_service_account=self.params.rosa_job_service_account,
-                rosa_job_image=self.params.rosa_job_image,
-            ),
+            sts_version_gate_handler.GATE_LABEL: sts_version_gate_handler.STSGateHandler(),
             ocp_gate_handler.GATE_LABEL: ocp_gate_handler.OCPGateHandler(),
             ingress_gate_handler.GATE_LABEL: ingress_gate_handler.IngressGateHandler(),
         }
reconcile/aus/version_gates/sts_version_gate_handler.py CHANGED
@@ -1,27 +1,16 @@
-import logging
-
 from reconcile.aus.version_gates.handler import GateHandler
-from reconcile.utils.jobcontroller.controller import K8sJobController
 from reconcile.utils.ocm.base import OCMCluster, OCMVersionGate
 from reconcile.utils.ocm_base_client import OCMBaseClient
-from reconcile.utils.rosa.rosa_cli import RosaCliError
-from reconcile.utils.rosa.session import RosaSession
 
 GATE_LABEL = "api.openshift.com/gate-sts"
 
 
 class STSGateHandler(GateHandler):
-    def __init__(
-        self,
-        job_controller: K8sJobController,
-        aws_iam_role: str,
-        rosa_job_service_account: str | None = None,
-        rosa_job_image: str | None = None,
-    ) -> None:
-        self.job_controller = job_controller
-        self.aws_iam_role = aws_iam_role
-        self.rosa_job_image = rosa_job_image
-        self.rosa_job_service_account = rosa_job_service_account
+    """
+    This handler is used to handle the STS version gate.
+    Right now we just ack all gate-sts gates
+    The actual job of role upgrade is now a part of AUS and is handled by the AUSSTSGateHandler.
+    """
 
     @staticmethod
     def gate_applicable_to_cluster(cluster: OCMCluster) -> bool:
@@ -41,60 +30,4 @@ class STSGateHandler(GateHandler):
         gate: OCMVersionGate,
         dry_run: bool,
     ) -> bool:
-        if (
-            not cluster.id
-            or not cluster.aws
-            or not cluster.aws.sts
-            or not cluster.is_sts()
-        ):
-            # checked already but mypy :/
-            return False
-
-        if cluster.is_rosa_hypershift():
-            # thanks to hypershift managed policies, there is nothing to do for us here
-            # returning True will ack the version gate
-            return True
-        if not cluster.is_rosa_classic():
-            # we manage roels only for rosa classic clusters
-            # returning here will prevent OSD STS clusters to be handled right now
-            logging.error(
-                f"Cluster {cluster.id} is not a ROSA cluster. "
-                "STS version gates are only handled for ROSA classic clusters."
-            )
-            return False
-
-        rosa = RosaSession(
-            aws_account_id=cluster.aws.aws_account_id,
-            aws_region=cluster.region.id,
-            aws_iam_role=self.aws_iam_role,
-            ocm_org_id=ocm_org_id,
-            ocm_api=ocm_api,
-            job_controller=self.job_controller,
-            image=self.rosa_job_image,
-            service_account=self.rosa_job_service_account,
-        )
-
-        try:
-            # account role handling
-            account_role_prefix = cluster.aws.account_role_prefix
-            if not account_role_prefix:
-                raise Exception(
-                    f"Can't upgrade account roles. Cluster {cluster.name} does not define spec.aws.account_role_prefix"
-                )
-            rosa.upgrade_account_roles(
-                role_prefix=account_role_prefix,
-                minor_version=gate.version_raw_id_prefix,
-                channel_group=cluster.version.channel_group,
-                dry_run=dry_run,
-            )
-
-            # operator role handling
-            rosa.upgrade_operator_roles(
-                cluster_id=cluster.id,
-                dry_run=dry_run,
-            )
-        except RosaCliError as e:
-            logging.error(f"Failed to upgrade roles for cluster {cluster.name}: {e}")
-            e.write_logs_to_logger(logging.error)
-            return False
         return True
reconcile/automated_actions/config/integration.py CHANGED
@@ -7,7 +7,7 @@ from collections.abc import (
 from typing import Any
 
 import yaml
-from kubernetes.client import (  # type: ignore[attr-defined]
+from kubernetes.client import (
     ApiClient,
     V1ConfigMap,
     V1ObjectMeta,
reconcile/aws_ecr_image_pull_secrets.py CHANGED
@@ -21,7 +21,7 @@ def get_password(token: str) -> str:
 
 def construct_dockercfg_secret_data(data: Mapping[str, Any]) -> dict[str, str]:
     auth_data = data["authorizationData"][0]
-    server = auth_data["proxyEndpoint"]
+    server = auth_data["proxyEndpoint"].replace("https://", "")
     token = auth_data["authorizationToken"]
     password = get_password(token)
     data = {
reconcile/change_owners/change_owners.py CHANGED
@@ -23,6 +23,7 @@ from reconcile.change_owners.changes import (
 )
 from reconcile.change_owners.decision import (
     ChangeDecision,
+    ChangeResponsibles,
     DecisionCommand,
     apply_decisions_to_changes,
     get_approver_decisions_from_mr_comments,
@@ -115,6 +116,77 @@ def manage_conditional_label(
     return set(new_labels)
 
 
+def build_status_message(
+    self_serviceable: bool,
+    authoritative: bool,
+    change_admitted: bool,
+    approver_reachability: set[str],
+    supported_commands: list[str],
+) -> str:
+    """
+    Build a user-friendly status message based on the MR state.
+    """
+    approver_section = _build_approver_contact_section(approver_reachability)
+
+    # Check if changes are not admitted (security gate - takes priority)
+    if not change_admitted:
+        return f"""## ⏸️ Approval Required
+Your changes need `/good-to-test` approval from a listed approver before review can begin.
+
+{approver_section}"""
+
+    commands_text = (
+        f"**Available commands:** {' '.join(f'`{cmd}`' for cmd in supported_commands)}"
+    )
+
+    code_warning = ""
+    if not authoritative:
+        code_warning = "⚠️ **Code changes outside of data and resources detected** - please review carefully\n\n"
+
+    if self_serviceable:
+        return f"""## ✅ Ready for Review
+Get `/lgtm` approval from the listed approvers below.
+
+{code_warning}{approver_section}
+
+{commands_text}"""
+
+    return f"""## 🔍 AppSRE Review Required
+**What happens next:**
+* AppSRE will review via their [review queue](https://gitlab.cee.redhat.com/service/app-interface-output/-/blob/master/app-interface-review-queue.md)
+* Please don't ping directly unless this is **urgent**
+* See [etiquette guide](https://gitlab.cee.redhat.com/service/app-interface#app-interface-etiquette) for more info
+
+{code_warning}{approver_section}
+
+{commands_text}"""
+
+
+def _build_approver_contact_section(approver_reachability: set[str]) -> str:
+    """Build the approver contact information section."""
+    if not approver_reachability:
+        return ""
+
+    return "**Reach out to approvers:**\n" + "\n".join([
+        f"* {ar}" for ar in approver_reachability
+    ])
+
+
+def _format_change_responsible(cr: ChangeResponsibles) -> str:
+    """
+    Format a ChangeResponsibles object.
+    """
+    usernames = [
+        f"@{a.org_username}"
+        if (a.tag_on_merge_requests or len(cr.approvers) == 1)
+        else a.org_username
+        for a in cr.approvers
+    ]
+
+    usernames_text = " ".join(usernames)
+    return f"<details><summary>{cr.context}</summary>{usernames_text}</details>"
+
+
 def write_coverage_report_to_mr(
     self_serviceable: bool,
     change_decisions: list[ChangeDecision],
@@ -135,14 +207,11 @@ def write_coverage_report_to_mr(
         startswith=change_coverage_report_header,
     )
 
-    # add new report comment
+    # Build change coverage table
     results = []
    approver_reachability = set()
     for d in change_decisions:
-        approvers = [
-            f"{cr.context} - {' '.join([f'@{a.org_username}' if (a.tag_on_merge_requests or len(cr.approvers) == 1) else a.org_username for a in cr.approvers])}"
-            for cr in d.change_responsibles
-        ]
+        approvers = [_format_change_responsible(cr) for cr in d.change_responsibles]
         if d.coverable_by_fragment_decisions:
             approvers.append(
                 "automatically approved if all sub-properties are approved"
@@ -164,41 +233,33 @@ def write_coverage_report_to_mr(
                 item["status"] = "hold"
             elif d.is_approved():
                 item["status"] = "approved"
-        item["approvers"] = approvers
+        item["approvers"] = "".join(approvers)
         results.append(item)
+
     coverage_report = format_table(
         results, ["file", "change", "status", "approvers"], table_format="github"
     )
 
-    self_serviceability_hint = "All changes require an `/lgtm` from a listed approver "
-    if not self_serviceable:
-        self_serviceability_hint += (
-            "but <b>not all changes are self-serviceable and require AppSRE approval</b>."
-            "The AppSRE Interrupt Catcher (IC) will review your Merge Request (MR) as it comes up in their "
-            "<a href='https://gitlab.cee.redhat.com/service/app-interface-output/-/blob/master/app-interface-review-queue.md'>queue</a>, "
-            "please do not ping them directly unless this is <b>urgent</b>."
-            "\nPlease see https://gitlab.cee.redhat.com/service/app-interface#app-interface-etiquette for more information. Thank you :)"
-        )
-    if not authoritative:
-        self_serviceability_hint += "\n\nchanges outside of data and resources detected - <b>PAY EXTRA ATTENTION WHILE REVIEWING</b>\n\n"
-
-    if not change_admitted:
-        self_serviceability_hint += "\n\nchanges are not admitted. Please request `/good-to-test` from one of the approvers.\n\n"
-
-    approver_reachability_hint = "Reach out to approvers for reviews"
-    if approver_reachability:
-        approver_reachability_hint += " on\n" + "\n".join([
-            f"* {ar}" for ar in approver_reachability or []
-        ])
-    gl.add_comment_to_merge_request(
-        merge_request,
-        f"{change_coverage_report_header}<br/>"
-        f"{self_serviceability_hint}\n"
-        f"{coverage_report}\n\n"
-        f"{approver_reachability_hint}\n\n"
-        + f"Supported commands: {' '.join([f'`{d.value}`' for d in DecisionCommand])} ",
+    # Build user-friendly status message
+    supported_commands = [d.value for d in DecisionCommand]
+    status_message = build_status_message(
+        self_serviceable=self_serviceable,
+        authoritative=authoritative,
+        change_admitted=change_admitted,
+        approver_reachability=approver_reachability,
+        supported_commands=supported_commands,
     )
 
+    # Create the full comment
+    full_comment = f"""{change_coverage_report_header}
+
+{status_message}
+
+## 📋 Change Summary
+{coverage_report}"""
+
+    gl.add_comment_to_merge_request(merge_request, full_comment)
+
 
 def write_coverage_report_to_stdout(change_decisions: list[ChangeDecision]) -> None:
     results = []
@@ -391,7 +452,12 @@ def run(
             good_to_test_approvers,
         )
         approver_decisions = get_approver_decisions_from_mr_comments(
-            gl.get_merge_request_comments(merge_request, include_description=True)
+            gl.get_merge_request_comments(
+                merge_request,
+                include_description=True,
+                include_approvals=True,
+                approval_body=DecisionCommand.APPROVED.value,
+            )
        )
        change_decisions = apply_decisions_to_changes(
            changes,
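The coverage table now renders each approver group as a collapsible <details> block instead of the old flat "context - user" string. The standalone Python sketch below is not part of the package diff; it only illustrates that formatting rule. The two dataclasses are minimal stand-ins for the real approver and ChangeResponsibles types from reconcile.change_owners.decision, and the sample names are made up.

from dataclasses import dataclass, field


@dataclass
class Approver:  # stand-in for the real approver type
    org_username: str
    tag_on_merge_requests: bool = False


@dataclass
class ChangeResponsibles:  # stand-in for reconcile.change_owners.decision.ChangeResponsibles
    context: str
    approvers: list[Approver] = field(default_factory=list)


def format_change_responsible(cr: ChangeResponsibles) -> str:
    # Same rule as the new _format_change_responsible(): @-mention only users
    # who opted in via tag_on_merge_requests, or the sole approver of a context.
    usernames = [
        f"@{a.org_username}"
        if (a.tag_on_merge_requests or len(cr.approvers) == 1)
        else a.org_username
        for a in cr.approvers
    ]
    return f"<details><summary>{cr.context}</summary>{' '.join(usernames)}</details>"


print(format_change_responsible(
    ChangeResponsibles("team-foo owners", [Approver("alice", True), Approver("bob")])
))
# -> <details><summary>team-foo owners</summary>@alice bob</details>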