qontract-reconcile 0.10.2.dev414__py3-none-any.whl → 0.10.2.dev427__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of qontract-reconcile might be problematic.
Files changed (31)
  1. {qontract_reconcile-0.10.2.dev414.dist-info → qontract_reconcile-0.10.2.dev427.dist-info}/METADATA +2 -2
  2. {qontract_reconcile-0.10.2.dev414.dist-info → qontract_reconcile-0.10.2.dev427.dist-info}/RECORD +31 -30
  3. reconcile/aus/advanced_upgrade_service.py +3 -0
  4. reconcile/aus/base.py +112 -9
  5. reconcile/aus/models.py +2 -0
  6. reconcile/aus/ocm_addons_upgrade_scheduler_org.py +1 -0
  7. reconcile/aus/ocm_upgrade_scheduler.py +8 -1
  8. reconcile/aus/ocm_upgrade_scheduler_org.py +20 -5
  9. reconcile/aus/version_gates/sts_version_gate_handler.py +54 -1
  10. reconcile/automated_actions/config/integration.py +1 -1
  11. reconcile/cli.py +62 -4
  12. reconcile/external_resources/manager.py +7 -18
  13. reconcile/external_resources/model.py +8 -8
  14. reconcile/external_resources/state.py +1 -34
  15. reconcile/gql_definitions/rhcs/certs.py +19 -74
  16. reconcile/gql_definitions/rhcs/openshift_resource_rhcs_cert.py +42 -0
  17. reconcile/ocm_machine_pools.py +12 -6
  18. reconcile/openshift_base.py +60 -2
  19. reconcile/openshift_rhcs_certs.py +22 -24
  20. reconcile/rhidp/sso_client/base.py +15 -4
  21. reconcile/utils/binary.py +7 -12
  22. reconcile/utils/glitchtip/client.py +2 -2
  23. reconcile/utils/jobcontroller/controller.py +1 -1
  24. reconcile/utils/json.py +5 -1
  25. reconcile/utils/oc.py +136 -111
  26. reconcile/utils/rosa/session.py +16 -0
  27. reconcile/utils/saasherder/saasherder.py +20 -7
  28. reconcile/utils/vault.py +1 -1
  29. tools/cli_commands/erv2.py +1 -3
  30. {qontract_reconcile-0.10.2.dev414.dist-info → qontract_reconcile-0.10.2.dev427.dist-info}/WHEEL +0 -0
  31. {qontract_reconcile-0.10.2.dev414.dist-info → qontract_reconcile-0.10.2.dev427.dist-info}/entry_points.txt +0 -0
reconcile/openshift_rhcs_certs.py CHANGED
@@ -2,7 +2,7 @@ import logging
 import sys
 import time
 from collections.abc import Callable, Iterable, Mapping
-from typing import Any, cast
+from typing import Any

 import reconcile.openshift_base as ob
 import reconcile.openshift_resources_base as orb
@@ -10,8 +10,8 @@ from reconcile.gql_definitions.common.rhcs_provider_settings import (
     RhcsProviderSettingsV1,
 )
 from reconcile.gql_definitions.rhcs.certs import (
-    NamespaceOpenshiftResourceRhcsCertV1,
     NamespaceV1,
+    OpenshiftResourceRhcsCert,
 )
 from reconcile.gql_definitions.rhcs.certs import (
     query as rhcs_certs_query,
@@ -40,7 +40,6 @@ from reconcile.utils.vault import SecretNotFoundError, VaultClient

 QONTRACT_INTEGRATION = "openshift-rhcs-certs"
 QONTRACT_INTEGRATION_VERSION = make_semver(1, 9, 3)
-PROVIDERS = ["rhcs-cert"]


 def desired_state_shard_config() -> DesiredStateShardConfig:
@@ -67,10 +66,6 @@ class OpenshiftRhcsCertExpiration(GaugeMetric):
         return "qontract_reconcile_rhcs_cert_expiration_timestamp"


-def _is_rhcs_cert(obj: Any) -> bool:
-    return getattr(obj, "provider", None) == "rhcs-cert"
-
-
 def get_namespaces_with_rhcs_certs(
     query_func: Callable,
     cluster_name: Iterable[str] | None = None,
@@ -82,7 +77,7 @@ def get_namespaces_with_rhcs_certs(
             integration_is_enabled(QONTRACT_INTEGRATION, ns.cluster)
             and not bool(ns.delete)
             and (not cluster_name or ns.cluster.name in cluster_name)
-            and any(_is_rhcs_cert(r) for r in ns.openshift_resources or [])
+            and ns.openshift_resources
         ):
             result.append(ns)
     return result
@@ -105,7 +100,7 @@ def construct_rhcs_cert_oc_secret(

 def cert_expires_within_threshold(
     ns: NamespaceV1,
-    cert_resource: NamespaceOpenshiftResourceRhcsCertV1,
+    cert_resource: OpenshiftResourceRhcsCert,
     vault_cert_secret: Mapping[str, Any],
 ) -> bool:
     auto_renew_threshold_days = cert_resource.auto_renew_threshold_days or 7
@@ -121,7 +116,7 @@ def cert_expires_within_threshold(

 def get_vault_cert_secret(
     ns: NamespaceV1,
-    cert_resource: NamespaceOpenshiftResourceRhcsCertV1,
+    cert_resource: OpenshiftResourceRhcsCert,
     vault: VaultClient,
     vault_base_path: str,
 ) -> dict | None:
@@ -140,7 +135,7 @@ def get_vault_cert_secret(
 def generate_vault_cert_secret(
     dry_run: bool,
     ns: NamespaceV1,
-    cert_resource: NamespaceOpenshiftResourceRhcsCertV1,
+    cert_resource: OpenshiftResourceRhcsCert,
     vault: VaultClient,
     vault_base_path: str,
     issuer_url: str,
@@ -182,7 +177,7 @@ def generate_vault_cert_secret(
 def fetch_openshift_resource_for_cert_resource(
     dry_run: bool,
     ns: NamespaceV1,
-    cert_resource: NamespaceOpenshiftResourceRhcsCertV1,
+    cert_resource: OpenshiftResourceRhcsCert,
     vault: VaultClient,
     rhcs_settings: RhcsProviderSettingsV1,
 ) -> OR:
@@ -231,18 +226,13 @@ def fetch_desired_state(
     cert_provider = get_rhcs_provider_settings(query_func=query_func)
     for ns in namespaces:
         for cert_resource in ns.openshift_resources or []:
-            if _is_rhcs_cert(cert_resource):
-                ri.add_desired_resource(
-                    cluster=ns.cluster.name,
-                    namespace=ns.name,
-                    resource=fetch_openshift_resource_for_cert_resource(
-                        dry_run,
-                        ns,
-                        cast("NamespaceOpenshiftResourceRhcsCertV1", cert_resource),
-                        vault,
-                        cert_provider,
-                    ),
-                )
+            ri.add_desired_resource(
+                cluster=ns.cluster.name,
+                namespace=ns.name,
+                resource=fetch_openshift_resource_for_cert_resource(
+                    dry_run, ns, cert_resource, vault, cert_provider
+                ),
+            )


 @defer
@@ -295,3 +285,11 @@ def run(
     ob.publish_metrics(ri, QONTRACT_INTEGRATION)
     if ri.has_error_registered():
         sys.exit(1)
+
+
+def early_exit_desired_state(*args: Any, **kwargs: Any) -> dict[str, Any]:
+    if not (query_func := kwargs.get("query_func")):
+        query_func = gql.get_api().query
+
+    cluster_name = kwargs.get("cluster_name")
+    return {"namespace": get_namespaces_with_rhcs_certs(query_func, cluster_name)}
reconcile/rhidp/sso_client/base.py CHANGED
@@ -1,3 +1,4 @@
+import http
 import logging
 from collections.abc import (
     Iterable,
@@ -10,6 +11,7 @@ from urllib.parse import (
 )

 import jwt
+from requests import HTTPError

 from reconcile.rhidp.common import (
     Cluster,
@@ -256,9 +258,18 @@ def delete_sso_client(
     )
     sso_client = SSOClient(**secret_reader.read_all_secret(secret=secret))
     keycloak_api = keycloak_map.get(sso_client.issuer)
-    keycloak_api.delete_client(
-        registration_client_uri=sso_client.registration_client_uri,
-        registration_access_token=sso_client.registration_access_token,
-    )
+    try:
+        keycloak_api.delete_client(
+            registration_client_uri=sso_client.registration_client_uri,
+            registration_access_token=sso_client.registration_access_token,
+        )
+    except HTTPError as e:
+        if e.response.status_code != http.HTTPStatus.UNAUTHORIZED:
+            logging.error(f"Failed to delete SSO client {sso_client_id}: {e}")
+            raise
+        # something went wrong with the registration token, maybe it expired
+        logging.error(
+            f"Failed to delete SSO client {sso_client_id} due to unauthorized error: {e}. Continuing to delete the vault secret."
+        )

     secret_reader.vault_client.delete(path=secret.path)
reconcile/utils/binary.py CHANGED
@@ -38,10 +38,7 @@ def binary_version(
     def deco_binary_version(f: Callable) -> Callable:
         @wraps(f)
         def f_binary_version(*args: Any, **kwargs: Any) -> None:
-            regex = re.compile(search_regex)
-
-            cmd = [binary]
-            cmd.extend(version_args)
+            cmd = [binary, *version_args]
             try:
                 result = subprocess.run(cmd, capture_output=True, check=True)
             except subprocess.CalledProcessError as e:
@@ -50,15 +47,13 @@ def binary_version(
                 )
                 raise Exception(msg) from e

-            found = False
-            match = None
-            for line in result.stdout.splitlines():
-                match = regex.search(line.decode("utf-8"))
-                if match is not None:
-                    found = True
-                    break
+            match = re.search(
+                search_regex,
+                result.stdout.decode("utf-8"),
+                re.MULTILINE,
+            )

-            if not found or not match:
+            if match is None:
                 raise Exception(
                     f"Could not find version for binary '{binary}' via regex "
                     f"for binary version check: "
reconcile/utils/glitchtip/client.py CHANGED
@@ -165,7 +165,7 @@ class GlitchtipClient(ApiBase):
             **self._post(
                 f"/api/0/projects/{organization_slug}/{project_slug}/alerts/",
                 data=alert.model_dump(
-                    by_alias=True, exclude_unset=True, exclude_none=True
+                    mode="json", by_alias=True, exclude_unset=True, exclude_none=True
                 ),
             )
         )
@@ -186,7 +186,7 @@ class GlitchtipClient(ApiBase):
             **self._put(
                 f"/api/0/projects/{organization_slug}/{project_slug}/alerts/{alert.pk}/",
                 data=alert.model_dump(
-                    by_alias=True, exclude_unset=True, exclude_none=True
+                    mode="json", by_alias=True, exclude_unset=True, exclude_none=True
                 ),
             )
         )
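
Passing mode="json" makes model_dump emit JSON-native values (datetimes, enums, URLs become strings) before the payload is sent. A small illustration, assuming pydantic v2; the Alert model here is made up:

from datetime import datetime, timezone

from pydantic import BaseModel


class Alert(BaseModel):
    name: str
    created: datetime


alert = Alert(name="latency", created=datetime(2024, 1, 1, tzinfo=timezone.utc))
print(alert.model_dump())             # 'created' stays a datetime object
print(alert.model_dump(mode="json"))  # 'created' becomes an ISO 8601 string
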
reconcile/utils/jobcontroller/controller.py CHANGED
@@ -3,7 +3,7 @@ import time
 from datetime import datetime
 from typing import Protocol, TextIO

-from kubernetes.client import (  # type: ignore[attr-defined]
+from kubernetes.client import (
     ApiClient,
     V1Job,
     V1ObjectMeta,
reconcile/utils/json.py CHANGED
@@ -7,6 +7,7 @@ from enum import Enum
 from typing import Any, Literal

 from pydantic import BaseModel
+from pydantic.main import IncEx

 JSON_COMPACT_SEPARATORS = (",", ":")

@@ -42,6 +43,7 @@ def json_dumps(
     # BaseModel dump parameters
     by_alias: bool = True,
     exclude_none: bool = False,
+    exclude: IncEx | None = None,
     mode: Literal["json", "python"] = "json",
 ) -> str:
     """
@@ -56,7 +58,9 @@ def json_dumps(
         A JSON formatted string.
     """
     if isinstance(data, BaseModel):
-        data = data.model_dump(mode=mode, by_alias=by_alias, exclude_none=exclude_none)
+        data = data.model_dump(
+            mode=mode, by_alias=by_alias, exclude_none=exclude_none, exclude=exclude
+        )
     if mode == "python":
         defaults = pydantic_encoder
     separators = JSON_COMPACT_SEPARATORS if compact else None
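
The new exclude parameter is forwarded to model_dump, so callers can drop fields (including nested ones) before serialization. A sketch under the assumption of pydantic v2 IncEx-style values; the Resource model is illustrative:

from pydantic import BaseModel


class Resource(BaseModel):
    name: str
    data: dict[str, str]


r = Resource(name="db", data={"password": "x", "managed_by_erv2": "true"})
# nested exclude: drop one key inside the "data" field before dumping
print(r.model_dump(exclude={"data": {"managed_by_erv2"}}))
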
reconcile/utils/oc.py CHANGED
@@ -10,6 +10,7 @@ import re
 import subprocess
 import threading
 import time
+from collections import defaultdict
 from contextlib import suppress
 from dataclasses import dataclass
 from functools import cache, wraps
@@ -18,7 +19,7 @@ from threading import Lock
 from typing import TYPE_CHECKING, Any, TextIO, cast

 import urllib3
-from kubernetes.client import (  # type: ignore[attr-defined]
+from kubernetes.client import (
     ApiClient,
     Configuration,
 )
@@ -46,7 +47,6 @@ from sretoolbox.utils import (
 )

 from reconcile.status import RunningState
-from reconcile.utils.datetime_util import utc_now
 from reconcile.utils.json import json_dumps
 from reconcile.utils.jump_host import (
     JumphostParameters,
@@ -70,6 +70,16 @@ urllib3.disable_warnings()
 GET_REPLICASET_MAX_ATTEMPTS = 20
 DEFAULT_GROUP = ""
 PROJECT_KIND = "Project.project.openshift.io"
+POD_RECYCLE_SUPPORTED_TRIGGER_KINDS = [
+    "ConfigMap",
+    "Secret",
+]
+POD_RECYCLE_SUPPORTED_OWNER_KINDS = [
+    "DaemonSet",
+    "Deployment",
+    "DeploymentConfig",
+    "StatefulSet",
+]

 oc_run_execution_counter = Counter(
     name="oc_run_execution_counter",
@@ -126,14 +136,6 @@ class JSONParsingError(Exception):
     pass


-class RecyclePodsUnsupportedKindError(Exception):
-    pass
-
-
-class RecyclePodsInvalidAnnotationValueError(Exception):
-    pass
-
-
 class PodNotReadyError(Exception):
     pass

@@ -922,108 +924,105 @@ class OCCli:
         if not status["ready"]:
             raise PodNotReadyError(name)

-    def recycle_pods(
-        self, dry_run: bool, namespace: str, dep_kind: str, dep_resource: OR
-    ) -> None:
-        """recycles pods which are using the specified resources.
-        will only act on Secrets containing the 'qontract.recycle' annotation.
-        dry_run: simulate pods recycle.
-        namespace: namespace in which dependant resource is applied.
-        dep_kind: dependant resource kind. currently only supports Secret.
-        dep_resource: dependant resource."""
-
-        supported_kinds = ["Secret", "ConfigMap"]
-        if dep_kind not in supported_kinds:
+    def _is_resource_supported_to_trigger_recycle(
+        self,
+        namespace: str,
+        resource: OR,
+    ) -> bool:
+        if resource.kind not in POD_RECYCLE_SUPPORTED_TRIGGER_KINDS:
             logging.debug([
                 "skipping_pod_recycle_unsupported",
                 self.cluster_name,
                 namespace,
-                dep_kind,
+                resource.kind,
+                resource.name,
             ])
-            return
+            return False

-        dep_annotations = dep_resource.body["metadata"].get("annotations", {})
         # Note, that annotations might have been set to None explicitly
-        dep_annotations = dep_resource.body["metadata"].get("annotations") or {}
-        qontract_recycle = dep_annotations.get("qontract.recycle")
-        if qontract_recycle is True:
-            raise RecyclePodsInvalidAnnotationValueError('should be "true"')
+        annotations = resource.body["metadata"].get("annotations") or {}
+        qontract_recycle = annotations.get("qontract.recycle")
         if qontract_recycle != "true":
             logging.debug([
                 "skipping_pod_recycle_no_annotation",
                 self.cluster_name,
                 namespace,
-                dep_kind,
+                resource.kind,
+                resource.name,
             ])
+            return False
+        return True
+
+    def recycle_pods(
+        self,
+        dry_run: bool,
+        namespace: str,
+        resource: OR,
+    ) -> None:
+        """
+        recycles pods which are using the specified resources.
+        will only act on Secret or ConfigMap containing the 'qontract.recycle' annotation.
+
+        Args:
+            dry_run (bool): if True, will only log the recycle action without executing it
+            namespace (str): namespace of the resource
+            resource (OR): resource object (Secret or ConfigMap) to check for pod usage
+        """
+
+        if not self._is_resource_supported_to_trigger_recycle(namespace, resource):
             return

-        dep_name = dep_resource.name
         pods = self.get(namespace, "Pod")["items"]
-
-        if dep_kind == "Secret":
-            pods_to_recycle = [
-                pod for pod in pods if self.secret_used_in_pod(dep_name, pod)
-            ]
-        elif dep_kind == "ConfigMap":
-            pods_to_recycle = [
-                pod for pod in pods if self.configmap_used_in_pod(dep_name, pod)
-            ]
-        else:
-            raise RecyclePodsUnsupportedKindError(dep_kind)
-
-        recyclables: dict[str, list[dict[str, Any]]] = {}
-        supported_recyclables = [
-            "Deployment",
-            "DeploymentConfig",
-            "StatefulSet",
-            "DaemonSet",
+        pods_to_recycle = [
+            pod
+            for pod in pods
+            if self.is_resource_used_in_pod(
+                name=resource.name,
+                kind=resource.kind,
+                pod=pod,
+            )
         ]
+
+        recycle_names_by_kind = defaultdict(set)
        for pod in pods_to_recycle:
             owner = self.get_obj_root_owner(namespace, pod, allow_not_found=True)
             kind = owner["kind"]
-            if kind not in supported_recyclables:
-                continue
-            recyclables.setdefault(kind, [])
-            exists = False
-            for obj in recyclables[kind]:
-                owner_name = owner["metadata"]["name"]
-                if obj["metadata"]["name"] == owner_name:
-                    exists = True
-                    break
-            if not exists:
-                recyclables[kind].append(owner)
-
-        for kind, objs in recyclables.items():
-            for obj in objs:
-                self.recycle(dry_run, namespace, kind, obj)
-
-    @retry(exceptions=ObjectHasBeenModifiedError)
+            if kind in POD_RECYCLE_SUPPORTED_OWNER_KINDS:
+                recycle_names_by_kind[kind].add(owner["metadata"]["name"])
+
+        for kind, names in recycle_names_by_kind.items():
+            for name in names:
+                self.recycle(
+                    dry_run=dry_run,
+                    namespace=namespace,
+                    kind=kind,
+                    name=name,
+                )
+
     def recycle(
-        self, dry_run: bool, namespace: str, kind: str, obj: MutableMapping[str, Any]
+        self,
+        dry_run: bool,
+        namespace: str,
+        kind: str,
+        name: str,
     ) -> None:
-        """Recycles an object by adding a recycle.time annotation
+        """
+        Recycles an object using oc rollout restart, which will add an annotation
+        kubectl.kubernetes.io/restartedAt with the current timestamp to the pod
+        template, triggering a rolling restart.

-        :param dry_run: Is this a dry run
-        :param namespace: Namespace to work in
-        :param kind: Object kind
-        :param obj: Object to recycle
+        Args:
+            dry_run (bool): if True, will only log the recycle action without executing it
+            namespace (str): namespace of the object to recycle
+            kind (str): kind of the object to recycle
+            name (str): name of the object to recycle
         """
-        name = obj["metadata"]["name"]
         logging.info([f"recycle_{kind.lower()}", self.cluster_name, namespace, name])
         if not dry_run:
-            now = utc_now()
-            recycle_time = now.strftime("%d/%m/%Y %H:%M:%S")
-
-            # get the object in case it was modified
-            obj = self.get(namespace, kind, name)
-            # honor update strategy by setting annotations to force
-            # a new rollout
-            a = obj["spec"]["template"]["metadata"].get("annotations", {})
-            a["recycle.time"] = recycle_time
-            obj["spec"]["template"]["metadata"]["annotations"] = a
-            cmd = ["apply", "-n", namespace, "-f", "-"]
-            stdin = json_dumps(obj)
-            self._run(cmd, stdin=stdin, apply=True)
+            self._run(
+                ["rollout", "restart", f"{kind}/{name}", "-n", namespace],
+                apply=True,
+            )

     def get_obj_root_owner(
         self,
@@ -1065,12 +1064,24 @@
         )
         return obj

-    def secret_used_in_pod(self, name: str, pod: Mapping[str, Any]) -> bool:
-        used_resources = self.get_resources_used_in_pod_spec(pod["spec"], "Secret")
-        return name in used_resources
+    def is_resource_used_in_pod(
+        self,
+        name: str,
+        kind: str,
+        pod: Mapping[str, Any],
+    ) -> bool:
+        """
+        Check if a resource (Secret or ConfigMap) is used in a Pod.

-    def configmap_used_in_pod(self, name: str, pod: Mapping[str, Any]) -> bool:
-        used_resources = self.get_resources_used_in_pod_spec(pod["spec"], "ConfigMap")
+        Args:
+            name: Name of the resource
+            kind: "Secret" or "ConfigMap"
+            pod: Pod object
+
+        Returns:
+            True if the resource is used in the Pod, False otherwise.
+        """
+        used_resources = self.get_resources_used_in_pod_spec(pod["spec"], kind)
         return name in used_resources

     @staticmethod
@@ -1079,25 +1090,39 @@
         kind: str,
         include_optional: bool = True,
     ) -> dict[str, set[str]]:
-        if kind not in {"Secret", "ConfigMap"}:
-            raise KeyError(f"unsupported resource kind: {kind}")
+        """
+        Get resources (Secrets or ConfigMaps) used in a Pod spec.
+        Returns a dictionary where keys are resource names and values are sets of keys used from that resource.
+
+        Args:
+            spec: Pod spec
+            kind: "Secret" or "ConfigMap"
+            include_optional: Whether to include optional resources
+
+        Returns:
+            A dictionary mapping resource names to sets of keys used.
+        """
+        match kind:
+            case "Secret":
+                volume_kind, volume_kind_ref, env_from_kind, env_kind, env_ref = (
+                    "secret",
+                    "secretName",
+                    "secretRef",
+                    "secretKeyRef",
+                    "name",
+                )
+            case "ConfigMap":
+                volume_kind, volume_kind_ref, env_from_kind, env_kind, env_ref = (
+                    "configMap",
+                    "name",
+                    "configMapRef",
+                    "configMapKeyRef",
+                    "name",
+                )
+            case _:
+                raise KeyError(f"unsupported resource kind: {kind}")
+
         optional = "optional"
-        if kind == "Secret":
-            volume_kind, volume_kind_ref, env_from_kind, env_kind, env_ref = (
-                "secret",
-                "secretName",
-                "secretRef",
-                "secretKeyRef",
-                "name",
-            )
-        elif kind == "ConfigMap":
-            volume_kind, volume_kind_ref, env_from_kind, env_kind, env_ref = (
-                "configMap",
-                "name",
-                "configMapRef",
-                "configMapKeyRef",
-                "name",
-            )

         resources: dict[str, set[str]] = {}
         for v in spec.get("volumes") or []:
@@ -1126,8 +1151,8 @@
                         continue
                     resource_name = resource_ref[env_ref]
                     resources.setdefault(resource_name, set())
-                    secret_key = resource_ref["key"]
-                    resources[resource_name].add(secret_key)
+                    key = resource_ref["key"]
+                    resources[resource_name].add(key)
                 except (KeyError, TypeError):
                     continue

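
recycle now shells out to oc rollout restart instead of patching a recycle.time annotation; the restart itself stamps kubectl.kubernetes.io/restartedAt on the pod template. A toy sketch of that path, assuming an oc binary and a valid kubeconfig are available (names are illustrative):

import subprocess


def rollout_restart(kind: str, name: str, namespace: str, dry_run: bool = True) -> None:
    cmd = ["oc", "rollout", "restart", f"{kind}/{name}", "-n", namespace]
    print("would run:" if dry_run else "running:", " ".join(cmd))
    if not dry_run:
        subprocess.run(cmd, check=True)


rollout_restart("Deployment", "example-app", "example-namespace")
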
reconcile/utils/rosa/session.py CHANGED
@@ -178,6 +178,22 @@ class RosaSession:
         )
         result.write_logs_to_logger(logging.info)

+    def upgrade_rosa_roles(
+        self,
+        cluster_name: str,
+        upgrade_version: str,
+        policy_version: str,
+        dry_run: bool,
+    ) -> None:
+        logging.info(
+            f"Upgrade roles in AWS account {self.aws_account_id} to {upgrade_version}"
+        )
+        if not dry_run:
+            result = self.cli_execute(
+                f"rosa upgrade roles -c {cluster_name} --cluster-version {upgrade_version} --policy-version {policy_version} -y -m=auto"
+            )
+            result.write_logs_to_logger(logging.info)
+

 def generate_rosa_creation_script(
     cluster_name: str, cluster: OCMSpec, dry_run: bool
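
upgrade_rosa_roles only shells out when dry_run is false and always logs its intent first. A toy sketch of that gating, with print standing in for the session's cli_execute wrapper; the cluster and version values are made up:

def upgrade_rosa_roles(cluster: str, upgrade_version: str, policy_version: str, dry_run: bool) -> None:
    cmd = (
        f"rosa upgrade roles -c {cluster} "
        f"--cluster-version {upgrade_version} --policy-version {policy_version} -y -m=auto"
    )
    # in the real session this goes through cli_execute; here we only print
    print(f"[dry-run] {cmd}" if dry_run else f"executing: {cmd}")


upgrade_rosa_roles("my-cluster", "4.16.8", "4.16", dry_run=True)
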
reconcile/utils/saasherder/saasherder.py CHANGED
@@ -92,8 +92,7 @@ from reconcile.utils.state import State
 from reconcile.utils.vcs import VCS

 TARGET_CONFIG_HASH = "target_config_hash"
-
-
+TEMPLATE_API_VERSION = "template.openshift.io/v1"
 UNIQUE_SAAS_FILE_ENV_COMBO_LEN = 56
 REQUEST_TIMEOUT = 60

@@ -874,10 +873,23 @@ class SaasHerder:
         """
         if parameter_name in consolidated_parameters:
             return False
-        for template_parameter in template.get("parameters", {}):
-            if template_parameter["name"] == parameter_name:
-                return True
-        return False
+        return any(
+            template_parameter["name"] == parameter_name
+            for template_parameter in template.get("parameters") or []
+        )
+
+    @staticmethod
+    def _pre_process_template(template: dict[str, Any]) -> dict[str, Any]:
+        """
+        The only supported apiVersion for OpenShift Template is "template.openshift.io/v1".
+        There are examples of templates using "v1", it can't pass validation on 4.19+ oc versions.
+
+        Args:
+            template (dict): The OpenShift template dictionary.
+        Returns:
+            dict: The OpenShift template dictionary with the correct apiVersion.
+        """
+        return template | {"apiVersion": TEMPLATE_API_VERSION}

     def _process_template(
         self, spec: TargetSpec
@@ -967,7 +979,8 @@
         oc = OCLocal("cluster", None, None, local=True)
         try:
             resources: Iterable[Mapping[str, Any]] = oc.process(
-                template, consolidated_parameters
+                template=self._pre_process_template(template),
+                parameters=consolidated_parameters,
             )
         except StatusCodeError as e:
             logging.error(f"{error_prefix} error processing template: {e!s}")
reconcile/utils/vault.py CHANGED
@@ -200,7 +200,7 @@ class VaultClient:
         a v2 KV engine)
         """
         secret_path = secret["path"]
-        secret_version = secret.get("version", SECRET_VERSION_LATEST)
+        secret_version = secret.get("version") or SECRET_VERSION_LATEST

         kv_version = self._get_mount_version_by_secret_path(secret_path)

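
The switch from a .get() default to "or" matters when the secret mapping carries an explicit "version": None, which the default form would pass through unchanged. A tiny illustration with made-up values:

SECRET_VERSION_LATEST = "LATEST"

secret = {"path": "app-sre/example", "version": None}
print(secret.get("version", SECRET_VERSION_LATEST))    # None: default not applied
print(secret.get("version") or SECRET_VERSION_LATEST)  # LATEST
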
tools/cli_commands/erv2.py CHANGED
@@ -133,9 +133,7 @@ class Erv2Cli:

     @property
     def input_data(self) -> str:
-        return self._resource.model_dump_json(
-            exclude={"data": {FLAG_RESOURCE_MANAGED_BY_ERV2}}
-        )
+        return self._resource.export(exclude={"data": {FLAG_RESOURCE_MANAGED_BY_ERV2}})

     @property
     def image(self) -> str: