qontract-reconcile 0.10.2.dev414__py3-none-any.whl → 0.10.2.dev427__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of qontract-reconcile might be problematic; consult the registry's advisory page for details.

Files changed (31)
  1. {qontract_reconcile-0.10.2.dev414.dist-info → qontract_reconcile-0.10.2.dev427.dist-info}/METADATA +2 -2
  2. {qontract_reconcile-0.10.2.dev414.dist-info → qontract_reconcile-0.10.2.dev427.dist-info}/RECORD +31 -30
  3. reconcile/aus/advanced_upgrade_service.py +3 -0
  4. reconcile/aus/base.py +112 -9
  5. reconcile/aus/models.py +2 -0
  6. reconcile/aus/ocm_addons_upgrade_scheduler_org.py +1 -0
  7. reconcile/aus/ocm_upgrade_scheduler.py +8 -1
  8. reconcile/aus/ocm_upgrade_scheduler_org.py +20 -5
  9. reconcile/aus/version_gates/sts_version_gate_handler.py +54 -1
  10. reconcile/automated_actions/config/integration.py +1 -1
  11. reconcile/cli.py +62 -4
  12. reconcile/external_resources/manager.py +7 -18
  13. reconcile/external_resources/model.py +8 -8
  14. reconcile/external_resources/state.py +1 -34
  15. reconcile/gql_definitions/rhcs/certs.py +19 -74
  16. reconcile/gql_definitions/rhcs/openshift_resource_rhcs_cert.py +42 -0
  17. reconcile/ocm_machine_pools.py +12 -6
  18. reconcile/openshift_base.py +60 -2
  19. reconcile/openshift_rhcs_certs.py +22 -24
  20. reconcile/rhidp/sso_client/base.py +15 -4
  21. reconcile/utils/binary.py +7 -12
  22. reconcile/utils/glitchtip/client.py +2 -2
  23. reconcile/utils/jobcontroller/controller.py +1 -1
  24. reconcile/utils/json.py +5 -1
  25. reconcile/utils/oc.py +136 -111
  26. reconcile/utils/rosa/session.py +16 -0
  27. reconcile/utils/saasherder/saasherder.py +20 -7
  28. reconcile/utils/vault.py +1 -1
  29. tools/cli_commands/erv2.py +1 -3
  30. {qontract_reconcile-0.10.2.dev414.dist-info → qontract_reconcile-0.10.2.dev427.dist-info}/WHEEL +0 -0
  31. {qontract_reconcile-0.10.2.dev414.dist-info → qontract_reconcile-0.10.2.dev427.dist-info}/entry_points.txt +0 -0
@@ -6,6 +6,7 @@ from reconcile.utils.ocm.base import OCMCluster, OCMVersionGate
6
6
  from reconcile.utils.ocm_base_client import OCMBaseClient
7
7
  from reconcile.utils.rosa.rosa_cli import RosaCliError
8
8
  from reconcile.utils.rosa.session import RosaSession
9
+ from reconcile.utils.semver_helper import get_version_prefix
9
10
 
10
11
  GATE_LABEL = "api.openshift.com/gate-sts"
11
12
 
@@ -63,6 +64,24 @@ class STSGateHandler(GateHandler):
63
64
  )
64
65
  return False
65
66
 
67
+ return self.upgrade_rosa_roles(
68
+ cluster=cluster,
69
+ version_raw_id_prefix=gate.version_raw_id_prefix,
70
+ dry_run=dry_run,
71
+ ocm_api=ocm_api,
72
+ ocm_org_id=ocm_org_id,
73
+ )
74
+
75
+ def upgrade_rosa_roles(
76
+ self,
77
+ cluster: OCMCluster,
78
+ version_raw_id_prefix: str,
79
+ dry_run: bool,
80
+ ocm_api: OCMBaseClient,
81
+ ocm_org_id: str,
82
+ ) -> bool:
83
+ if not cluster.aws:
84
+ return False
66
85
  rosa = RosaSession(
67
86
  aws_account_id=cluster.aws.aws_account_id,
68
87
  aws_region=cluster.region.id,
@@ -83,7 +102,7 @@ class STSGateHandler(GateHandler):
83
102
  )
84
103
  rosa.upgrade_account_roles(
85
104
  role_prefix=account_role_prefix,
86
- minor_version=gate.version_raw_id_prefix,
105
+ minor_version=version_raw_id_prefix,
87
106
  channel_group=cluster.version.channel_group,
88
107
  dry_run=dry_run,
89
108
  )
@@ -98,3 +117,37 @@ class STSGateHandler(GateHandler):
98
117
  e.write_logs_to_logger(logging.error)
99
118
  return False
100
119
  return True
120
+
121
+ def upgrade_rosa_roles_v2(
122
+ self,
123
+ cluster: OCMCluster,
124
+ upgrade_version: str,
125
+ dry_run: bool,
126
+ ocm_api: OCMBaseClient,
127
+ ocm_org_id: str,
128
+ ) -> bool:
129
+ if not cluster.aws:
130
+ return False
131
+ rosa = RosaSession(
132
+ aws_account_id=cluster.aws.aws_account_id,
133
+ aws_region=cluster.region.id,
134
+ aws_iam_role=self.aws_iam_role,
135
+ ocm_org_id=ocm_org_id,
136
+ ocm_api=ocm_api,
137
+ job_controller=self.job_controller,
138
+ image=self.rosa_job_image,
139
+ service_account=self.rosa_job_service_account,
140
+ )
141
+ policy_version = get_version_prefix(upgrade_version)
142
+ try:
143
+ rosa.upgrade_rosa_roles(
144
+ cluster_name=cluster.name,
145
+ upgrade_version=upgrade_version,
146
+ policy_version=policy_version,
147
+ dry_run=dry_run,
148
+ )
149
+ except RosaCliError as e:
150
+ logging.error(f"Failed to upgrade roles for cluster {cluster.name}: {e}")
151
+ e.write_logs_to_logger(logging.error)
152
+ return False
153
+ return True
@@ -7,7 +7,7 @@ from collections.abc import (
7
7
  from typing import Any
8
8
 
9
9
  import yaml
10
- from kubernetes.client import ( # type: ignore[attr-defined]
10
+ from kubernetes.client import (
11
11
  ApiClient,
12
12
  V1ConfigMap,
13
13
  V1ObjectMeta,
reconcile/cli.py CHANGED
@@ -50,8 +50,8 @@ from reconcile.utils.unleash import get_feature_toggle_state
50
50
  TERRAFORM_VERSION = ["1.6.6"]
51
51
  TERRAFORM_VERSION_REGEX = r"^Terraform\sv([\d]+\.[\d]+\.[\d]+)$"
52
52
 
53
- OC_VERSIONS = ["4.16.2", "4.12.46", "4.10.15"]
54
- OC_VERSION_REGEX = r"^Client\sVersion:\s([\d]+\.[\d]+\.[\d]+)$"
53
+ OC_VERSIONS = ["4.19.0", "4.16.2"]
54
+ OC_VERSION_REGEX = r"^Client\sVersion:\s([\d]+\.[\d]+\.[\d]+)"
55
55
 
56
56
  HELM_VERSIONS = ["3.11.1"]
57
57
  HELM_VERSION_REGEX = r"^version.BuildInfo{Version:\"v([\d]+\.[\d]+\.[\d]+)\".*$"
@@ -2855,6 +2855,36 @@ def ocm_addons_upgrade_scheduler_org(
2855
2855
  default=bool(os.environ.get("IGNORE_STS_CLUSTERS")),
2856
2856
  help="Ignore STS clusters",
2857
2857
  )
2858
+ @click.option(
2859
+ "--job-controller-cluster",
2860
+ help="The cluster holding the job-controller namepsace",
2861
+ required=False,
2862
+ envvar="JOB_CONTROLLER_CLUSTER",
2863
+ )
2864
+ @click.option(
2865
+ "--job-controller-namespace",
2866
+ help="The namespace used for ROSA jobs",
2867
+ required=False,
2868
+ envvar="JOB_CONTROLLER_NAMESPACE",
2869
+ )
2870
+ @click.option(
2871
+ "--rosa-job-service-account",
2872
+ help="The service-account used for ROSA jobs",
2873
+ required=False,
2874
+ envvar="ROSA_JOB_SERVICE_ACCOUNT",
2875
+ )
2876
+ @click.option(
2877
+ "--rosa-job-image",
2878
+ help="The container image to use to run ROSA cli command jobs",
2879
+ required=False,
2880
+ envvar="ROSA_JOB_IMAGE",
2881
+ )
2882
+ @click.option(
2883
+ "--rosa-role",
2884
+ help="The role to assume in the ROSA cluster account",
2885
+ required=False,
2886
+ envvar="ROSA_ROLE",
2887
+ )
2858
2888
  @click.pass_context
2859
2889
  def advanced_upgrade_scheduler(
2860
2890
  ctx: click.Context,
@@ -2862,9 +2892,21 @@ def advanced_upgrade_scheduler(
2862
2892
  org_id: Iterable[str],
2863
2893
  exclude_org_id: Iterable[str],
2864
2894
  ignore_sts_clusters: bool,
2895
+ job_controller_cluster: str | None,
2896
+ job_controller_namespace: str | None,
2897
+ rosa_job_service_account: str | None,
2898
+ rosa_role: str | None,
2899
+ rosa_job_image: str | None,
2865
2900
  ) -> None:
2866
- from reconcile.aus.advanced_upgrade_service import AdvancedUpgradeServiceIntegration
2867
- from reconcile.aus.base import AdvancedUpgradeSchedulerBaseIntegrationParams
2901
+ from reconcile.aus.advanced_upgrade_service import (
2902
+ QONTRACT_INTEGRATION,
2903
+ QONTRACT_INTEGRATION_VERSION,
2904
+ AdvancedUpgradeServiceIntegration,
2905
+ )
2906
+ from reconcile.aus.base import (
2907
+ AdvancedUpgradeSchedulerBaseIntegrationParams,
2908
+ RosaRoleUpgradeHandlerParams,
2909
+ )
2868
2910
 
2869
2911
  run_class_integration(
2870
2912
  integration=AdvancedUpgradeServiceIntegration(
@@ -2873,6 +2915,22 @@ def advanced_upgrade_scheduler(
2873
2915
  ocm_organization_ids=set(org_id),
2874
2916
  excluded_ocm_organization_ids=set(exclude_org_id),
2875
2917
  ignore_sts_clusters=ignore_sts_clusters,
2918
+ rosa_role_upgrade_handler_params=RosaRoleUpgradeHandlerParams(
2919
+ job_controller_cluster=job_controller_cluster,
2920
+ job_controller_namespace=job_controller_namespace,
2921
+ rosa_job_service_account=rosa_job_service_account,
2922
+ rosa_role=rosa_role,
2923
+ rosa_job_image=rosa_job_image,
2924
+ integration_name=QONTRACT_INTEGRATION,
2925
+ integration_version=QONTRACT_INTEGRATION_VERSION,
2926
+ )
2927
+ if all([
2928
+ job_controller_cluster,
2929
+ job_controller_namespace,
2930
+ rosa_job_service_account,
2931
+ rosa_role,
2932
+ ])
2933
+ else None,
2876
2934
  )
2877
2935
  ),
2878
2936
  ctx=ctx,
@@ -45,7 +45,6 @@ from reconcile.utils.datetime_util import utc_now
45
45
  from reconcile.utils.external_resource_spec import (
46
46
  ExternalResourceSpec,
47
47
  )
48
- from reconcile.utils.json import json_dumps
49
48
  from reconcile.utils.secret_reader import SecretReaderBase
50
49
 
51
50
 
@@ -245,7 +244,7 @@ class ExternalResourcesManager:
245
244
  reconciliation = Reconciliation(
246
245
  key=key,
247
246
  resource_hash=resource.hash(),
248
- input=json_dumps(resource),
247
+ input=resource.export(),
249
248
  action=Action.APPLY,
250
249
  module_configuration=module_conf,
251
250
  linked_resources=self._find_linked_resources(spec),
@@ -253,15 +252,11 @@ class ExternalResourcesManager:
253
252
  r.add(reconciliation)
254
253
  return r
255
254
 
256
- def _get_deleted_objects_reconciliations(
257
- self, enable_migration: bool = False
258
- ) -> set[Reconciliation]:
255
+ def _get_deleted_objects_reconciliations(self) -> set[Reconciliation]:
259
256
  to_reconcile: set[Reconciliation] = set()
260
257
  deleted_keys = (k for k, v in self.er_inventory.items() if v.marked_to_delete)
261
258
  for key in deleted_keys:
262
- state = self.state_mgr.get_external_resource_state(
263
- key, enable_migration=enable_migration
264
- )
259
+ state = self.state_mgr.get_external_resource_state(key)
265
260
  if state.resource_status == ResourceStatus.NOT_EXISTS:
266
261
  logging.debug("Resource has already been removed. key: %s", key)
267
262
  continue
@@ -354,9 +349,7 @@ class ExternalResourcesManager:
354
349
 
355
350
  if r.linked_resources:
356
351
  for lr in r.linked_resources:
357
- lrs = self.state_mgr.get_external_resource_state(
358
- lr, enable_migration=True
359
- )
352
+ lrs = self.state_mgr.get_external_resource_state(lr)
360
353
  if not lrs.resource_status.is_in_progress:
361
354
  lrs.resource_status = ResourceStatus.RECONCILIATION_REQUESTED
362
355
  self.state_mgr.set_external_resource_state(lrs)
@@ -423,12 +416,10 @@ class ExternalResourcesManager:
423
416
 
424
417
  def handle_resources(self) -> None:
425
418
  desired_r = self._get_desired_objects_reconciliations()
426
- deleted_r = self._get_deleted_objects_reconciliations(enable_migration=True)
419
+ deleted_r = self._get_deleted_objects_reconciliations()
427
420
  to_sync_keys: set[ExternalResourceKey] = set()
428
421
  for r in desired_r.union(deleted_r):
429
- state = self.state_mgr.get_external_resource_state(
430
- r.key, enable_migration=True
431
- )
422
+ state = self.state_mgr.get_external_resource_state(r.key)
432
423
  reconciliation_status = self._get_reconciliation_status(r, state)
433
424
  self._update_resource_state(r, state, reconciliation_status)
434
425
 
@@ -460,9 +451,7 @@ class ExternalResourcesManager:
460
451
  for r in desired_r.union(deleted_r)
461
452
  if self._reconciliation_needs_dry_run_run(
462
453
  r,
463
- self.state_mgr.get_external_resource_state(
464
- key=r.key, enable_migration=False
465
- ),
454
+ self.state_mgr.get_external_resource_state(key=r.key),
466
455
  )
467
456
  }
468
457
 
@@ -1,7 +1,4 @@
1
1
  import hashlib
2
- from abc import (
3
- ABC,
4
- )
5
2
  from collections.abc import ItemsView, Iterable, Iterator, MutableMapping
6
3
  from enum import StrEnum
7
4
  from typing import Any
@@ -88,9 +85,6 @@ class ExternalResourceKey(BaseModel, frozen=True):
88
85
  provider=spec.provider,
89
86
  )
90
87
 
91
- def hash(self) -> str:
92
- return hashlib.md5(json_dumps(self.model_dump()).encode("utf-8")).hexdigest()
93
-
94
88
  @property
95
89
  def state_path(self) -> str:
96
90
  return f"{self.provision_provider}/{self.provisioner_name}/{self.provider}/{self.identifier}"
@@ -407,7 +401,7 @@ class ReconciliationStatus(BaseModel):
407
401
  resource_status: ResourceStatus
408
402
 
409
403
 
410
- class ModuleProvisionData(ABC, BaseModel):
404
+ class ModuleProvisionData(BaseModel):
411
405
  pass
412
406
 
413
407
 
@@ -432,7 +426,7 @@ class ExternalResourceProvision(BaseModel):
432
426
  target_cluster: str
433
427
  target_namespace: str
434
428
  target_secret_name: str
435
- module_provision_data: ModuleProvisionData
429
+ module_provision_data: ModuleProvisionData | TerraformModuleProvisionData
436
430
 
437
431
 
438
432
  class ExternalResource(BaseModel):
@@ -441,3 +435,9 @@ class ExternalResource(BaseModel):
441
435
 
442
436
  def hash(self) -> str:
443
437
  return hashlib.sha256(json_dumps(self.data).encode("utf-8")).hexdigest()
438
+
439
+ def export(
440
+ self, exclude: dict[str, Any] | None = None, indent: int | None = None
441
+ ) -> str:
442
+ """Export the ExternalResource as a JSON string."""
443
+ return json_dumps(self, exclude=exclude, indent=indent)
@@ -271,47 +271,14 @@ class ExternalResourcesStateDynamoDB:
271
271
  def get_external_resource_state(
272
272
  self,
273
273
  key: ExternalResourceKey,
274
- enable_migration: bool = False,
275
274
  ) -> ExternalResourceState:
276
275
  data = self.aws_api.dynamodb.boto3_client.get_item(
277
276
  TableName=self._table,
278
277
  ConsistentRead=True,
279
278
  Key={self.adapter.ER_KEY_HASH: {"S": key.state_path}},
280
279
  )
281
- item = data.get("Item")
282
- if item:
280
+ if "Item" in data:
283
281
  return self.adapter.deserialize(data["Item"])
284
-
285
- old_data = self.aws_api.dynamodb.boto3_client.get_item(
286
- TableName=self._table,
287
- ConsistentRead=True,
288
- Key={self.adapter.ER_KEY_HASH: {"S": key.hash()}},
289
- )
290
- old_item = old_data.get("Item")
291
- if old_item:
292
- old_item[self.adapter.ER_KEY_HASH]["S"] = key.state_path
293
- old_item[self.adapter.RECONC]["M"][self.adapter.RECONC_RESOURCE_HASH][
294
- "S"
295
- ] = self._new_sha256_hash(old_item)
296
- if enable_migration:
297
- self.aws_api.dynamodb.boto3_client.transact_write_items(
298
- TransactItems=[
299
- {
300
- "Put": {
301
- "TableName": self._table,
302
- "Item": old_item,
303
- }
304
- },
305
- {
306
- "Delete": {
307
- "TableName": self._table,
308
- "Key": {self.adapter.ER_KEY_HASH: {"S": key.hash()}},
309
- }
310
- },
311
- ]
312
- )
313
- return self.adapter.deserialize(old_item)
314
-
315
282
  return ExternalResourceState(
316
283
  key=key,
317
284
  ts=utc_now(),
@@ -18,6 +18,7 @@ from pydantic import ( # noqa: F401 # pylint: disable=W0611
18
18
  )
19
19
 
20
20
  from reconcile.gql_definitions.fragments.jumphost_common_fields import CommonJumphostFields
21
+ from reconcile.gql_definitions.rhcs.openshift_resource_rhcs_cert import OpenshiftResourceRhcsCert
21
22
  from reconcile.gql_definitions.fragments.vault_secret import VaultSecret
22
23
 
23
24
 
@@ -33,6 +34,20 @@ fragment CommonJumphostFields on ClusterJumpHost_v1 {
33
34
  }
34
35
  }
35
36
 
37
+ fragment OpenshiftResourceRhcsCert on NamespaceOpenshiftResourceRhcsCert_v1 {
38
+ secret_name
39
+ service_account_name
40
+ service_account_password {
41
+ ... on VaultSecret_v1 {
42
+ path
43
+ field
44
+ version
45
+ }
46
+ }
47
+ auto_renew_threshold_days
48
+ annotations
49
+ }
50
+
36
51
  fragment VaultSecret on VaultSecret_v1 {
37
52
  path
38
53
  field
@@ -46,37 +61,11 @@ query RhcsCerts {
46
61
  delete
47
62
  clusterAdmin
48
63
  openshiftResources {
49
- provider
50
- ... on NamespaceOpenshiftResourceRhcsCert_v1 {
51
- secret_name
52
- service_account_name
53
- service_account_password {
54
- ... on VaultSecret_v1 {
55
- path
56
- field
57
- version
58
- }
59
- }
60
- auto_renew_threshold_days
61
- annotations
62
- }
64
+ ...OpenshiftResourceRhcsCert
63
65
  }
64
66
  sharedResources {
65
67
  openshiftResources {
66
- provider
67
- ... on NamespaceOpenshiftResourceRhcsCert_v1 {
68
- secret_name
69
- service_account_name
70
- service_account_password {
71
- ... on VaultSecret_v1 {
72
- path
73
- field
74
- version
75
- }
76
- }
77
- auto_renew_threshold_days
78
- annotations
79
- }
68
+ ...OpenshiftResourceRhcsCert
80
69
  }
81
70
  }
82
71
  cluster {
@@ -108,52 +97,8 @@ class ConfiguredBaseModel(BaseModel):
108
97
  )
109
98
 
110
99
 
111
- class NamespaceOpenshiftResourceV1(ConfiguredBaseModel):
112
- provider: str = Field(..., alias="provider")
113
-
114
-
115
- class VaultSecretV1(ConfiguredBaseModel):
116
- ...
117
-
118
-
119
- class VaultSecretV1_VaultSecretV1(VaultSecretV1):
120
- path: str = Field(..., alias="path")
121
- field: str = Field(..., alias="field")
122
- version: Optional[int] = Field(..., alias="version")
123
-
124
-
125
- class NamespaceOpenshiftResourceRhcsCertV1(NamespaceOpenshiftResourceV1):
126
- secret_name: str = Field(..., alias="secret_name")
127
- service_account_name: str = Field(..., alias="service_account_name")
128
- service_account_password: Union[VaultSecretV1_VaultSecretV1, VaultSecretV1] = Field(..., alias="service_account_password")
129
- auto_renew_threshold_days: Optional[int] = Field(..., alias="auto_renew_threshold_days")
130
- annotations: Optional[Json] = Field(..., alias="annotations")
131
-
132
-
133
- class SharedResourcesV1_NamespaceOpenshiftResourceV1(ConfiguredBaseModel):
134
- provider: str = Field(..., alias="provider")
135
-
136
-
137
- class SharedResourcesV1_NamespaceOpenshiftResourceV1_NamespaceOpenshiftResourceRhcsCertV1_VaultSecretV1(ConfiguredBaseModel):
138
- ...
139
-
140
-
141
- class SharedResourcesV1_NamespaceOpenshiftResourceV1_NamespaceOpenshiftResourceRhcsCertV1_VaultSecretV1_VaultSecretV1(SharedResourcesV1_NamespaceOpenshiftResourceV1_NamespaceOpenshiftResourceRhcsCertV1_VaultSecretV1):
142
- path: str = Field(..., alias="path")
143
- field: str = Field(..., alias="field")
144
- version: Optional[int] = Field(..., alias="version")
145
-
146
-
147
- class SharedResourcesV1_NamespaceOpenshiftResourceV1_NamespaceOpenshiftResourceRhcsCertV1(SharedResourcesV1_NamespaceOpenshiftResourceV1):
148
- secret_name: str = Field(..., alias="secret_name")
149
- service_account_name: str = Field(..., alias="service_account_name")
150
- service_account_password: Union[SharedResourcesV1_NamespaceOpenshiftResourceV1_NamespaceOpenshiftResourceRhcsCertV1_VaultSecretV1_VaultSecretV1, SharedResourcesV1_NamespaceOpenshiftResourceV1_NamespaceOpenshiftResourceRhcsCertV1_VaultSecretV1] = Field(..., alias="service_account_password")
151
- auto_renew_threshold_days: Optional[int] = Field(..., alias="auto_renew_threshold_days")
152
- annotations: Optional[Json] = Field(..., alias="annotations")
153
-
154
-
155
100
  class SharedResourcesV1(ConfiguredBaseModel):
156
- openshift_resources: list[Union[SharedResourcesV1_NamespaceOpenshiftResourceV1_NamespaceOpenshiftResourceRhcsCertV1, SharedResourcesV1_NamespaceOpenshiftResourceV1]] = Field(..., alias="openshiftResources")
101
+ openshift_resources: list[OpenshiftResourceRhcsCert] = Field(..., alias="openshiftResources")
157
102
 
158
103
 
159
104
  class DisableClusterAutomationsV1(ConfiguredBaseModel):
@@ -175,7 +120,7 @@ class NamespaceV1(ConfiguredBaseModel):
175
120
  name: str = Field(..., alias="name")
176
121
  delete: Optional[bool] = Field(..., alias="delete")
177
122
  cluster_admin: Optional[bool] = Field(..., alias="clusterAdmin")
178
- openshift_resources: Optional[list[Union[NamespaceOpenshiftResourceRhcsCertV1, NamespaceOpenshiftResourceV1]]] = Field(..., alias="openshiftResources")
123
+ openshift_resources: Optional[list[OpenshiftResourceRhcsCert]] = Field(..., alias="openshiftResources")
179
124
  shared_resources: Optional[list[SharedResourcesV1]] = Field(..., alias="sharedResources")
180
125
  cluster: ClusterV1 = Field(..., alias="cluster")
181
126
 
@@ -0,0 +1,42 @@
1
+ """
2
+ Generated by qenerate plugin=pydantic_v2. DO NOT MODIFY MANUALLY!
3
+ """
4
+ from collections.abc import Callable # noqa: F401 # pylint: disable=W0611
5
+ from datetime import datetime # noqa: F401 # pylint: disable=W0611
6
+ from enum import Enum # noqa: F401 # pylint: disable=W0611
7
+ from typing import ( # noqa: F401 # pylint: disable=W0611
8
+ Any,
9
+ Optional,
10
+ Union,
11
+ )
12
+
13
+ from pydantic import ( # noqa: F401 # pylint: disable=W0611
14
+ BaseModel,
15
+ ConfigDict,
16
+ Field,
17
+ Json,
18
+ )
19
+
20
+
21
+ class ConfiguredBaseModel(BaseModel):
22
+ model_config = ConfigDict(
23
+ extra='forbid'
24
+ )
25
+
26
+
27
+ class VaultSecretV1(ConfiguredBaseModel):
28
+ ...
29
+
30
+
31
+ class VaultSecretV1_VaultSecretV1(VaultSecretV1):
32
+ path: str = Field(..., alias="path")
33
+ field: str = Field(..., alias="field")
34
+ version: Optional[int] = Field(..., alias="version")
35
+
36
+
37
+ class OpenshiftResourceRhcsCert(ConfiguredBaseModel):
38
+ secret_name: str = Field(..., alias="secret_name")
39
+ service_account_name: str = Field(..., alias="service_account_name")
40
+ service_account_password: Union[VaultSecretV1_VaultSecretV1, VaultSecretV1] = Field(..., alias="service_account_password")
41
+ auto_renew_threshold_days: Optional[int] = Field(..., alias="auto_renew_threshold_days")
42
+ annotations: Optional[Json] = Field(..., alias="annotations")
@@ -7,7 +7,7 @@ from collections.abc import Iterable, Mapping
7
7
  from enum import Enum
8
8
  from typing import Any, Self
9
9
 
10
- from pydantic import BaseModel, Field, model_validator
10
+ from pydantic import BaseModel, Field, SerializeAsAny, model_validator
11
11
 
12
12
  from reconcile import queries
13
13
  from reconcile.gql_definitions.common.clusters import (
@@ -107,7 +107,7 @@ class AbstractPool(ABC, BaseModel):
107
107
  labels: Mapping[str, str] | None = None
108
108
  cluster: str
109
109
  cluster_type: ClusterType = Field(..., exclude=True)
110
- autoscaling: AbstractAutoscaling | None = None
110
+ autoscaling: SerializeAsAny[AbstractAutoscaling] | None = None
111
111
 
112
112
  @model_validator(mode="before")
113
113
  @classmethod
@@ -170,7 +170,10 @@ class MachinePool(AbstractPool):
170
170
  ocm.update_machine_pool(self.cluster, update_dict)
171
171
 
172
172
  def has_diff(self, pool: ClusterMachinePoolV1) -> bool:
173
- if self.taints != pool.taints or self.labels != pool.labels:
173
+ pool_taints = (
174
+ [p.model_dump(by_alias=True) for p in pool.taints] if pool.taints else None
175
+ )
176
+ if self.taints != pool_taints or self.labels != pool.labels:
174
177
  logging.warning(
175
178
  f"updating labels or taints for machine pool {pool.q_id} "
176
179
  f"will only be applied to new Nodes"
@@ -178,7 +181,7 @@ class MachinePool(AbstractPool):
178
181
 
179
182
  return (
180
183
  self.replicas != pool.replicas
181
- or self.taints != pool.taints
184
+ or self.taints != pool_taints
182
185
  or self.labels != pool.labels
183
186
  or self.instance_type != pool.instance_type
184
187
  or self._has_diff_autoscale(pool)
@@ -251,7 +254,10 @@ class NodePool(AbstractPool):
251
254
  ocm.update_node_pool(self.cluster, update_dict)
252
255
 
253
256
  def has_diff(self, pool: ClusterMachinePoolV1) -> bool:
254
- if self.taints != pool.taints or self.labels != pool.labels:
257
+ pool_taints = (
258
+ [p.model_dump(by_alias=True) for p in pool.taints] if pool.taints else None
259
+ )
260
+ if self.taints != pool_taints or self.labels != pool.labels:
255
261
  logging.warning(
256
262
  f"updating labels or taints for node pool {pool.q_id} "
257
263
  f"will only be applied to new Nodes"
@@ -259,7 +265,7 @@ class NodePool(AbstractPool):
259
265
 
260
266
  return (
261
267
  self.replicas != pool.replicas
262
- or self.taints != pool.taints
268
+ or self.taints != pool_taints
263
269
  or self.labels != pool.labels
264
270
  or self.aws_node_pool.instance_type != pool.instance_type
265
271
  or self.subnet != pool.subnet
@@ -29,7 +29,9 @@ from reconcile.utils import (
29
29
  metrics,
30
30
  )
31
31
  from reconcile.utils.constants import DEFAULT_THREAD_POOL_SIZE
32
+ from reconcile.utils.differ import DiffPair
32
33
  from reconcile.utils.oc import (
34
+ POD_RECYCLE_SUPPORTED_OWNER_KINDS,
33
35
  AmbiguousResourceTypeError,
34
36
  DeploymentFieldIsImmutableError,
35
37
  FieldIsImmutableError,
@@ -62,6 +64,10 @@ AUTH_METHOD_USER_KEY = {
62
64
  "oidc": "org_username",
63
65
  "rhidp": "org_username",
64
66
  }
67
+ RECYCLE_POD_ANNOTATIONS = [
68
+ "kubectl.kubernetes.io/restartedAt",
69
+ "openshift.openshift.io/restartedAt",
70
+ ]
65
71
 
66
72
 
67
73
  class ValidationError(Exception):
@@ -588,7 +594,7 @@ def apply(
588
594
  oc.resize_pvcs(namespace, owned_pvc_names, desired_storage)
589
595
 
590
596
  if recycle_pods:
591
- oc.recycle_pods(dry_run, namespace, resource_type, resource)
597
+ oc.recycle_pods(dry_run, namespace, resource)
592
598
 
593
599
 
594
600
  def create(
@@ -832,10 +838,56 @@ def handle_identical_resources(
832
838
  return actions
833
839
 
834
840
 
841
+ def patch_desired_resource_for_recycle_annotations(
842
+ desired: OR,
843
+ current: OR,
844
+ ) -> OR:
845
+ """
846
+ Patch desired resource with recycle annotations to pod template from current resource.
847
+ This is to avoid full pods recycle when changes are not affecting pod template.
848
+ Note desired annotations can override current annotations.
849
+ For example, if desired resource has kubectl.kubernetes.io/restartedAt defined,
850
+ it will be used instead of current resource annotation.
851
+
852
+ Args:
853
+ desired: desired resource
854
+ current: current resource
855
+
856
+ Returns:
857
+ patched desired resource
858
+ """
859
+ if current.kind not in POD_RECYCLE_SUPPORTED_OWNER_KINDS:
860
+ return desired
861
+
862
+ current_annotations = (
863
+ current.body.get("spec", {})
864
+ .get("template", {})
865
+ .get("metadata", {})
866
+ .get("annotations")
867
+ or {}
868
+ )
869
+ patch_annotations = {
870
+ k: value
871
+ for k in RECYCLE_POD_ANNOTATIONS
872
+ if (value := current_annotations.get(k))
873
+ }
874
+ if patch_annotations:
875
+ desired_annotations = (
876
+ desired.body.setdefault("spec", {})
877
+ .setdefault("template", {})
878
+ .setdefault("metadata", {})
879
+ .setdefault("annotations", {})
880
+ )
881
+ desired.body["spec"]["template"]["metadata"]["annotations"] = (
882
+ patch_annotations | desired_annotations
883
+ )
884
+ return desired
885
+
886
+
835
887
  def handle_modified_resources(
836
888
  oc_map: ClusterMap,
837
889
  ri: ResourceInventory,
838
- modified_resources: Mapping[Any, Any],
890
+ modified_resources: Mapping[str, DiffPair[OR, OR]],
839
891
  cluster: str,
840
892
  namespace: str,
841
893
  resource_type: str,
@@ -1031,6 +1083,12 @@ def _realize_resource_data_3way_diff(
1031
1083
  if options.enable_deletion and options.override_enable_deletion is False:
1032
1084
  options.enable_deletion = False
1033
1085
 
1086
+ for k in data["current"].keys() & data["desired"].keys():
1087
+ patch_desired_resource_for_recycle_annotations(
1088
+ desired=data["desired"][k],
1089
+ current=data["current"][k],
1090
+ )
1091
+
1034
1092
  diff_result = differ.diff_mappings(
1035
1093
  data["current"], data["desired"], equal=three_way_diff_using_hash
1036
1094
  )