qontract-reconcile 0.10.2.dev414__py3-none-any.whl → 0.10.2.dev456__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of qontract-reconcile might be problematic.
- {qontract_reconcile-0.10.2.dev414.dist-info → qontract_reconcile-0.10.2.dev456.dist-info}/METADATA +2 -2
- {qontract_reconcile-0.10.2.dev414.dist-info → qontract_reconcile-0.10.2.dev456.dist-info}/RECORD +55 -53
- {qontract_reconcile-0.10.2.dev414.dist-info → qontract_reconcile-0.10.2.dev456.dist-info}/WHEEL +1 -1
- reconcile/aus/advanced_upgrade_service.py +3 -0
- reconcile/aus/aus_sts_gate_handler.py +59 -0
- reconcile/aus/base.py +115 -8
- reconcile/aus/models.py +2 -0
- reconcile/aus/ocm_addons_upgrade_scheduler_org.py +1 -0
- reconcile/aus/ocm_upgrade_scheduler.py +8 -1
- reconcile/aus/ocm_upgrade_scheduler_org.py +20 -5
- reconcile/aus/version_gate_approver.py +1 -16
- reconcile/aus/version_gates/sts_version_gate_handler.py +5 -72
- reconcile/automated_actions/config/integration.py +1 -1
- reconcile/aws_ecr_image_pull_secrets.py +1 -1
- reconcile/change_owners/change_owners.py +100 -34
- reconcile/cli.py +63 -5
- reconcile/external_resources/manager.py +7 -18
- reconcile/external_resources/model.py +8 -8
- reconcile/external_resources/secrets_sync.py +2 -3
- reconcile/external_resources/state.py +1 -34
- reconcile/gql_definitions/common/aws_vpc_requests.py +3 -0
- reconcile/gql_definitions/common/clusters.py +2 -0
- reconcile/gql_definitions/external_resources/external_resources_namespaces.py +3 -1
- reconcile/gql_definitions/fragments/aws_vpc_request.py +5 -0
- reconcile/gql_definitions/introspection.json +48 -0
- reconcile/gql_definitions/rhcs/certs.py +20 -74
- reconcile/gql_definitions/rhcs/openshift_resource_rhcs_cert.py +43 -0
- reconcile/gql_definitions/terraform_resources/terraform_resources_namespaces.py +5 -1
- reconcile/gql_definitions/vpc_peerings_validator/vpc_peerings_validator.py +3 -0
- reconcile/gql_definitions/vpc_peerings_validator/vpc_peerings_validator_peered_cluster_fragment.py +1 -0
- reconcile/ocm_machine_pools.py +12 -6
- reconcile/openshift_base.py +60 -2
- reconcile/openshift_namespaces.py +3 -4
- reconcile/openshift_rhcs_certs.py +71 -34
- reconcile/rhidp/sso_client/base.py +15 -4
- reconcile/templates/rosa-classic-cluster-creation.sh.j2 +1 -1
- reconcile/templates/rosa-hcp-cluster-creation.sh.j2 +1 -1
- reconcile/terraform_vpc_resources/integration.py +10 -7
- reconcile/typed_queries/saas_files.py +9 -4
- reconcile/utils/binary.py +7 -12
- reconcile/utils/environ.py +5 -0
- reconcile/utils/gitlab_api.py +12 -0
- reconcile/utils/glitchtip/client.py +2 -2
- reconcile/utils/jjb_client.py +19 -3
- reconcile/utils/jobcontroller/controller.py +1 -1
- reconcile/utils/json.py +5 -1
- reconcile/utils/oc.py +144 -113
- reconcile/utils/rhcsv2_certs.py +87 -21
- reconcile/utils/rosa/session.py +16 -0
- reconcile/utils/saasherder/saasherder.py +20 -7
- reconcile/utils/terrascript_aws_client.py +140 -50
- reconcile/utils/vault.py +1 -1
- reconcile/vpc_peerings_validator.py +13 -0
- tools/cli_commands/erv2.py +1 -3
- {qontract_reconcile-0.10.2.dev414.dist-info → qontract_reconcile-0.10.2.dev456.dist-info}/entry_points.txt +0 -0
reconcile/gql_definitions/vpc_peerings_validator/vpc_peerings_validator_peered_cluster_fragment.py
CHANGED
@@ -34,6 +34,7 @@ class ClusterSpecV1(ConfiguredBaseModel):
 
 class VpcPeeringsValidatorPeeredCluster(ConfiguredBaseModel):
     name: str = Field(..., alias="name")
+    allowed_to_bypass_public_peering_restriction: Optional[bool] = Field(..., alias="allowedToBypassPublicPeeringRestriction")
     network: Optional[ClusterNetworkV1] = Field(..., alias="network")
     spec: Optional[ClusterSpecV1] = Field(..., alias="spec")
     internal: Optional[bool] = Field(..., alias="internal")
reconcile/ocm_machine_pools.py
CHANGED
@@ -7,7 +7,7 @@ from collections.abc import Iterable, Mapping
 from enum import Enum
 from typing import Any, Self
 
-from pydantic import BaseModel, Field, model_validator
+from pydantic import BaseModel, Field, SerializeAsAny, model_validator
 
 from reconcile import queries
 from reconcile.gql_definitions.common.clusters import (
@@ -107,7 +107,7 @@ class AbstractPool(ABC, BaseModel):
     labels: Mapping[str, str] | None = None
     cluster: str
     cluster_type: ClusterType = Field(..., exclude=True)
-    autoscaling: AbstractAutoscaling | None = None
+    autoscaling: SerializeAsAny[AbstractAutoscaling] | None = None
 
     @model_validator(mode="before")
     @classmethod
@@ -170,7 +170,10 @@ class MachinePool(AbstractPool):
         ocm.update_machine_pool(self.cluster, update_dict)
 
     def has_diff(self, pool: ClusterMachinePoolV1) -> bool:
-
+        pool_taints = (
+            [p.model_dump(by_alias=True) for p in pool.taints] if pool.taints else None
+        )
+        if self.taints != pool_taints or self.labels != pool.labels:
             logging.warning(
                 f"updating labels or taints for machine pool {pool.q_id} "
                 f"will only be applied to new Nodes"
@@ -178,7 +181,7 @@ class MachinePool(AbstractPool):
 
         return (
             self.replicas != pool.replicas
-            or self.taints !=
+            or self.taints != pool_taints
             or self.labels != pool.labels
             or self.instance_type != pool.instance_type
             or self._has_diff_autoscale(pool)
@@ -251,7 +254,10 @@ class NodePool(AbstractPool):
         ocm.update_node_pool(self.cluster, update_dict)
 
     def has_diff(self, pool: ClusterMachinePoolV1) -> bool:
-
+        pool_taints = (
+            [p.model_dump(by_alias=True) for p in pool.taints] if pool.taints else None
+        )
+        if self.taints != pool_taints or self.labels != pool.labels:
             logging.warning(
                 f"updating labels or taints for node pool {pool.q_id} "
                 f"will only be applied to new Nodes"
@@ -259,7 +265,7 @@ class NodePool(AbstractPool):
 
         return (
             self.replicas != pool.replicas
-            or self.taints !=
+            or self.taints != pool_taints
             or self.labels != pool.labels
             or self.aws_node_pool.instance_type != pool.instance_type
             or self.subnet != pool.subnet
reconcile/openshift_base.py
CHANGED
@@ -29,7 +29,9 @@ from reconcile.utils import (
     metrics,
 )
 from reconcile.utils.constants import DEFAULT_THREAD_POOL_SIZE
+from reconcile.utils.differ import DiffPair
 from reconcile.utils.oc import (
+    POD_RECYCLE_SUPPORTED_OWNER_KINDS,
     AmbiguousResourceTypeError,
     DeploymentFieldIsImmutableError,
     FieldIsImmutableError,
@@ -62,6 +64,10 @@ AUTH_METHOD_USER_KEY = {
     "oidc": "org_username",
     "rhidp": "org_username",
 }
+RECYCLE_POD_ANNOTATIONS = [
+    "kubectl.kubernetes.io/restartedAt",
+    "openshift.openshift.io/restartedAt",
+]
 
 
 class ValidationError(Exception):
@@ -588,7 +594,7 @@ def apply(
         oc.resize_pvcs(namespace, owned_pvc_names, desired_storage)
 
     if recycle_pods:
-        oc.recycle_pods(dry_run, namespace,
+        oc.recycle_pods(dry_run, namespace, resource)
 
 
 def create(
@@ -832,10 +838,56 @@ def handle_identical_resources(
     return actions
 
 
+def patch_desired_resource_for_recycle_annotations(
+    desired: OR,
+    current: OR,
+) -> OR:
+    """
+    Patch desired resource with recycle annotations to pod template from current resource.
+    This is to avoid full pods recycle when changes are not affecting pod template.
+    Note desired annotations can override current annotations.
+    For example, if desired resource has kubectl.kubernetes.io/restartedAt defined,
+    it will be used instead of current resource annotation.
+
+    Args:
+        desired: desired resource
+        current: current resource
+
+    Returns:
+        patched desired resource
+    """
+    if current.kind not in POD_RECYCLE_SUPPORTED_OWNER_KINDS:
+        return desired
+
+    current_annotations = (
+        current.body.get("spec", {})
+        .get("template", {})
+        .get("metadata", {})
+        .get("annotations")
+        or {}
+    )
+    patch_annotations = {
+        k: value
+        for k in RECYCLE_POD_ANNOTATIONS
+        if (value := current_annotations.get(k))
+    }
+    if patch_annotations:
+        desired_annotations = (
+            desired.body.setdefault("spec", {})
+            .setdefault("template", {})
+            .setdefault("metadata", {})
+            .setdefault("annotations", {})
+        )
+        desired.body["spec"]["template"]["metadata"]["annotations"] = (
+            patch_annotations | desired_annotations
+        )
+    return desired
+
+
 def handle_modified_resources(
     oc_map: ClusterMap,
     ri: ResourceInventory,
-    modified_resources: Mapping[
+    modified_resources: Mapping[str, DiffPair[OR, OR]],
     cluster: str,
     namespace: str,
     resource_type: str,
@@ -1031,6 +1083,12 @@ def _realize_resource_data_3way_diff(
     if options.enable_deletion and options.override_enable_deletion is False:
         options.enable_deletion = False
 
+    for k in data["current"].keys() & data["desired"].keys():
+        patch_desired_resource_for_recycle_annotations(
+            desired=data["desired"][k],
+            current=data["current"][k],
+        )
+
     diff_result = differ.diff_mappings(
         data["current"], data["desired"], equal=three_way_diff_using_hash
    )
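The new `patch_desired_resource_for_recycle_annotations` helper carries `restartedAt`-style annotations from the current pod template over to the desired one, so an unrelated manifest change does not wipe them and trigger a full pod recycle, while a value set on the desired manifest still wins. A standalone sketch of the same merge on plain dicts (no `OR` wrapper, names chosen for illustration):

```python
RECYCLE_POD_ANNOTATIONS = [
    "kubectl.kubernetes.io/restartedAt",
    "openshift.openshift.io/restartedAt",
]


def merge_recycle_annotations(desired: dict, current: dict) -> dict:
    """Copy restart annotations from the current pod template into the desired one.

    Annotations already set on the desired template win, mirroring the
    `patch_annotations | desired_annotations` merge in the diff above.
    """
    current_annotations = (
        current.get("spec", {}).get("template", {}).get("metadata", {}).get("annotations") or {}
    )
    patch = {k: v for k in RECYCLE_POD_ANNOTATIONS if (v := current_annotations.get(k))}
    if patch:
        desired_meta = (
            desired.setdefault("spec", {})
            .setdefault("template", {})
            .setdefault("metadata", {})
        )
        desired_meta["annotations"] = patch | desired_meta.get("annotations", {})
    return desired


current = {"spec": {"template": {"metadata": {"annotations": {
    "kubectl.kubernetes.io/restartedAt": "2024-01-01T00:00:00Z"}}}}}
desired = {"spec": {"template": {"metadata": {}}}}
print(merge_recycle_annotations(desired, current)["spec"]["template"]["metadata"]["annotations"])
# {'kubectl.kubernetes.io/restartedAt': '2024-01-01T00:00:00Z'}
```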
reconcile/openshift_namespaces.py
CHANGED
@@ -43,6 +43,7 @@ class DesiredState:
     cluster: str
     namespace: str
     delete: bool
+    cluster_admin: bool
 
 
 class NamespaceDuplicateError(Exception):
@@ -92,6 +93,7 @@ def build_desired_state(
             cluster=namespace.cluster.name,
             namespace=namespace.name,
             delete=namespace.delete or False,
+            cluster_admin=namespace.cluster_admin or False,
         )
         for namespace in namespaces
     ]
@@ -104,7 +106,7 @@ def manage_namespace(
 ) -> None:
     namespace = desired_state.namespace
 
-    oc = oc_map.get(desired_state.cluster)
+    oc = oc_map.get(desired_state.cluster, privileged=desired_state.cluster_admin)
     if isinstance(oc, OCLogMsg):
         logging.log(level=oc.log_level, msg=oc.message)
         return
@@ -116,9 +118,6 @@ def manage_namespace(
 
     action = Action.DELETE if desired_state.delete else Action.CREATE
 
-    if namespace.startswith("openshift-"):
-        raise ValueError(f'cannot {action} a project starting with "openshift-"')
-
     logging.info([str(action), desired_state.cluster, namespace])
     if not dry_run:
         match action:
reconcile/openshift_rhcs_certs.py
CHANGED
@@ -2,7 +2,7 @@ import logging
 import sys
 import time
 from collections.abc import Callable, Iterable, Mapping
-from typing import Any
+from typing import Any
 
 import reconcile.openshift_base as ob
 import reconcile.openshift_resources_base as orb
@@ -10,8 +10,8 @@ from reconcile.gql_definitions.common.rhcs_provider_settings import (
     RhcsProviderSettingsV1,
 )
 from reconcile.gql_definitions.rhcs.certs import (
-    NamespaceOpenshiftResourceRhcsCertV1,
     NamespaceV1,
+    OpenshiftResourceRhcsCert,
 )
 from reconcile.gql_definitions.rhcs.certs import (
     query as rhcs_certs_query,
@@ -32,7 +32,12 @@ from reconcile.utils.openshift_resource import (
     ResourceInventory,
     base64_encode_secret_field_value,
 )
-from reconcile.utils.rhcsv2_certs import
+from reconcile.utils.rhcsv2_certs import (
+    CertificateFormat,
+    RhcsV2CertPem,
+    RhcsV2CertPkcs12,
+    generate_cert,
+)
 from reconcile.utils.runtime.integration import DesiredStateShardConfig
 from reconcile.utils.secret_reader import create_secret_reader
 from reconcile.utils.semver_helper import make_semver
@@ -40,7 +45,6 @@ from reconcile.utils.vault import SecretNotFoundError, VaultClient
 
 QONTRACT_INTEGRATION = "openshift-rhcs-certs"
 QONTRACT_INTEGRATION_VERSION = make_semver(1, 9, 3)
-PROVIDERS = ["rhcs-cert"]
 
 
 def desired_state_shard_config() -> DesiredStateShardConfig:
@@ -67,8 +71,29 @@ class OpenshiftRhcsCertExpiration(GaugeMetric):
         return "qontract_reconcile_rhcs_cert_expiration_timestamp"
 
 
-def
-
+def _generate_placeholder_cert(
+    cert_format: CertificateFormat,
+) -> RhcsV2CertPem | RhcsV2CertPkcs12:
+    match cert_format:
+        case CertificateFormat.PKCS12:
+            return RhcsV2CertPkcs12(
+                pkcs12_keystore="PLACEHOLDER_KEYSTORE",
+                pkcs12_truststore="PLACEHOLDER_TRUSTSTORE",
+                expiration_timestamp=int(time.time()),
+            )
+        case CertificateFormat.PEM:
+            return RhcsV2CertPem(
+                certificate="PLACEHOLDER_CERT",
+                private_key="PLACEHOLDER_PRIVATE_KEY",
+                ca_cert="PLACEHOLDER_CA_CERT",
+                expiration_timestamp=int(time.time()),
+            )
+
+
+def get_certificate_format(
+    cert_resource: OpenshiftResourceRhcsCert,
+) -> CertificateFormat:
+    return CertificateFormat(cert_resource.certificate_format or "PEM")
 
 
 def get_namespaces_with_rhcs_certs(
@@ -82,21 +107,28 @@ def get_namespaces_with_rhcs_certs(
             integration_is_enabled(QONTRACT_INTEGRATION, ns.cluster)
             and not bool(ns.delete)
             and (not cluster_name or ns.cluster.name in cluster_name)
-            and
+            and ns.openshift_resources
         ):
             result.append(ns)
     return result
 
 
 def construct_rhcs_cert_oc_secret(
-    secret_name: str,
+    secret_name: str,
+    cert: Mapping[str, Any],
+    annotations: Mapping[str, str],
+    certificate_format: CertificateFormat,
 ) -> OR:
     body: dict[str, Any] = {
         "apiVersion": "v1",
         "kind": "Secret",
-        "type": "kubernetes.io/tls",
         "metadata": {"name": secret_name, "annotations": annotations},
     }
+    match certificate_format:
+        case CertificateFormat.PKCS12:
+            body["type"] = "Opaque"
+        case CertificateFormat.PEM:
+            body["type"] = "kubernetes.io/tls"
     for k, v in cert.items():
         v = base64_encode_secret_field_value(v)
         body.setdefault("data", {})[k] = v
@@ -105,7 +137,7 @@ def construct_rhcs_cert_oc_secret(
 
 def cert_expires_within_threshold(
     ns: NamespaceV1,
-    cert_resource:
+    cert_resource: OpenshiftResourceRhcsCert,
     vault_cert_secret: Mapping[str, Any],
 ) -> bool:
     auto_renew_threshold_days = cert_resource.auto_renew_threshold_days or 7
@@ -121,7 +153,7 @@ def cert_expires_within_threshold(
 
 def get_vault_cert_secret(
     ns: NamespaceV1,
-    cert_resource:
+    cert_resource: OpenshiftResourceRhcsCert,
     vault: VaultClient,
     vault_base_path: str,
 ) -> dict | None:
@@ -140,7 +172,7 @@ def get_vault_cert_secret(
 def generate_vault_cert_secret(
     dry_run: bool,
     ns: NamespaceV1,
-    cert_resource:
+    cert_resource: OpenshiftResourceRhcsCert,
     vault: VaultClient,
     vault_base_path: str,
     issuer_url: str,
@@ -150,17 +182,18 @@ def generate_vault_cert_secret(
        f"Creating cert with service account credentials for '{cert_resource.service_account_name}'. cluster='{ns.cluster.name}', namespace='{ns.name}', secret='{cert_resource.secret_name}'"
    )
    sa_password = vault.read(cert_resource.service_account_password.model_dump())
+    cert_format = get_certificate_format(cert_resource)
+
    if dry_run:
-        rhcs_cert =
-            certificate="PLACEHOLDER_CERT",
-            private_key="PLACEHOLDER_PRIVATE_KEY",
-            ca_cert="PLACEHOLDER_CA_CERT",
-            expiration_timestamp=int(time.time()),
-        )
+        rhcs_cert = _generate_placeholder_cert(cert_format)
    else:
        try:
            rhcs_cert = generate_cert(
-                issuer_url,
+                issuer_url=issuer_url,
+                uid=cert_resource.service_account_name,
+                pwd=sa_password,
+                ca_url=ca_cert_url,
+                cert_format=cert_format,
            )
        except ValueError as e:
            raise Exception(
@@ -171,18 +204,18 @@ def generate_vault_cert_secret(
            )
    vault.write(
        secret={
-            "data": rhcs_cert.model_dump(by_alias=True),
+            "data": rhcs_cert.model_dump(by_alias=True, exclude_none=True),
            "path": f"{vault_base_path}/{ns.cluster.name}/{ns.name}/{cert_resource.secret_name}",
        },
        decode_base64=False,
    )
-    return rhcs_cert.model_dump(by_alias=True)
+    return rhcs_cert.model_dump(by_alias=True, exclude_none=True)
 
 
 def fetch_openshift_resource_for_cert_resource(
     dry_run: bool,
     ns: NamespaceV1,
-    cert_resource:
+    cert_resource: OpenshiftResourceRhcsCert,
     vault: VaultClient,
     rhcs_settings: RhcsProviderSettingsV1,
 ) -> OR:
@@ -218,6 +251,7 @@ def fetch_openshift_resource_for_cert_resource(
         secret_name=cert_resource.secret_name,
         cert=vault_cert_secret,
         annotations=cert_resource.annotations or {},
+        certificate_format=get_certificate_format(cert_resource),
     )
 
 
@@ -231,18 +265,13 @@ def fetch_desired_state(
     cert_provider = get_rhcs_provider_settings(query_func=query_func)
     for ns in namespaces:
         for cert_resource in ns.openshift_resources or []:
-
-
-
-
-
-
-
-                cast("NamespaceOpenshiftResourceRhcsCertV1", cert_resource),
-                vault,
-                cert_provider,
-            ),
-        )
+            ri.add_desired_resource(
+                cluster=ns.cluster.name,
+                namespace=ns.name,
+                resource=fetch_openshift_resource_for_cert_resource(
+                    dry_run, ns, cert_resource, vault, cert_provider
+                ),
+            )
 
 
 @defer
@@ -295,3 +324,11 @@ def run(
     ob.publish_metrics(ri, QONTRACT_INTEGRATION)
     if ri.has_error_registered():
         sys.exit(1)
+
+
+def early_exit_desired_state(*args: Any, **kwargs: Any) -> dict[str, Any]:
+    if not (query_func := kwargs.get("query_func")):
+        query_func = gql.get_api().query
+
+    cluster_name = kwargs.get("cluster_name")
+    return {"namespace": get_namespaces_with_rhcs_certs(query_func, cluster_name)}
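The PKCS12 support above boils down to a format switch: PEM material keeps the `kubernetes.io/tls` secret type, keystore/truststore material needs an `Opaque` secret. A rough sketch of that mapping, assuming a `CertificateFormat` enum with `PEM` and `PKCS12` members (the real enum and cert models live in `reconcile/utils/rhcsv2_certs.py`; the member values here are assumptions):

```python
from enum import Enum
from typing import Any


class CertificateFormat(Enum):
    # Assumed member values; the diff only shows the names and a "PEM" default.
    PEM = "PEM"
    PKCS12 = "PKCS12"


def secret_body(secret_name: str, cert_format: CertificateFormat) -> dict[str, Any]:
    """Build the Secret skeleton that gets filled with base64-encoded cert data."""
    body: dict[str, Any] = {
        "apiVersion": "v1",
        "kind": "Secret",
        "metadata": {"name": secret_name},
    }
    match cert_format:
        case CertificateFormat.PKCS12:
            body["type"] = "Opaque"  # keystore/truststore bytes, not tls.crt/tls.key
        case CertificateFormat.PEM:
            body["type"] = "kubernetes.io/tls"
    return body


print(secret_body("my-cert", CertificateFormat.PKCS12)["type"])  # Opaque
```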
reconcile/rhidp/sso_client/base.py
CHANGED
@@ -1,3 +1,4 @@
+import http
 import logging
 from collections.abc import (
     Iterable,
@@ -10,6 +11,7 @@ from urllib.parse import (
 )
 
 import jwt
+from requests import HTTPError
 
 from reconcile.rhidp.common import (
     Cluster,
@@ -256,9 +258,18 @@ def delete_sso_client(
     )
     sso_client = SSOClient(**secret_reader.read_all_secret(secret=secret))
     keycloak_api = keycloak_map.get(sso_client.issuer)
-
-
-
-
+    try:
+        keycloak_api.delete_client(
+            registration_client_uri=sso_client.registration_client_uri,
+            registration_access_token=sso_client.registration_access_token,
+        )
+    except HTTPError as e:
+        if e.response.status_code != http.HTTPStatus.UNAUTHORIZED:
+            logging.error(f"Failed to delete SSO client {sso_client_id}: {e}")
+            raise
+        # something went wrong with the registration token, maybe it expired
+        logging.error(
+            f"Failed to delete SSO client {sso_client_id} due to unauthorized error: {e}. Continuing to delete the vault secret."
+        )
 
     secret_reader.vault_client.delete(path=secret.path)
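The sso-client change makes cleanup tolerant of an expired registration token: a 401 from the delete call is logged and skipped so the Vault secret still gets removed, while any other HTTP error keeps failing the run. A small sketch of that error-handling pattern with a stand-in delete callable (the Keycloak client itself is not reproduced here):

```python
import http
import logging
from collections.abc import Callable

from requests import HTTPError, Response


def delete_client_or_ignore_expired_token(delete_client: Callable[[], None], client_id: str) -> None:
    """Run delete_client(); swallow 401s (expired registration token), re-raise everything else."""
    try:
        delete_client()
    except HTTPError as e:
        if e.response.status_code != http.HTTPStatus.UNAUTHORIZED:
            logging.error(f"Failed to delete SSO client {client_id}: {e}")
            raise
        logging.error(
            f"Failed to delete SSO client {client_id} due to unauthorized error: {e}. "
            "Continuing with secret cleanup."
        )


def fake_delete() -> None:
    # Hypothetical failing call, for illustration only.
    response = Response()
    response.status_code = 401
    raise HTTPError(response=response)


delete_client_or_ignore_expired_token(fake_delete, "my-sso-client")  # logs, does not raise
```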
reconcile/templates/rosa-classic-cluster-creation.sh.j2
CHANGED
@@ -47,7 +47,7 @@ rosa create cluster -y --cluster-name={{ cluster_name }} \
   --service-cidr {{ cluster.network.service }} \
   --pod-cidr {{ cluster.network.pod }} \
   --host-prefix 23 \
-  --replicas
+  --replicas 3 \
   --compute-machine-type {{ cluster.machine_pools[0].instance_type }} \
 {% if cluster.spec.disable_user_workload_monitoring -%}
   --disable-workload-monitoring \
reconcile/templates/rosa-hcp-cluster-creation.sh.j2
CHANGED
@@ -47,7 +47,7 @@ rosa create cluster --cluster-name={{ cluster_name }} \
   --service-cidr {{ cluster.network.service }} \
   --pod-cidr {{ cluster.network.pod }} \
   --host-prefix 23 \
-  --replicas
+  --replicas 3 \
   --compute-machine-type {{ cluster.machine_pools[0].instance_type }} \
 {% if cluster.spec.private -%}
   --private \
reconcile/terraform_vpc_resources/integration.py
CHANGED
@@ -24,6 +24,7 @@ from reconcile.typed_queries.external_resources import get_settings
 from reconcile.typed_queries.github_orgs import get_github_orgs
 from reconcile.typed_queries.gitlab_instances import get_gitlab_instances
 from reconcile.utils import gql
+from reconcile.utils.disabled_integrations import integration_is_enabled
 from reconcile.utils.runtime.integration import (
     DesiredStateShardConfig,
     PydanticRunParams,
@@ -62,12 +63,14 @@ class TerraformVpcResources(QontractReconcileIntegration[TerraformVpcResourcesPa
     ) -> list[AWSAccountV1]:
         """Return a list of accounts extracted from the provided VPCRequests.
         If account_name is given returns the account object with that name."""
-
-
-
-
-
-
+        return [
+            vpc.account
+            for vpc in data
+            if (
+                integration_is_enabled(self.name, vpc.account)
+                and (not account_name or vpc.account.name == account_name)
+            )
+        ]
 
     def _handle_outputs(
         self, requests: Iterable[VPCRequest], outputs: Mapping[str, Any]
@@ -155,7 +158,7 @@ class TerraformVpcResources(QontractReconcileIntegration[TerraformVpcResourcesPa
         if data:
             accounts = self._filter_accounts(data, account_name)
             if account_name and not accounts:
-                msg = f"The account {account_name} doesn't have any managed
+                msg = f"The account {account_name} doesn't have any managed vpcs or the {QONTRACT_INTEGRATION} integration is disabled for this account. Verify your input"
                 logging.debug(msg)
                 sys.exit(ExitCodes.SUCCESS)
             else:
reconcile/typed_queries/saas_files.py
CHANGED
@@ -42,6 +42,7 @@ from reconcile.gql_definitions.fragments.saas_target_namespace import (
     SaasTargetNamespace,
 )
 from reconcile.utils import gql
+from reconcile.utils.environ import used_for_security_is_enabled
 from reconcile.utils.exceptions import (
     AppInterfaceSettingsError,
     ParameterError,
@@ -78,10 +79,14 @@ class SaasResourceTemplateTarget(
         self, parent_saas_file_name: str, parent_resource_template_name: str
     ) -> str:
         """Returns a unique identifier for a target."""
-
-
-        digest_size
-
+        data = f"{parent_saas_file_name}:{parent_resource_template_name}:{self.name or 'default'}:{self.namespace.cluster.name}:{self.namespace.name}".encode()
+        if used_for_security_is_enabled():
+            # When USED_FOR_SECURITY is enabled, use blake2s without digest_size and truncate to 20 bytes
+            # This is needed for FIPS compliance where digest_size parameter is not supported
+            return hashlib.blake2s(data).digest()[:20].hex()
+        else:
+            # Default behavior: use blake2s with digest_size=20
+            return hashlib.blake2s(data, digest_size=20).hexdigest()
 
 
 class SaasResourceTemplate(ConfiguredBaseModel, validate_by_alias=True):
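One caveat worth noting for the saas_files change: BLAKE2 mixes the requested digest length into its parameter block, so `blake2s(data).digest()[:20]` (a truncated 32-byte digest) and `blake2s(data, digest_size=20)` produce different bytes for the same input. Presumably that is why the new behavior is gated behind the `USED_FOR_SECURITY` flag rather than swapped in unconditionally: target uids change when the flag flips. A quick check with sample input:

```python
import hashlib

data = b"saas-file:template:default:cluster-1:namespace-1"  # illustrative input only

truncated = hashlib.blake2s(data).digest()[:20].hex()       # USED_FOR_SECURITY path
native = hashlib.blake2s(data, digest_size=20).hexdigest()  # default path

print(len(truncated) == len(native) == 40)  # True: both are 20 bytes / 40 hex chars
print(truncated == native)                  # False: the digest length is part of BLAKE2's parameters
```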
reconcile/utils/binary.py
CHANGED
@@ -38,10 +38,7 @@ def binary_version(
     def deco_binary_version(f: Callable) -> Callable:
         @wraps(f)
         def f_binary_version(*args: Any, **kwargs: Any) -> None:
-
-
-            cmd = [binary]
-            cmd.extend(version_args)
+            cmd = [binary, *version_args]
             try:
                 result = subprocess.run(cmd, capture_output=True, check=True)
             except subprocess.CalledProcessError as e:
@@ -50,15 +47,13 @@ def binary_version(
                 )
                 raise Exception(msg) from e
 
-
-
-
-
-
-            found = True
-            break
+            match = re.search(
+                search_regex,
+                result.stdout.decode("utf-8"),
+                re.MULTILINE,
+            )
 
-            if
+            if match is None:
                 raise Exception(
                     f"Could not find version for binary '{binary}' via regex "
                     f"for binary version check: "
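The binary.py cleanup collapses the old line-by-line loop into a single `re.search` over the command output and raises immediately on a miss. A tiny standalone illustration of the same check against canned output (binary name, regex, and output are made up for the example):

```python
import re

stdout = "tool version v1.42.0\nbuild: abc123\n"          # pretend `tool version` output
search_regex = r"^tool version v1\.4[0-9]+\.[0-9]+$"      # pretend required-version regex

match = re.search(search_regex, stdout, re.MULTILINE)
if match is None:
    raise Exception("Could not find version for binary 'tool' via regex for binary version check")
print(match.group(0))  # tool version v1.42.0
```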
reconcile/utils/environ.py
CHANGED
@@ -4,6 +4,11 @@ from functools import wraps
 from typing import Any
 
 
+def used_for_security_is_enabled() -> bool:
+    used_for_security_env = os.getenv("USED_FOR_SECURITY", "false")
+    return used_for_security_env.lower() == "true"
+
+
 def environ(variables: Iterable[str] | None = None) -> Callable:
     """Check that environment variables are set before execution."""
     if variables is None:
reconcile/utils/gitlab_api.py
CHANGED
@@ -444,6 +444,8 @@ class GitLabApi:
     def get_merge_request_comments(
         merge_request: ProjectMergeRequest,
         include_description: bool = False,
+        include_approvals: bool = False,
+        approval_body: str = "",
     ) -> list[Comment]:
         comments = []
         if include_description:
@@ -455,6 +457,16 @@ class GitLabApi:
                     created_at=merge_request.created_at,
                 )
             )
+        if include_approvals:
+            comments.extend(
+                Comment(
+                    id=approval["user"]["id"],
+                    username=approval["user"]["username"],
+                    body=approval_body,
+                    created_at=approval["approved_at"],
+                )
+                for approval in merge_request.approvals.get().approved_by
+            )
         comments.extend(
             Comment(
                 id=note.id,
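With the gitlab_api change, approvals can be folded into the same `Comment` stream that already carries MR notes and the description, so downstream consumers can treat an approval like any other comment. A sketch with plain dicts standing in for the approval payload the diff iterates over, and a hypothetical dataclass mirroring the `Comment` fields it uses:

```python
from dataclasses import dataclass


@dataclass
class Comment:
    # Hypothetical stand-in; the real Comment model lives in reconcile's GitLab utilities.
    id: int
    username: str
    body: str
    created_at: str


def approvals_to_comments(approved_by: list[dict], approval_body: str = "") -> list[Comment]:
    """Map approval entries onto Comment objects, one per approver."""
    return [
        Comment(
            id=approval["user"]["id"],
            username=approval["user"]["username"],
            body=approval_body,
            created_at=approval["approved_at"],
        )
        for approval in approved_by
    ]


approved_by = [{"user": {"id": 7, "username": "alice"}, "approved_at": "2024-05-01T12:00:00Z"}]
print(approvals_to_comments(approved_by, approval_body="/lgtm")[0].username)  # alice
```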
reconcile/utils/glitchtip/client.py
CHANGED
@@ -165,7 +165,7 @@ class GlitchtipClient(ApiBase):
             **self._post(
                 f"/api/0/projects/{organization_slug}/{project_slug}/alerts/",
                 data=alert.model_dump(
-                    by_alias=True, exclude_unset=True, exclude_none=True
+                    mode="json", by_alias=True, exclude_unset=True, exclude_none=True
                 ),
             )
         )
@@ -186,7 +186,7 @@ class GlitchtipClient(ApiBase):
             **self._put(
                 f"/api/0/projects/{organization_slug}/{project_slug}/alerts/{alert.pk}/",
                 data=alert.model_dump(
-                    by_alias=True, exclude_unset=True, exclude_none=True
+                    mode="json", by_alias=True, exclude_unset=True, exclude_none=True
                 ),
             )
         )
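The glitchtip client now dumps alerts with `mode="json"`, which makes pydantic coerce non-JSON-native field types (URLs, datetimes, enums and the like) into JSON-safe primitives before the payload is sent. A minimal pydantic v2 example of the difference; the model is illustrative, not Glitchtip's alert schema:

```python
import json
from datetime import datetime, timezone

from pydantic import BaseModel, HttpUrl


class Alert(BaseModel):
    url: HttpUrl
    triggered_at: datetime


alert = Alert(url="https://example.com/hook", triggered_at=datetime(2024, 5, 1, tzinfo=timezone.utc))

python_mode = alert.model_dump()           # url stays a URL object, triggered_at stays a datetime
json_mode = alert.model_dump(mode="json")  # both become plain strings

print(json.dumps(json_mode))  # serializes cleanly
# json.dumps(python_mode) would raise TypeError: URL and datetime objects are not JSON serializable
```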