qontract-reconcile 0.10.1rc763__py3-none-any.whl → 0.10.1rc764__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,249 @@
+import sys
+from abc import ABC, abstractmethod
+from collections.abc import Iterable
+from typing import Any
+
+from kubernetes.client import (
+    V1Container,
+    V1EmptyDirVolumeSource,
+    V1EnvVar,
+    V1EnvVarSource,
+    V1JobSpec,
+    V1LocalObjectReference,
+    V1ObjectFieldSelector,
+    V1ObjectMeta,
+    V1PodSpec,
+    V1PodTemplateSpec,
+    V1SecretVolumeSource,
+    V1Volume,
+    V1VolumeMount,
+)
+from pydantic import BaseModel
+
+from reconcile.external_resources.model import (
+    Reconciliation,
+)
+from reconcile.external_resources.state import ReconcileStatus
+from reconcile.utils.jobcontroller.controller import (
+    JobConcurrencyPolicy,
+    K8sJobController,
+)
+from reconcile.utils.jobcontroller.models import K8sJob
+
+
+class ExternalResourcesReconciler(ABC):
+    @abstractmethod
+    def get_resource_reconcile_status(
+        self,
+        reconciliation: Reconciliation,
+    ) -> ReconcileStatus: ...
+
+    @abstractmethod
+    def reconcile_resource(self, reconciliation: Reconciliation) -> None: ...
+
+    @abstractmethod
+    def get_resource_reconcile_logs(self, reconciliation: Reconciliation) -> None: ...
+
+    @abstractmethod
+    def wait_for_reconcile_list_completion(
+        self,
+        reconcile_list: Iterable[Reconciliation],
+        check_interval_seconds: int,
+        timeout_seconds: int,
+    ) -> dict[str, ReconcileStatus]: ...
+
+
+class ReconciliationK8sJob(K8sJob, BaseModel, frozen=True):
+    """
+    Wraps a reconciliation request into a Kubernetes Job
+    """
+
+    reconciliation: Reconciliation
+    is_dry_run: bool = False
+    dry_run_suffix: str = ""
+
+    def name_prefix(self) -> str:
+        if self.is_dry_run:
+            return f"er-dry-run-mr-{self.dry_run_suffix}"
+        else:
+            return "er"
+
+    def unit_of_work_identity(self) -> Any:
+        return self.reconciliation.key
+
+    def description(self) -> str:
+        return f"Action: {self.reconciliation.action}, Key: {self.reconciliation.key}"
+
+    def annotations(self) -> dict[str, Any]:
+        return {
+            "provision_provider": self.reconciliation.key.provision_provider,
+            "provisioner": self.reconciliation.key.provisioner_name,
+            "provider": self.reconciliation.key.provider,
+            "identifier": self.reconciliation.key.identifier,
+        }
+
+    def job_spec(self) -> V1JobSpec:
+        return V1JobSpec(
+            backoff_limit=0,
+            active_deadline_seconds=self.reconciliation.module_configuration.reconcile_timeout_minutes
+            * 60,
+            ttl_seconds_after_finished=3600,
+            template=V1PodTemplateSpec(
+                metadata=V1ObjectMeta(
+                    annotations=self.annotations(), labels=self.labels()
+                ),
+                spec=V1PodSpec(
+                    init_containers=[
+                        V1Container(
+                            name="job",
+                            image=self.reconciliation.module_configuration.image_version,
+                            image_pull_policy="Always",
+                            env=[
+                                V1EnvVar(
+                                    name="DRY_RUN",
+                                    value=str(self.is_dry_run),
+                                ),
+                                V1EnvVar(
+                                    name="ACTION",
+                                    value=self.reconciliation.action.value,
+                                ),
+                            ],
+                            volume_mounts=[
+                                V1VolumeMount(
+                                    name="credentials",
+                                    mount_path="/credentials",
+                                    sub_path="credentials",
+                                ),
+                                V1VolumeMount(
+                                    name="workdir",
+                                    mount_path="/work",
+                                ),
+                                self.scripts_volume_mount("/inputs"),
+                            ],
+                        )
+                    ],
+                    containers=[
+                        V1Container(
+                            name="outputs",
+                            image="quay.io/app-sre/er-outputs-secrets:0.0.1",
+                            command=["/bin/bash", "/app/entrypoint.sh"],
+                            image_pull_policy="Always",
+                            env=[
+                                V1EnvVar(
+                                    name="NAMESPACE",
+                                    value_from=V1EnvVarSource(
+                                        field_ref=V1ObjectFieldSelector(
+                                            field_path="metadata.namespace"
+                                        )
+                                    ),
+                                ),
+                                V1EnvVar(
+                                    name="ACTION",
+                                    value=self.reconciliation.action.value,
+                                ),
+                                V1EnvVar(
+                                    name="DRY_RUN",
+                                    value=str(self.is_dry_run),
+                                ),
+                            ],
+                            volume_mounts=[
+                                V1VolumeMount(
+                                    name="credentials",
+                                    mount_path="/.aws/credentials",
+                                    sub_path="credentials",
+                                ),
+                                V1VolumeMount(
+                                    name="workdir",
+                                    mount_path="/work",
+                                ),
+                                self.scripts_volume_mount("/inputs"),
+                            ],
+                        )
+                    ],
+                    image_pull_secrets=[V1LocalObjectReference(name="quay.io")],
+                    volumes=[
+                        V1Volume(
+                            name="credentials",
+                            secret=V1SecretVolumeSource(
+                                secret_name=f"credentials-{self.reconciliation.key.provisioner_name}",
+                            ),
+                        ),
+                        V1Volume(
+                            name="workdir",
+                            empty_dir=V1EmptyDirVolumeSource(size_limit="10Mi"),
+                        ),
+                        self.scripts_volume(),
+                    ],
+                    restart_policy="Never",
+                    service_account_name="external-resources-sa",
+                ),
+            ),
+        )
+
+    def scripts(self) -> dict[str, str]:
+        return {"input.json": self.reconciliation.input}
+
+
+class K8sExternalResourcesReconciler(ExternalResourcesReconciler):
+    def __init__(
+        self, controller: K8sJobController, dry_run: bool, dry_run_job_suffix: str = ""
+    ) -> None:
+        self.controller = controller
+        self.dry_run = dry_run
+        self.dry_run_job_suffix = dry_run_job_suffix
+
+    def get_resource_reconcile_status(
+        self,
+        reconciliation: Reconciliation,
+    ) -> ReconcileStatus:
+        job_name = ReconciliationK8sJob(reconciliation=reconciliation).name()
+        return ReconcileStatus(self.controller.get_job_status(job_name))
+
+    def reconcile_resource(self, reconciliation: Reconciliation) -> None:
+        concurrency_policy = (
+            JobConcurrencyPolicy.REPLACE_FAILED | JobConcurrencyPolicy.REPLACE_FINISHED
+        )
+        if self.dry_run:
+            concurrency_policy = (
+                JobConcurrencyPolicy.REPLACE_FAILED
+                | JobConcurrencyPolicy.REPLACE_FINISHED
+                | JobConcurrencyPolicy.REPLACE_IN_PROGRESS
+            )
+
+        self.controller.enqueue_job(
+            ReconciliationK8sJob(
+                reconciliation=reconciliation,
+                is_dry_run=self.dry_run,
+                dry_run_suffix=self.dry_run_job_suffix,
+            ),
+            concurrency_policy=concurrency_policy,
+        )
+
+    def wait_for_reconcile_list_completion(
+        self,
+        reconcile_list: Iterable[Reconciliation],
+        check_interval_seconds: int,
+        timeout_seconds: int,
+    ) -> dict[str, ReconcileStatus]:
+        job_names = {
+            ReconciliationK8sJob(
+                reconciliation=r,
+                is_dry_run=self.dry_run,
+                dry_run_suffix=self.dry_run_job_suffix,
+            ).name()
+            for r in reconcile_list
+        }
+        job_status = self.controller.wait_for_job_list_completion(
+            job_names=job_names,
+            check_interval_seconds=check_interval_seconds,
+            timeout_seconds=timeout_seconds,
+        )
+        return {job: ReconcileStatus(status) for job, status in job_status.items()}
+
+    def get_resource_reconcile_logs(self, reconciliation: Reconciliation) -> None:
+        job = ReconciliationK8sJob(
+            reconciliation=reconciliation,
+            is_dry_run=True,
+            dry_run_suffix=self.dry_run_job_suffix,
+        )
+        self.controller.get_job_logs(job_name=job.name(), output=sys.stdout)
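
For orientation, below is a minimal sketch of how this reconciler might be driven from an integration loop. It is not part of the diff: the K8sJobController and the Reconciliation objects are assumed to be built elsewhere, it only calls methods defined in the file above, and the polling interval and timeout values are illustrative placeholders.

from collections.abc import Iterable

from reconcile.external_resources.model import Reconciliation
from reconcile.external_resources.state import ReconcileStatus
from reconcile.utils.jobcontroller.controller import K8sJobController


def run_reconciliations(
    controller: K8sJobController,  # assumed to be created by the integration
    reconciliations: Iterable[Reconciliation],
    dry_run: bool,
    dry_run_job_suffix: str = "",
) -> dict[str, ReconcileStatus]:
    # Hypothetical driver, not part of this package: enqueue one Job per
    # reconciliation, then block until every Job finishes or the timeout hits.
    # K8sExternalResourcesReconciler is the class defined in the file above.
    reconciler = K8sExternalResourcesReconciler(
        controller=controller,
        dry_run=dry_run,
        dry_run_job_suffix=dry_run_job_suffix,
    )
    reconcile_list = list(reconciliations)
    for reconciliation in reconcile_list:
        reconciler.reconcile_resource(reconciliation)
    return reconciler.wait_for_reconcile_list_completion(
        reconcile_list=reconcile_list,
        check_interval_seconds=30,  # placeholder polling interval
        timeout_seconds=1800,  # placeholder overall timeout
    )

Note that the Job name depends on is_dry_run and dry_run_suffix via name_prefix(), so the same parameters used at enqueue time must be used when waiting on or fetching logs for a job.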
@@ -0,0 +1,229 @@
+import base64
+import json
+from abc import abstractmethod
+from collections.abc import Iterable, Mapping
+from hashlib import shake_128
+from typing import Any, Optional, cast
+
+from pydantic import BaseModel
+from sretoolbox.utils import retry
+
+from reconcile.external_resources.meta import (
+    QONTRACT_INTEGRATION,
+    QONTRACT_INTEGRATION_VERSION,
+)
+from reconcile.typed_queries.clusters_minimal import get_clusters_minimal
+from reconcile.utils.differ import diff_mappings
+from reconcile.utils.external_resource_spec import (
+    ExternalResourceSpec,
+)
+from reconcile.utils.oc import (
+    OCCli,
+)
+from reconcile.utils.oc_map import OCMap, init_oc_map_from_clusters
+from reconcile.utils.openshift_resource import OpenshiftResource, ResourceInventory
+from reconcile.utils.secret_reader import SecretReaderBase
+from reconcile.utils.three_way_diff_strategy import three_way_diff_using_hash
+from reconcile.utils.vault import (
+    VaultClient,
+    _VaultClient,  # noqa
+)
+
+
+class VaultSecret(BaseModel):
+    """Generic class to use the Protocol with Dicts"""
+
+    path: str
+    field: str
+    version: Optional[int]
+    q_format: Optional[str]
+
+
+class SecretsReconciler:
+    def __init__(
+        self,
+        ri: ResourceInventory,
+        secrets_reader: SecretReaderBase,
+        vault_path: str,
+        vault_client: VaultClient,
+    ) -> None:
+        self.secrets_reader = secrets_reader
+        self.ri = ri
+        self.vault_path = vault_path
+        self.vault_client = cast(_VaultClient, vault_client)
+
+    @abstractmethod
+    def _populate_secret_data(self, specs: Iterable[ExternalResourceSpec]) -> None:
+        raise NotImplementedError()
+
+    def _populate_annotations(self, spec: ExternalResourceSpec) -> None:
+        try:
+            annotations = json.loads(spec.resource["annotations"])
+        except Exception:
+            annotations = {}
+
+        annotations["provision_provider"] = spec.provision_provider
+        annotations["provisioner"] = spec.provisioner_name
+        annotations["provider"] = spec.provider
+        annotations["identifier"] = spec.identifier
+
+        spec.resource["annotations"] = json.dumps(annotations)
+
+    def _initialize_ri(
+        self,
+        ri: ResourceInventory,
+        specs: Iterable[ExternalResourceSpec],
+    ) -> None:
+        for spec in specs:
+            ri.initialize_resource_type(
+                spec.cluster_name, spec.namespace_name, "Secret"
+            )
+            ri.add_desired(
+                spec.cluster_name,
+                spec.namespace_name,
+                "Secret",
+                name=spec.output_resource_name,
+                value=spec.build_oc_secret(
+                    QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION
+                ),
+            )
+
+    def _init_ocmap(self, specs: Iterable[ExternalResourceSpec]) -> OCMap:
+        return init_oc_map_from_clusters(
+            clusters=[
+                c
+                for c in get_clusters_minimal()
+                if c.name in [o.cluster_name for o in specs]
+            ],
+            secret_reader=self.secrets_reader,
+            integration=QONTRACT_INTEGRATION,
+        )
+
+    @retry()
+    def _write_secrets_to_vault(self, spec: ExternalResourceSpec) -> None:
+        if spec.secret:
+            secret_path = f"{self.vault_path}/{QONTRACT_INTEGRATION}/{spec.cluster_name}/{spec.namespace_name}/{spec.output_resource_name}"
+            stringified_secret = {k: str(v) for k, v in spec.secret.items()}
+            desired_secret = {"path": secret_path, "data": stringified_secret}
+            self.vault_client.write(desired_secret, decode_base64=False)
+
+    def sync_secrets(self, specs: Iterable[ExternalResourceSpec]) -> None:
+        self._populate_secret_data(specs)
+        ri = ResourceInventory()
+        self._initialize_ri(ri, specs)
+        ocmap = self._init_ocmap(specs)
+        for item in ri:
+            self.reconcile_data(item, ri, ocmap)
+
+    def reconcile_data(
+        self,
+        ri_item: tuple[str, str, str, Mapping[str, Any]],
+        ri: ResourceInventory,
+        ocmap: OCMap,
+    ) -> None:
+        cluster, namespace, kind, data = ri_item
+        oc = ocmap.get_cluster(cluster)
+        names = list(data["desired"].keys())
+
+        items = oc.get_items("Secret", namespace=namespace, resource_names=names)
+        for item in items:
+            obj = OpenshiftResource(
+                body=item,
+                integration=QONTRACT_INTEGRATION,
+                integration_version=QONTRACT_INTEGRATION_VERSION,
+            )
+            ri.add_current(cluster, namespace, kind, name=obj.name, value=obj)
+
+        diff = diff_mappings(
+            data["current"], data["desired"], equal=three_way_diff_using_hash
+        )
+        items_to_update = [i.desired for i in diff.change.values()] + list(
+            diff.add.values()
+        )
+        self.apply_action(oc, namespace, items_to_update)
+
+    def apply_action(
+        self, oc: OCCli, namespace: str, items: Iterable[OpenshiftResource]
+    ) -> None:
+        for i in items:
+            oc.apply(namespace, resource=i)
+
+
+class InClusterSecretsReconciler(SecretsReconciler):
+    def __init__(
+        self,
+        ri: ResourceInventory,
+        secrets_reader: SecretReaderBase,
+        vault_path: str,
+        vault_client: VaultClient,
+        cluster: str,
+        namespace: str,
+        oc: OCCli,
+    ):
+        super().__init__(ri, secrets_reader, vault_path, vault_client)
+
+        self.cluster = cluster
+        self.namespace = namespace
+        self.oc = oc
+        self.source_secrets: list[str] = []
+
+    def _get_spec_hash(self, spec: ExternalResourceSpec) -> str:
+        secret_key = f"{spec.provision_provider}-{spec.provisioner_name}-{spec.provider}-{spec.identifier}"
+        return shake_128(secret_key.encode("utf-8")).hexdigest(16)
+
+    def _populate_secret_data(self, specs: Iterable[ExternalResourceSpec]) -> None:
+        if not specs:
+            return
+        secrets_map = {
+            "external-resources-output-" + self._get_spec_hash(spec): spec
+            for spec in specs
+        }
+        secrets = self.oc.get_items(
+            "Secret", namespace=self.namespace, resource_names=list(secrets_map.keys())
+        )
+        for secret in secrets:
+            secret_name = secret["metadata"]["name"]
+            spec = secrets_map[secret_name]
+            data = dict[str, str]()
+            for k, v in secret["data"].items():
+                decoded = base64.b64decode(v).decode("utf-8")
+                if decoded.startswith("__vault__:"):
+                    _secret_ref = json.loads(decoded.replace("__vault__:", ""))
+                    secret_ref = VaultSecret(**_secret_ref)
+                    data[k] = self.secrets_reader.read_secret(secret_ref)
+                else:
+                    data[k] = decoded
+            spec.secret = data
+
+        self.source_secrets = list(secrets_map.keys())
+
+    def _delete_source_secrets(self) -> None:
+        for secret_name in self.source_secrets:
+            print("Deleting secret " + secret_name)
+            self.oc.delete(namespace=self.namespace, kind="Secret", name=secret_name)
+
+    def sync_secrets(self, specs: Iterable[ExternalResourceSpec]) -> None:
+        super().sync_secrets(specs)
+        self._delete_source_secrets()
+
+
+def build_incluster_secrets_reconciler(
+    cluster: str, namespace: str, secrets_reader: SecretReaderBase, vault_path: str
+) -> InClusterSecretsReconciler:
+    ri = ResourceInventory()
+    ocmap = init_oc_map_from_clusters(
+        clusters=[c for c in get_clusters_minimal() if c.name == cluster],
+        secret_reader=secrets_reader,
+        integration=QONTRACT_INTEGRATION,
+    )
+    oc = ocmap.get_cluster(cluster)
+    vault_client = VaultClient()
+    return InClusterSecretsReconciler(
+        cluster=cluster,
+        namespace=namespace,
+        ri=ri,
+        oc=oc,
+        vault_path=vault_path,
+        vault_client=vault_client,
+        secrets_reader=secrets_reader,
+    )
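
As a usage note, here is a hedged sketch of how the factory above might be wired into an integration. It is not part of the diff: the cluster name, namespace, and Vault path are placeholders, and the ExternalResourceSpec objects are assumed to be produced by the external-resources inventory code elsewhere in the package.

from collections.abc import Iterable

from reconcile.utils.external_resource_spec import ExternalResourceSpec
from reconcile.utils.secret_reader import SecretReaderBase


def sync_external_resource_outputs(
    secrets_reader: SecretReaderBase,
    specs: Iterable[ExternalResourceSpec],  # assumed to come from the ER inventory
) -> None:
    # Hypothetical wiring, not part of this package.
    # build_incluster_secrets_reconciler is the factory defined in the file above.
    reconciler = build_incluster_secrets_reconciler(
        cluster="app-interface-cluster",  # placeholder cluster name
        namespace="external-resources",  # placeholder namespace
        secrets_reader=secrets_reader,
        vault_path="app-sre/external-resources",  # placeholder Vault path prefix
    )
    # Reads the "external-resources-output-<hash>" source Secrets, applies the
    # desired output Secrets to the target namespaces, and finally deletes the
    # source Secrets (see InClusterSecretsReconciler.sync_secrets above).
    reconciler.sync_secrets(list(specs))

Materializing the specs into a list before calling sync_secrets avoids exhausting a generator, since sync_secrets iterates the specs several times (to populate secret data, initialize the ResourceInventory, and build the OCMap).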