qontract-reconcile 0.10.1rc612__py3-none-any.whl → 0.10.1rc614__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,743 @@
1
from __future__ import annotations

import logging
import random
import string
from abc import ABC, abstractmethod
from collections.abc import Mapping
from typing import (
    Any,
)

from pydantic import BaseModel

from reconcile.ocm.types import (
    ClusterMachinePool,
    OCMClusterNetwork,
    OCMClusterSpec,
    OCMSpec,
    OSDClusterSpec,
    ROSAClusterAWSAccount,
    ROSAClusterSpec,
    ROSAOcmAwsAttrs,
    ROSAOcmAwsStsAttrs,
)
from reconcile.utils.exceptions import ParameterError
from reconcile.utils.ocm.clusters import get_provisioning_shard_id
from reconcile.utils.ocm_base_client import OCMBaseClient
from reconcile.utils.rosa.rosa_cli import RosaCliException
from reconcile.utils.rosa.session import RosaSessionBuilder
30
+
31
+ CS_API_BASE = "/api/clusters_mgmt"
32
+
33
+ SPEC_ATTR_ACCOUNT = "account"
34
+ SPEC_ATTR_DISABLE_UWM = "disable_user_workload_monitoring"
35
+ SPEC_ATTR_PRIVATE = "private"
36
+ SPEC_ATTR_CHANNEL = "channel"
37
+ SPEC_ATTR_LOAD_BALANCERS = "load_balancers"
38
+ SPEC_ATTR_STORAGE = "storage"
39
+ SPEC_ATTR_ID = "id"
40
+ SPEC_ATTR_EXTERNAL_ID = "external_id"
41
+ SPEC_ATTR_OIDC_ENDPONT_URL = "oidc_endpoint_url"
42
+ SPEC_ATTR_PROVISION_SHARD_ID = "provision_shard_id"
43
+ SPEC_ATTR_VERSION = "version"
44
+ SPEC_ATTR_INITIAL_VERSION = "initial_version"
45
+ SPEC_ATTR_MULTI_AZ = "multi_az"
46
+ SPEC_ATTR_HYPERSHIFT = "hypershift"
47
+ SPEC_ATTR_SUBNET_IDS = "subnet_ids"
48
+ SPEC_ATTR_AVAILABILITY_ZONES = "availability_zones"
49
+
50
+ SPEC_ATTR_NETWORK = "network"
51
+
52
+ SPEC_ATTR_CONSOLE_URL = "consoleUrl"
53
+ SPEC_ATTR_SERVER_URL = "serverUrl"
54
+ SPEC_ATTR_ELBFQDN = "elbFQDN"
55
+ SPEC_ATTR_PATH = "path"
56
+
57
+ BYTES_IN_GIGABYTE = 1024**3
58
+ DEFAULT_OCM_MACHINE_POOL_ID = "worker"
59
+
60
+ OCM_PRODUCT_OSD = "osd"
61
+ OCM_PRODUCT_ROSA = "rosa"
62
+ OCM_PRODUCT_HYPERSHIFT = "hypershift"
63
+
64
+
65
+ class OCMValidationException(Exception):
66
+ pass
67
+
68
+
69
+ class OCMProduct:
70
+ ALLOWED_SPEC_UPDATE_FIELDS: set[str]
71
+ EXCLUDED_SPEC_FIELDS: set[str]
72
+
73
+ @abstractmethod
74
+ def create_cluster(
75
+ self,
76
+ ocm: OCMBaseClient,
77
+ org_id: str,
78
+ name: str,
79
+ cluster: OCMSpec,
80
+ dry_run: bool,
81
+ ) -> None:
82
+ pass
83
+
84
+ @abstractmethod
85
+ def update_cluster(
86
+ self,
87
+ ocm: OCMBaseClient,
88
+ cluster_id: str,
89
+ update_spec: Mapping[str, Any],
90
+ dry_run: bool,
91
+ ) -> None:
92
+ pass
93
+
94
+ @abstractmethod
95
+ def get_ocm_spec(
96
+ self,
97
+ ocm: OCMBaseClient,
98
+ cluster: Mapping[str, Any],
99
+ init_provision_shards: bool,
100
+ ) -> OCMSpec:
101
+ pass
102
+
103
+
104
+ class OCMProductOsd(OCMProduct):
105
+ ALLOWED_SPEC_UPDATE_FIELDS = {
106
+ SPEC_ATTR_STORAGE,
107
+ SPEC_ATTR_LOAD_BALANCERS,
108
+ SPEC_ATTR_PRIVATE,
109
+ SPEC_ATTR_CHANNEL,
110
+ SPEC_ATTR_DISABLE_UWM,
111
+ }
112
+
113
+ EXCLUDED_SPEC_FIELDS = {
114
+ SPEC_ATTR_ID,
115
+ SPEC_ATTR_EXTERNAL_ID,
116
+ SPEC_ATTR_PROVISION_SHARD_ID,
117
+ SPEC_ATTR_VERSION,
118
+ SPEC_ATTR_INITIAL_VERSION,
119
+ SPEC_ATTR_HYPERSHIFT,
120
+ }
121
+
122
+ def create_cluster(
123
+ self,
124
+ ocm: OCMBaseClient,
125
+ org_id: str,
126
+ name: str,
127
+ cluster: OCMSpec,
128
+ dry_run: bool,
129
+ ) -> None:
130
+ ocm_spec = self._get_create_cluster_spec(name, cluster)
131
+ api = f"{CS_API_BASE}/v1/clusters"
132
+ params = {}
133
+ if dry_run:
134
+ params["dryRun"] = "true"
135
+
136
+ ocm.post(api, ocm_spec, params)
137
+
138
+ def update_cluster(
139
+ self,
140
+ ocm: OCMBaseClient,
141
+ cluster_id: str,
142
+ update_spec: Mapping[str, Any],
143
+ dry_run: bool,
144
+ ) -> None:
145
+ ocm_spec = self._get_update_cluster_spec(update_spec)
146
+ api = f"{CS_API_BASE}/v1/clusters/{cluster_id}"
147
+ params: dict[str, Any] = {}
148
+ if dry_run:
149
+ params["dryRun"] = "true"
150
+ ocm.patch(api, ocm_spec, params)
151
+
152
+ def get_ocm_spec(
153
+ self,
154
+ ocm: OCMBaseClient,
155
+ cluster: Mapping[str, Any],
156
+ init_provision_shards: bool,
157
+ ) -> OCMSpec:
158
+ if init_provision_shards:
159
+ provision_shard_id = get_provisioning_shard_id(ocm, cluster["id"])
160
+ else:
161
+ provision_shard_id = None
162
+
163
+ spec = OCMClusterSpec(
164
+ product=cluster["product"]["id"],
165
+ id=cluster["id"],
166
+ external_id=cluster["external_id"],
167
+ provider=cluster["cloud_provider"]["id"],
168
+ region=cluster["region"]["id"],
169
+ channel=cluster["version"]["channel_group"],
170
+ version=cluster["version"]["raw_id"],
171
+ multi_az=cluster["multi_az"],
172
+ private=cluster["api"]["listening"] == "internal",
173
+ disable_user_workload_monitoring=cluster[
174
+ "disable_user_workload_monitoring"
175
+ ],
176
+ provision_shard_id=provision_shard_id,
177
+ hypershift=cluster["hypershift"]["enabled"],
178
+ )
179
+
180
+ if not cluster["ccs"]["enabled"]:
181
+ cluster_spec_data = spec.dict()
182
+ cluster_spec_data["storage"] = (
183
+ cluster["storage_quota"]["value"] // BYTES_IN_GIGABYTE
184
+ )
185
+ cluster_spec_data["load_balancers"] = cluster["load_balancer_quota"]
186
+ spec = OSDClusterSpec(**cluster_spec_data)
187
+
188
+ machine_pools = [
189
+ ClusterMachinePool(**p) for p in cluster.get("machinePools") or []
190
+ ]
191
+
192
+ network = OCMClusterNetwork(
193
+ type=cluster["network"].get("type") or "OVNKubernetes",
194
+ vpc=cluster["network"]["machine_cidr"],
195
+ service=cluster["network"]["service_cidr"],
196
+ pod=cluster["network"]["pod_cidr"],
197
+ )
198
+
199
+ ocm_spec = OCMSpec(
200
+ console_url=cluster["console"]["url"],
201
+ server_url=cluster["api"]["url"],
202
+ domain=cluster["dns"]["base_domain"],
203
+ spec=spec,
204
+ machine_pools=machine_pools,
205
+ network=network,
206
+ )
207
+
208
+ return ocm_spec
209
+
210
+ def _get_nodes_spec(self, cluster: OCMSpec) -> dict[str, Any]:
211
+ default_machine_pool = next(
212
+ (
213
+ mp
214
+ for mp in cluster.machine_pools
215
+ if mp.id == DEFAULT_OCM_MACHINE_POOL_ID
216
+ ),
217
+ None,
218
+ )
219
+ if default_machine_pool is None:
220
+ raise OCMValidationException(
221
+ f"No default machine pool found, id: {DEFAULT_OCM_MACHINE_POOL_ID}"
222
+ )
223
+
224
+ spec: dict[str, Any] = {
225
+ "compute_machine_type": {"id": default_machine_pool.instance_type},
226
+ }
227
+ if default_machine_pool.autoscale is not None:
228
+ spec["autoscale_compute"] = default_machine_pool.autoscale.dict()
229
+ else:
230
+ spec["compute"] = default_machine_pool.replicas
231
+ return spec
232
+
233
+ def _get_create_cluster_spec(
234
+ self, cluster_name: str, cluster: OCMSpec
235
+ ) -> dict[str, Any]:
236
+ ocm_spec: dict[str, Any] = {
237
+ "name": cluster_name,
238
+ "cloud_provider": {"id": cluster.spec.provider},
239
+ "region": {"id": cluster.spec.region},
240
+ "version": {
241
+ "id": f"openshift-v{cluster.spec.initial_version}",
242
+ "channel_group": cluster.spec.channel,
243
+ },
244
+ "multi_az": cluster.spec.multi_az,
245
+ "nodes": self._get_nodes_spec(cluster),
246
+ "network": {
247
+ "type": cluster.network.type or "OVNKubernetes",
248
+ "machine_cidr": cluster.network.vpc,
249
+ "service_cidr": cluster.network.service,
250
+ "pod_cidr": cluster.network.pod,
251
+ },
252
+ "api": {"listening": "internal" if cluster.spec.private else "external"},
253
+ "disable_user_workload_monitoring": (
254
+ duwm
255
+ if (duwm := cluster.spec.disable_user_workload_monitoring) is not None
256
+ else True
257
+ ),
258
+ }
259
+
260
+ # Workaround to enable type checks.
261
+ # cluster.spec is a Union of pydantic models Union[OSDClusterSpec, RosaClusterSpec].
262
+ # In this case, cluster.spec will always be an OSDClusterSpec because the type
263
+ # assignment is managed by pydantic, however, mypy complains if OSD attributes are set
264
+ # outside the isinstance check because it checks all the types set in the Union.
265
+ if isinstance(cluster.spec, OSDClusterSpec):
266
+ ocm_spec["storage_quota"] = {
267
+ "value": float(cluster.spec.storage * BYTES_IN_GIGABYTE),
268
+ }
269
+ ocm_spec["load_balancer_quota"] = cluster.spec.load_balancers
270
+
271
+ provision_shard_id = cluster.spec.provision_shard_id
272
+ if provision_shard_id:
273
+ ocm_spec.setdefault("properties", {})
274
+ ocm_spec["properties"]["provision_shard_id"] = provision_shard_id
275
+ return ocm_spec
276
+
277
+ def _get_update_cluster_spec(
278
+ self, update_spec: Mapping[str, Any]
279
+ ) -> dict[str, Any]:
280
+ ocm_spec: dict[str, Any] = {}
281
+
282
+ storage = update_spec.get("storage")
283
+ if storage is not None:
284
+ ocm_spec["storage_quota"] = {"value": float(storage * 1073741824)} # 1024^3
285
+
286
+ load_balancers = update_spec.get("load_balancers")
287
+ if load_balancers is not None:
288
+ ocm_spec["load_balancer_quota"] = load_balancers
289
+
290
+ private = update_spec.get("private")
291
+ if private is not None:
292
+ ocm_spec["api"] = {"listening": "internal" if private else "external"}
293
+
294
+ channel = update_spec.get("channel")
295
+ if channel is not None:
296
+ ocm_spec["version"] = {"channel_group": channel}
297
+
298
+ disable_uwm = update_spec.get("disable_user_workload_monitoring")
299
+ if disable_uwm is not None:
300
+ ocm_spec["disable_user_workload_monitoring"] = disable_uwm
301
+
302
+ return ocm_spec
303
+
304
+
305
+ class OCMProductRosa(OCMProduct):
306
+ ALLOWED_SPEC_UPDATE_FIELDS = {
307
+ SPEC_ATTR_CHANNEL,
308
+ SPEC_ATTR_DISABLE_UWM,
309
+ }
310
+
311
+ EXCLUDED_SPEC_FIELDS = {
312
+ SPEC_ATTR_ID,
313
+ SPEC_ATTR_EXTERNAL_ID,
314
+ SPEC_ATTR_PROVISION_SHARD_ID,
315
+ SPEC_ATTR_VERSION,
316
+ SPEC_ATTR_INITIAL_VERSION,
317
+ SPEC_ATTR_ACCOUNT,
318
+ SPEC_ATTR_HYPERSHIFT,
319
+ SPEC_ATTR_SUBNET_IDS,
320
+ SPEC_ATTR_AVAILABILITY_ZONES,
321
+ SPEC_ATTR_OIDC_ENDPONT_URL,
322
+ }
323
+
324
+ def create_cluster(
325
+ self,
326
+ ocm: OCMBaseClient,
327
+ org_id: str,
328
+ name: str,
329
+ cluster: OCMSpec,
330
+ dry_run: bool,
331
+ ) -> None:
332
+ ocm_spec = self._get_create_cluster_spec(name, cluster)
333
+ api = f"{CS_API_BASE}/v1/clusters"
334
+ params = {}
335
+ if dry_run:
336
+ params["dryRun"] = "true"
337
+ if cluster.spec.hypershift:
338
+ logging.info(
339
+ "Dry-Run is not yet implemented for Hosted clusters. Here is the payload:"
340
+ )
341
+ logging.info(ocm_spec)
342
+ return
343
+ ocm.post(api, ocm_spec, params)
344
+
345
+ def update_cluster(
346
+ self,
347
+ ocm: OCMBaseClient,
348
+ cluster_id: str,
349
+ update_spec: Mapping[str, Any],
350
+ dry_run: bool,
351
+ ) -> None:
352
+ ocm_spec = self._get_update_cluster_spec(update_spec)
353
+ api = f"{CS_API_BASE}/v1/clusters/{cluster_id}"
354
+ params: dict[str, Any] = {}
355
+ if dry_run:
356
+ params["dryRun"] = "true"
357
+ ocm.patch(api, ocm_spec, params)
358
+
359
+ def get_ocm_spec(
360
+ self,
361
+ ocm: OCMBaseClient,
362
+ cluster: Mapping[str, Any],
363
+ init_provision_shards: bool,
364
+ ) -> OCMSpec:
365
+ if init_provision_shards:
366
+ provision_shard_id = get_provisioning_shard_id(ocm, cluster["id"])
367
+ else:
368
+ provision_shard_id = None
369
+
370
+ sts = None
371
+ oidc_endpoint_url = None
372
+ if cluster["aws"].get("sts", None):
373
+ sts = ROSAOcmAwsStsAttrs(
374
+ installer_role_arn=cluster["aws"]["sts"]["role_arn"],
375
+ support_role_arn=cluster["aws"]["sts"]["support_role_arn"],
376
+ controlplane_role_arn=cluster["aws"]["sts"]["instance_iam_roles"].get(
377
+ "master_role_arn"
378
+ ),
379
+ worker_role_arn=cluster["aws"]["sts"]["instance_iam_roles"][
380
+ "worker_role_arn"
381
+ ],
382
+ )
383
+ oidc_endpoint_url = cluster["aws"]["sts"]["oidc_endpoint_url"]
384
+ account = ROSAClusterAWSAccount(
385
+ uid=cluster["properties"]["rosa_creator_arn"].split(":")[4],
386
+ rosa=ROSAOcmAwsAttrs(
387
+ creator_role_arn=cluster["properties"]["rosa_creator_arn"],
388
+ sts=sts,
389
+ ),
390
+ )
391
+
392
+ spec = ROSAClusterSpec(
393
+ product=cluster["product"]["id"],
394
+ account=account,
395
+ id=cluster["id"],
396
+ external_id=cluster.get("external_id"),
397
+ provider=cluster["cloud_provider"]["id"],
398
+ region=cluster["region"]["id"],
399
+ channel=cluster["version"]["channel_group"],
400
+ version=cluster["version"]["raw_id"],
401
+ multi_az=cluster["multi_az"],
402
+ private=cluster["api"]["listening"] == "internal",
403
+ disable_user_workload_monitoring=cluster[
404
+ "disable_user_workload_monitoring"
405
+ ],
406
+ provision_shard_id=provision_shard_id,
407
+ hypershift=cluster["hypershift"]["enabled"],
408
+ subnet_ids=cluster["aws"].get("subnet_ids"),
409
+ availability_zones=cluster["nodes"].get("availability_zones"),
410
+ oidc_endpoint_url=oidc_endpoint_url,
411
+ )
412
+
413
+ machine_pools = [
414
+ ClusterMachinePool(**p) for p in cluster.get("machinePools") or []
415
+ ]
416
+
417
+ network = OCMClusterNetwork(
418
+ type=cluster["network"].get("type") or "OVNKubernetes",
419
+ vpc=cluster["network"]["machine_cidr"],
420
+ service=cluster["network"]["service_cidr"],
421
+ pod=cluster["network"]["pod_cidr"],
422
+ )
423
+
424
+ ocm_spec = OCMSpec(
425
+ # Hosted control plane clusters can reach a Ready State without having the console
426
+ # Endpoint
427
+ console_url=cluster.get("console", {}).get("url", ""),
428
+ server_url=cluster["api"]["url"],
429
+ domain=cluster["dns"]["base_domain"],
430
+ spec=spec,
431
+ machine_pools=machine_pools,
432
+ network=network,
433
+ )
434
+
435
+ return ocm_spec
436
+
437
+ def _get_nodes_spec(self, cluster: OCMSpec) -> dict[str, Any]:
438
+ default_machine_pool = next(
439
+ (
440
+ mp
441
+ for mp in cluster.machine_pools
442
+ if mp.id == DEFAULT_OCM_MACHINE_POOL_ID
443
+ ),
444
+ None,
445
+ )
446
+ if default_machine_pool is None:
447
+ raise OCMValidationException(
448
+ f"No default machine pool found, id: {DEFAULT_OCM_MACHINE_POOL_ID}"
449
+ )
450
+
451
+ spec: dict[str, Any] = {
452
+ "compute_machine_type": {"id": default_machine_pool.instance_type},
453
+ }
454
+ if default_machine_pool.autoscale is not None:
455
+ spec["autoscale_compute"] = default_machine_pool.autoscale.dict()
456
+ else:
457
+ spec["compute"] = default_machine_pool.replicas
458
+ return spec
459
+
460
+ def _get_create_cluster_spec(
461
+ self, cluster_name: str, cluster: OCMSpec
462
+ ) -> dict[str, Any]:
463
+ if not isinstance(cluster.spec, ROSAClusterSpec):
464
+ # make mypy happy
465
+ raise ParameterError("spec is not for a ROSA cluster")
466
+ if not cluster.spec.account.rosa:
467
+ raise ParameterError(
468
+ "cluster.spec.account.rosa not specified... required for ROSA classic clusters"
469
+ )
470
+
471
+ operator_roles_prefix = "".join(
472
+ random.choices(string.ascii_lowercase + string.digits, k=4)
473
+ )
474
+
475
+ ocm_spec: dict[str, Any] = {
476
+ "api": {"listening": "internal" if cluster.spec.private else "external"},
477
+ "name": cluster_name,
478
+ "cloud_provider": {"id": cluster.spec.provider},
479
+ "region": {"id": cluster.spec.region},
480
+ "version": {
481
+ "id": f"openshift-v{cluster.spec.initial_version}",
482
+ "channel_group": cluster.spec.channel,
483
+ },
484
+ "hypershift": {"enabled": cluster.spec.hypershift},
485
+ "multi_az": cluster.spec.multi_az,
486
+ "nodes": self._get_nodes_spec(cluster),
487
+ "network": {
488
+ "type": cluster.network.type or "OVNKubernetes",
489
+ "machine_cidr": cluster.network.vpc,
490
+ "service_cidr": cluster.network.service,
491
+ "pod_cidr": cluster.network.pod,
492
+ },
493
+ "disable_user_workload_monitoring": (
494
+ duwm
495
+ if (duwm := cluster.spec.disable_user_workload_monitoring) is not None
496
+ else True
497
+ ),
498
+ }
499
+
500
+ provision_shard_id = cluster.spec.provision_shard_id
501
+ if provision_shard_id:
502
+ ocm_spec.setdefault("properties", {})
503
+ ocm_spec["properties"]["provision_shard_id"] = provision_shard_id
504
+
505
+ if isinstance(cluster.spec, ROSAClusterSpec):
506
+ ocm_spec.setdefault("properties", {})
507
+ ocm_spec["properties"]["rosa_creator_arn"] = (
508
+ cluster.spec.account.rosa.creator_role_arn
509
+ )
510
+
511
+ if not cluster.spec.account.rosa.sts:
512
+ raise ParameterError("STS is required for ROSA clusters")
513
+
514
+ rosa_spec: dict[str, Any] = {
515
+ "product": {"id": "rosa"},
516
+ "ccs": {"enabled": True},
517
+ "aws": {
518
+ "account_id": cluster.spec.account.uid,
519
+ "sts": {
520
+ "enabled": True,
521
+ "auto_mode": True,
522
+ "role_arn": cluster.spec.account.rosa.sts.installer_role_arn,
523
+ "support_role_arn": cluster.spec.account.rosa.sts.support_role_arn,
524
+ "instance_iam_roles": {
525
+ "worker_role_arn": cluster.spec.account.rosa.sts.worker_role_arn,
526
+ },
527
+ "operator_role_prefix": f"{cluster_name}-{operator_roles_prefix}",
528
+ },
529
+ },
530
+ }
531
+
532
+ if cluster.spec.account.rosa.sts.controlplane_role_arn:
533
+ rosa_spec["aws"]["sts"]["instance_iam_roles"]["master_role_arn"] = (
534
+ cluster.spec.account.rosa.sts.controlplane_role_arn
535
+ )
536
+
537
+ if cluster.spec.hypershift:
538
+ ocm_spec["nodes"]["availability_zones"] = (
539
+ cluster.spec.availability_zones
540
+ )
541
+ rosa_spec["aws"]["subnet_ids"] = cluster.spec.subnet_ids
542
+
543
+ ocm_spec.update(rosa_spec)
544
+ return ocm_spec
545
+
546
+ def _get_update_cluster_spec(
547
+ self, update_spec: Mapping[str, Any]
548
+ ) -> dict[str, Any]:
549
+ ocm_spec: dict[str, Any] = {}
550
+
551
+ channel = update_spec.get(SPEC_ATTR_CHANNEL)
552
+ if channel is not None:
553
+ ocm_spec["version"] = {"channel_group": channel}
554
+
555
+ disable_uwm = update_spec.get(SPEC_ATTR_DISABLE_UWM)
556
+ if disable_uwm is not None:
557
+ ocm_spec["disable_user_workload_monitoring"] = disable_uwm
558
+
559
+ return ocm_spec
560
+
561
+
562
+ class OCMProductHypershift(OCMProduct):
563
+ def __init__(self, rosa_session_builder: RosaSessionBuilder | None) -> None:
564
+ super().__init__()
565
+ self.rosa_session_builder = rosa_session_builder
566
+
567
+ # Not a real product, but a way to represent the Hypershift specialties
568
+ ALLOWED_SPEC_UPDATE_FIELDS = {
569
+ SPEC_ATTR_CHANNEL,
570
+ SPEC_ATTR_PRIVATE,
571
+ SPEC_ATTR_DISABLE_UWM,
572
+ }
573
+
574
+ EXCLUDED_SPEC_FIELDS = {
575
+ SPEC_ATTR_ID,
576
+ SPEC_ATTR_EXTERNAL_ID,
577
+ SPEC_ATTR_PROVISION_SHARD_ID,
578
+ SPEC_ATTR_VERSION,
579
+ SPEC_ATTR_INITIAL_VERSION,
580
+ SPEC_ATTR_ACCOUNT,
581
+ SPEC_ATTR_HYPERSHIFT,
582
+ SPEC_ATTR_SUBNET_IDS,
583
+ SPEC_ATTR_AVAILABILITY_ZONES,
584
+ SPEC_ATTR_OIDC_ENDPONT_URL,
585
+ }
586
+
587
+ def create_cluster(
588
+ self,
589
+ ocm: OCMBaseClient,
590
+ org_id: str,
591
+ name: str,
592
+ cluster: OCMSpec,
593
+ dry_run: bool,
594
+ ) -> None:
595
+ if not isinstance(cluster.spec, ROSAClusterSpec):
596
+ # make mypy happy
597
+ return
598
+
599
+ if self.rosa_session_builder is None:
600
+ raise Exception(
601
+ "OCMProductHypershift is not configured with a rosa session builder"
602
+ )
603
+
604
+ rosa_session = self.rosa_session_builder.build(
605
+ ocm, cluster.spec.account.uid, cluster.spec.region, org_id
606
+ )
607
+ try:
608
+ result = rosa_session.create_hcp_cluster(
609
+ cluster_name=name, spec=cluster, dry_run=dry_run
610
+ )
611
+ logging.info("cluster creation kicked off...")
612
+ result.write_logs_to_logger(logging.info)
613
+ except RosaCliException as e:
614
+ logs = "".join(e.get_log_lines(max_lines=10, from_file_end=True))
615
+ e.cleanup()
616
+ raise OCMValidationException(
617
+ f"last 10 lines from failed cluster creation job...\n\n{logs}"
618
+ )
619
+
620
+ def update_cluster(
621
+ self,
622
+ ocm: OCMBaseClient,
623
+ cluster_id: str,
624
+ update_spec: Mapping[str, Any],
625
+ dry_run: bool,
626
+ ) -> None:
627
+ ocm_spec = self._get_update_cluster_spec(update_spec)
628
+ api = f"{CS_API_BASE}/v1/clusters/{cluster_id}"
629
+ params: dict[str, Any] = {}
630
+ if dry_run:
631
+ params["dryRun"] = "true"
632
+ ocm.patch(api, ocm_spec, params)
633
+
634
+ def get_ocm_spec(
635
+ self,
636
+ ocm: OCMBaseClient,
637
+ cluster: Mapping[str, Any],
638
+ init_provision_shards: bool,
639
+ ) -> OCMSpec:
640
+ if init_provision_shards:
641
+ provision_shard_id = get_provisioning_shard_id(ocm, cluster["id"])
642
+ else:
643
+ provision_shard_id = None
644
+
645
+ sts = None
646
+ oidc_endpoint_url = None
647
+ if cluster["aws"].get("sts", None):
648
+ sts = ROSAOcmAwsStsAttrs(
649
+ installer_role_arn=cluster["aws"]["sts"]["role_arn"],
650
+ support_role_arn=cluster["aws"]["sts"]["support_role_arn"],
651
+ controlplane_role_arn=cluster["aws"]["sts"]["instance_iam_roles"].get(
652
+ "master_role_arn"
653
+ ),
654
+ worker_role_arn=cluster["aws"]["sts"]["instance_iam_roles"][
655
+ "worker_role_arn"
656
+ ],
657
+ )
658
+ oidc_endpoint_url = cluster["aws"]["sts"]["oidc_endpoint_url"]
659
+ account = ROSAClusterAWSAccount(
660
+ uid=cluster["properties"]["rosa_creator_arn"].split(":")[4],
661
+ rosa=ROSAOcmAwsAttrs(
662
+ creator_role_arn=cluster["properties"]["rosa_creator_arn"],
663
+ sts=sts,
664
+ ),
665
+ )
666
+
667
+ spec = ROSAClusterSpec(
668
+ product=cluster["product"]["id"],
669
+ account=account,
670
+ id=cluster["id"],
671
+ external_id=cluster.get("external_id"),
672
+ provider=cluster["cloud_provider"]["id"],
673
+ region=cluster["region"]["id"],
674
+ channel=cluster["version"]["channel_group"],
675
+ version=cluster["version"]["raw_id"],
676
+ multi_az=cluster["multi_az"],
677
+ private=cluster["api"]["listening"] == "internal",
678
+ disable_user_workload_monitoring=cluster[
679
+ "disable_user_workload_monitoring"
680
+ ],
681
+ provision_shard_id=provision_shard_id,
682
+ subnet_ids=cluster["aws"].get("subnet_ids"),
683
+ availability_zones=cluster["nodes"].get("availability_zones"),
684
+ hypershift=cluster["hypershift"]["enabled"],
685
+ oidc_endpoint_url=oidc_endpoint_url,
686
+ )
687
+
688
+ network = OCMClusterNetwork(
689
+ type=cluster["network"].get("type") or "OVNKubernetes",
690
+ vpc=cluster["network"]["machine_cidr"],
691
+ service=cluster["network"]["service_cidr"],
692
+ pod=cluster["network"]["pod_cidr"],
693
+ )
694
+
695
+ ocm_spec = OCMSpec(
696
+ # Hosted control plane clusters can reach a Ready State without having the console
697
+ # Endpoint
698
+ console_url=cluster.get("console", {}).get("url", ""),
699
+ server_url=cluster["api"]["url"],
700
+ domain=cluster["dns"]["base_domain"],
701
+ spec=spec,
702
+ network=network,
703
+ )
704
+
705
+ return ocm_spec
706
+
707
+ def _get_update_cluster_spec(
708
+ self, update_spec: Mapping[str, Any]
709
+ ) -> dict[str, Any]:
710
+ ocm_spec: dict[str, Any] = {}
711
+
712
+ disable_uwm = update_spec.get(SPEC_ATTR_DISABLE_UWM)
713
+ if disable_uwm is not None:
714
+ ocm_spec["disable_user_workload_monitoring"] = disable_uwm
715
+
716
+ return ocm_spec
717
+
718
+
719
+ def build_product_portfolio(
720
+ rosa_session_builder: RosaSessionBuilder | None = None,
721
+ ) -> OCMProductPortfolio:
722
+ return OCMProductPortfolio(
723
+ products={
724
+ OCM_PRODUCT_OSD: OCMProductOsd(),
725
+ OCM_PRODUCT_ROSA: OCMProductRosa(),
726
+ OCM_PRODUCT_HYPERSHIFT: OCMProductHypershift(rosa_session_builder),
727
+ }
728
+ )
729
+
730
+
731
+ class OCMProductPortfolio(BaseModel, arbitrary_types_allowed=True):
732
+ products: dict[str, OCMProduct]
733
+
734
+ @property
735
+ def product_names(self) -> list[str]:
736
+ return list(self.products.keys())
737
+
738
+ def get_product_impl(
739
+ self, product: str, hypershift: bool | None = False
740
+ ) -> OCMProduct:
741
+ if hypershift:
742
+ return self.products[OCM_PRODUCT_HYPERSHIFT]
743
+ return self.products[product]