aws-cdk.aws-eks-v2-alpha 2.213.0a0__py3-none-any.whl → 2.214.1a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of aws-cdk.aws-eks-v2-alpha might be problematic.
- aws_cdk/aws_eks_v2_alpha/__init__.py +83 -71
- aws_cdk/aws_eks_v2_alpha/_jsii/__init__.py +2 -2
- aws_cdk/aws_eks_v2_alpha/_jsii/aws-eks-v2-alpha@2.214.1-alpha.0.jsii.tgz +0 -0
- {aws_cdk_aws_eks_v2_alpha-2.213.0a0.dist-info → aws_cdk_aws_eks_v2_alpha-2.214.1a0.dist-info}/METADATA +46 -49
- aws_cdk_aws_eks_v2_alpha-2.214.1a0.dist-info/RECORD +10 -0
- aws_cdk/aws_eks_v2_alpha/_jsii/aws-eks-v2-alpha@2.213.0-alpha.0.jsii.tgz +0 -0
- aws_cdk_aws_eks_v2_alpha-2.213.0a0.dist-info/RECORD +0 -10
- {aws_cdk_aws_eks_v2_alpha-2.213.0a0.dist-info → aws_cdk_aws_eks_v2_alpha-2.214.1a0.dist-info}/LICENSE +0 -0
- {aws_cdk_aws_eks_v2_alpha-2.213.0a0.dist-info → aws_cdk_aws_eks_v2_alpha-2.214.1a0.dist-info}/NOTICE +0 -0
- {aws_cdk_aws_eks_v2_alpha-2.213.0a0.dist-info → aws_cdk_aws_eks_v2_alpha-2.214.1a0.dist-info}/WHEEL +0 -0
- {aws_cdk_aws_eks_v2_alpha-2.213.0a0.dist-info → aws_cdk_aws_eks_v2_alpha-2.214.1a0.dist-info}/top_level.txt +0 -0
aws_cdk/aws_eks_v2_alpha/__init__.py

@@ -33,7 +33,7 @@ Here is the minimal example of defining an AWS EKS cluster

 ```python
 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -73,7 +73,7 @@ Creating a new cluster is done using the `Cluster` constructs. The only required

 ```python
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -81,7 +81,7 @@ You can also use `FargateCluster` to provision a cluster that uses only fargate

 ```python
 eks.FargateCluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -90,22 +90,22 @@ be created by default. It will only be deployed when `kubectlProviderOptions`
 property is used.**

 ```python
-from aws_cdk.
+from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


 eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=
+        kubectl_layer=KubectlV33Layer(self, "kubectl")
     )
 )
 ```

-
+### EKS Auto Mode

 [Amazon EKS Auto Mode](https://aws.amazon.com/eks/auto-mode/) extends AWS management of Kubernetes clusters beyond the cluster itself, allowing AWS to set up and manage the infrastructure that enables the smooth operation of your workloads.

-
+#### Using Auto Mode

 While `aws-eks` uses `DefaultCapacityType.NODEGROUP` by default, `aws-eks-v2` uses `DefaultCapacityType.AUTOMODE` as the default capacity type.

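Taken together, the hunks above follow one pattern that repeats throughout this release: every example now pins `KubernetesVersion.V1_33`, and wherever a kubectl handler is configured the layer import moves to `aws_cdk.lambda_layer_kubectl_v33`. A minimal consolidated sketch of the post-upgrade usage, assuming the snippet runs inside a CDK `Stack` (as the README examples do) and that the matching `aws-cdk.lambda-layer-kubectl-v33` distribution is installed:

```python
from aws_cdk import aws_eks_v2_alpha as eks
from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer

# Cluster pinned to the newly added Kubernetes 1.33; the kubectl provider
# layer version is kept in lockstep with the cluster version.
cluster = eks.Cluster(self, "hello-eks",
    version=eks.KubernetesVersion.V1_33,
    kubectl_provider_options=eks.KubectlProviderOptions(
        kubectl_layer=KubectlV33Layer(self, "kubectl")
    )
)
```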
@@ -114,7 +114,7 @@ Auto Mode is enabled by default when creating a new cluster without specifying a
 ```python
 # Create EKS cluster with Auto Mode implicitly enabled
 cluster = eks.Cluster(self, "EksAutoCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -123,12 +123,12 @@ You can also explicitly enable Auto Mode using `defaultCapacityType`:
 ```python
 # Create EKS cluster with Auto Mode explicitly enabled
 cluster = eks.Cluster(self, "EksAutoCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.AUTOMODE
 )
 ```

-
+#### Node Pools

 When Auto Mode is enabled, the cluster comes with two default node pools:

@@ -139,7 +139,7 @@ These node pools are managed automatically by EKS. You can configure which node

 ```python
 cluster = eks.Cluster(self, "EksAutoCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.AUTOMODE,
     compute=eks.ComputeConfig(
         node_pools=["system", "general-purpose"]

@@ -149,13 +149,13 @@ cluster = eks.Cluster(self, "EksAutoCluster",

 For more information, see [Create a Node Pool for EKS Auto Mode](https://docs.aws.amazon.com/eks/latest/userguide/create-node-pool.html).

-
+#### Disabling Default Node Pools

 You can disable the default node pools entirely by setting an empty array for `nodePools`. This is useful when you want to use Auto Mode features but manage your compute resources separately:

 ```python
 cluster = eks.Cluster(self, "EksAutoCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.AUTOMODE,
     compute=eks.ComputeConfig(
         node_pools=[]

@@ -172,7 +172,7 @@ If you prefer to manage your own node groups instead of using Auto Mode, you can
 ```python
 # Create EKS cluster with traditional managed node group
 cluster = eks.Cluster(self, "EksCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.NODEGROUP,
     default_capacity=3,  # Number of instances
     default_capacity_instance=ec2.InstanceType.of(ec2.InstanceClass.T3, ec2.InstanceSize.LARGE)

@@ -183,7 +183,7 @@ You can also create a cluster with no initial capacity and add node groups later

 ```python
 cluster = eks.Cluster(self, "EksCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.NODEGROUP,
     default_capacity=0
 )

@@ -204,7 +204,7 @@ You can combine Auto Mode with traditional node groups for specific workload req

 ```python
 cluster = eks.Cluster(self, "Cluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.AUTOMODE,
     compute=eks.ComputeConfig(
         node_pools=["system", "general-purpose"]

@@ -243,7 +243,7 @@ By default, when using `DefaultCapacityType.NODEGROUP`, this library will alloca

 ```python
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.NODEGROUP
 )
 ```

@@ -252,7 +252,7 @@ At cluster instantiation time, you can customize the number of instances and the

 ```python
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.NODEGROUP,
     default_capacity=5,
     default_capacity_instance=ec2.InstanceType.of(ec2.InstanceClass.M5, ec2.InstanceSize.SMALL)

@@ -265,7 +265,7 @@ Additional customizations are available post instantiation. To apply them, set t

 ```python
 cluster = eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.NODEGROUP,
     default_capacity=0
 )

@@ -318,7 +318,7 @@ The following code defines an Amazon EKS cluster with a default Fargate Profile

 ```python
 cluster = eks.FargateCluster(self, "MyCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -337,7 +337,7 @@ You can configure the [cluster endpoint access](https://docs.aws.amazon.com/eks/

 ```python
 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     endpoint_access=eks.EndpointAccess.PRIVATE
 )
 ```

@@ -359,7 +359,7 @@ To deploy the controller on your EKS cluster, configure the `albController` prop

 ```python
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     alb_controller=eks.AlbControllerOptions(
         version=eks.AlbControllerVersion.V2_8_2
     )

@@ -401,7 +401,7 @@ You can specify the VPC of the cluster using the `vpc` and `vpcSubnets` properti


 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     vpc=vpc,
     vpc_subnets=[ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS)]
 )

@@ -445,13 +445,13 @@ To create a `Kubectl Handler`, use `kubectlProviderOptions` when creating the cl
 `kubectlLayer` is the only required property in `kubectlProviderOptions`.

 ```python
-from aws_cdk.
+from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


 eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=
+        kubectl_layer=KubectlV33Layer(self, "kubectl")
     )
 )
 ```

@@ -461,9 +461,6 @@ eks.Cluster(self, "hello-eks",
 If you want to use an existing kubectl provider function, for example with tight trusted entities on your IAM Roles - you can import the existing provider and then use the imported provider when importing the cluster:

 ```python
-from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
-
-
 handler_role = iam.Role.from_role_arn(self, "HandlerRole", "arn:aws:iam::123456789012:role/lambda-role")
 # get the serivceToken from the custom resource provider
 function_arn = lambda_.Function.from_function_name(self, "ProviderOnEventFunc", "ProviderframeworkonEvent-XXX").function_arn
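The hunk above only drops the stale `lambda_layer_kubectl_v32` import; the surrounding example keeps importing an existing provider by its attributes. A hedged sketch of how the `handler_role` and `function_arn` from that example are typically wired up; the keyword names (`service_token`, `role`, `cluster_name`, `kubectl_provider`) are assumptions by analogy with `KubectlProviderAttributes` and `Cluster.from_cluster_attributes`, not lines shown in this diff:

```python
# Import an existing kubectl provider, then reuse it when importing the cluster.
kubectl_provider = eks.KubectlProvider.from_kubectl_provider_attributes(self, "KubectlProvider",
    service_token=function_arn,  # assumed attribute name
    role=handler_role            # assumed attribute name
)

cluster = eks.Cluster.from_cluster_attributes(self, "Cluster",
    cluster_name="my-cluster",          # hypothetical cluster name
    kubectl_provider=kubectl_provider   # assumed attribute name
)
```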
@@ -483,13 +480,13 @@ cluster = eks.Cluster.from_cluster_attributes(self, "Cluster",
 You can configure the environment of this function by specifying it at cluster instantiation. For example, this can be useful in order to configure an http proxy:

 ```python
-from aws_cdk.
+from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=
+        kubectl_layer=KubectlV33Layer(self, "kubectl"),
         environment={
             "http_proxy": "http://proxy.myproxy.com"
         }

@@ -510,13 +507,13 @@ Depending on which version of kubernetes you're targeting, you will need to use
 the `@aws-cdk/lambda-layer-kubectl-vXY` packages.

 ```python
-from aws_cdk.
+from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=
+        kubectl_layer=KubectlV33Layer(self, "kubectl")
     )
 )
 ```

@@ -526,15 +523,15 @@ cluster = eks.Cluster(self, "hello-eks",
 By default, the kubectl provider is configured with 1024MiB of memory. You can use the `memory` option to specify the memory size for the AWS Lambda function:

 ```python
-from aws_cdk.
+from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


 eks.Cluster(self, "MyCluster",
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=
+        kubectl_layer=KubectlV33Layer(self, "kubectl"),
         memory=Size.gibibytes(4)
     ),
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -567,7 +564,7 @@ When you create a cluster, you can specify a `mastersRole`. The `Cluster` constr
 # role: iam.Role

 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     masters_role=role
 )
 ```

@@ -588,7 +585,7 @@ You can use the `secretsEncryptionKey` to configure which key the cluster will u
 secrets_key = kms.Key(self, "SecretsKey")
 cluster = eks.Cluster(self, "MyCluster",
     secrets_encryption_key=secrets_key,
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -598,7 +595,7 @@ You can also use a similar configuration for running a cluster built using the F
 secrets_key = kms.Key(self, "SecretsKey")
 cluster = eks.FargateCluster(self, "MyFargateCluster",
     secrets_encryption_key=secrets_key,
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -641,7 +638,7 @@ eks.AccessPolicy.from_access_policy_name("AmazonEKSAdminPolicy",
 Use `grantAccess()` to grant the AccessPolicy to an IAM principal:

 ```python
-from aws_cdk.
+from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer
 # vpc: ec2.Vpc


@@ -656,9 +653,9 @@ eks_admin_role = iam.Role(self, "EKSAdminRole",
 cluster = eks.Cluster(self, "Cluster",
     vpc=vpc,
     masters_role=cluster_admin_role,
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=
+        kubectl_layer=KubectlV33Layer(self, "kubectl"),
         memory=Size.gibibytes(4)
     )
 )

@@ -843,7 +840,7 @@ when a cluster is defined:

 ```python
 eks.Cluster(self, "MyCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     prune=False
 )
 ```

@@ -1162,7 +1159,7 @@ property. For example:
 ```python
 cluster = eks.Cluster(self, "Cluster",
     # ...
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     cluster_logging=[eks.ClusterLoggingTypes.API, eks.ClusterLoggingTypes.AUTHENTICATOR, eks.ClusterLoggingTypes.SCHEDULER
     ]
 )

@@ -2202,7 +2199,7 @@ class AlbControllerOptions:
 Example::

 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     alb_controller=eks.AlbControllerOptions(
         version=eks.AlbControllerVersion.V2_8_2
     )

@@ -2414,7 +2411,7 @@ class AlbControllerVersion(
 Example::

 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     alb_controller=eks.AlbControllerOptions(
         version=eks.AlbControllerVersion.V2_8_2
     )

@@ -3864,9 +3861,6 @@ class ClusterAttributes:

 Example::

-from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
-
-
 handler_role = iam.Role.from_role_arn(self, "HandlerRole", "arn:aws:iam::123456789012:role/lambda-role")
 # get the serivceToken from the custom resource provider
 function_arn = lambda_.Function.from_function_name(self, "ProviderOnEventFunc", "ProviderframeworkonEvent-XXX").function_arn

@@ -4524,7 +4518,7 @@ class ClusterLoggingTypes(enum.Enum):

 cluster = eks.Cluster(self, "Cluster",
     # ...
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     cluster_logging=[eks.ClusterLoggingTypes.API, eks.ClusterLoggingTypes.AUTHENTICATOR, eks.ClusterLoggingTypes.SCHEDULER
     ]
 )

@@ -4646,7 +4640,7 @@ class ClusterProps(ClusterCommonOptions):
 Example::

 cluster = eks.Cluster(self, "EksAutoCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.AUTOMODE,
     compute=eks.ComputeConfig(
         node_pools=["system", "general-purpose"]

@@ -5067,7 +5061,7 @@ class ComputeConfig:
 Example::

 cluster = eks.Cluster(self, "EksAutoCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.AUTOMODE,
     compute=eks.ComputeConfig(
         node_pools=["system", "general-purpose"]

@@ -5171,7 +5165,7 @@ class DefaultCapacityType(enum.Enum):
 Example::

 cluster = eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.NODEGROUP,
     default_capacity=0
 )

@@ -5371,7 +5365,7 @@ class EndpointAccess(
 Example::

 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     endpoint_access=eks.EndpointAccess.PRIVATE
 )
 '''

@@ -5509,7 +5503,7 @@ class FargateClusterProps(ClusterCommonOptions):
 Example::

 cluster = eks.FargateCluster(self, "MyCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33
 )
 '''
 if isinstance(alb_controller, dict):

@@ -7970,9 +7964,6 @@ class KubectlProvider(

 Example::

-from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
-
-
 handler_role = iam.Role.from_role_arn(self, "HandlerRole", "arn:aws:iam::123456789012:role/lambda-role")
 # get the serivceToken from the custom resource provider
 function_arn = lambda_.Function.from_function_name(self, "ProviderOnEventFunc", "ProviderframeworkonEvent-XXX").function_arn

@@ -8120,9 +8111,6 @@ class KubectlProviderAttributes:

 Example::

-from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
-
-
 handler_role = iam.Role.from_role_arn(self, "HandlerRole", "arn:aws:iam::123456789012:role/lambda-role")
 # get the serivceToken from the custom resource provider
 function_arn = lambda_.Function.from_function_name(self, "ProviderOnEventFunc", "ProviderframeworkonEvent-XXX").function_arn

@@ -8220,13 +8208,13 @@ class KubectlProviderOptions:

 Example::

-from aws_cdk.
+from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=
+        kubectl_layer=KubectlV33Layer(self, "kubectl"),
         environment={
             "http_proxy": "http://proxy.myproxy.com"
         }

@@ -9502,7 +9490,7 @@ class KubernetesVersion(
 Example::

 cluster = eks.Cluster(self, "EksAutoCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.AUTOMODE,
     compute=eks.ComputeConfig(
         node_pools=["system", "general-purpose"]

@@ -9628,6 +9616,19 @@ class KubernetesVersion(
 '''
 return typing.cast("KubernetesVersion", jsii.sget(cls, "V1_32"))

+@jsii.python.classproperty
+@jsii.member(jsii_name="V1_33")
+def V1_33(cls) -> "KubernetesVersion":
+    '''(experimental) Kubernetes version 1.33.
+
+    When creating a ``Cluster`` with this version, you need to also specify the
+    ``kubectlLayer`` property with a ``KubectlV33Layer`` from
+    ``@aws-cdk/lambda-layer-kubectl-v33``.
+
+    :stability: experimental
+    '''
+    return typing.cast("KubernetesVersion", jsii.sget(cls, "V1_33"))
+
 @builtins.property
 @jsii.member(jsii_name="version")
 def version(self) -> builtins.str:
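The new `V1_33` class property sits next to the existing `version` string property shown in the trailing context. A small sketch of reading it back, assuming the string form follows the pattern of the earlier members:

```python
k8s = eks.KubernetesVersion.V1_33
# "1.33" is assumed by analogy with V1_32 -> "1.32"; the diff itself does not show it.
print(k8s.version)
```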
@@ -10184,7 +10185,7 @@ class NodegroupOptions:
 Example::

 cluster = eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.NODEGROUP,
     default_capacity=0
 )

@@ -11975,6 +11976,17 @@ class ServiceLoadBalancerAddressOptions:
 class TaintEffect(enum.Enum):
     '''(experimental) Effect types of kubernetes node taint.

+    Note: These values are specifically for AWS EKS NodeGroups and use the AWS API format.
+    When using AWS CLI or API, taint effects must be NO_SCHEDULE, PREFER_NO_SCHEDULE, or NO_EXECUTE.
+    When using Kubernetes directly or kubectl, taint effects must be NoSchedule, PreferNoSchedule, or NoExecute.
+
+    For Kubernetes manifests (like Karpenter NodePools), use string literals with PascalCase format:
+
+    - 'NoSchedule' instead of TaintEffect.NO_SCHEDULE
+    - 'PreferNoSchedule' instead of TaintEffect.PREFER_NO_SCHEDULE
+    - 'NoExecute' instead of TaintEffect.NO_EXECUTE
+
+    :see: https://docs.aws.amazon.com/eks/latest/userguide/node-taints-managed-node-groups.html
     :stability: experimental
     '''

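The note added to `TaintEffect` distinguishes the AWS API spelling this enum uses from the PascalCase spelling raw Kubernetes manifests expect. A hedged illustration of the two spellings side by side; `TaintSpec` and `add_nodegroup_capacity` are assumed here by analogy with the stable `aws-eks` module and are not part of this diff:

```python
# AWS API format, used when defining an EKS managed node group through CDK.
cluster.add_nodegroup_capacity("tainted-nodes",
    taints=[eks.TaintSpec(          # assumed struct name
        effect=eks.TaintEffect.NO_SCHEDULE,
        key="dedicated",
        value="gpu"
    )]
)

# Kubernetes/PascalCase format, for raw manifests such as a Karpenter NodePool.
taint_for_manifest = {"key": "dedicated", "value": "gpu", "effect": "NoSchedule"}
```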
@@ -12485,7 +12497,7 @@ class Cluster(
 Example::

 cluster = eks.Cluster(self, "EksAutoCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.AUTOMODE,
     compute=eks.ComputeConfig(
         node_pools=["system", "general-purpose"]

@@ -13411,7 +13423,7 @@ class FargateCluster(
 Example::

 cluster = eks.FargateCluster(self, "MyCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33
 )
 '''

aws_cdk/aws_eks_v2_alpha/_jsii/__init__.py

@@ -33,9 +33,9 @@ import constructs._jsii

 __jsii_assembly__ = jsii.JSIIAssembly.load(
     "@aws-cdk/aws-eks-v2-alpha",
-    "2.
+    "2.214.1-alpha.0",
     __name__[0:-6],
-    "aws-eks-v2-alpha@2.
+    "aws-eks-v2-alpha@2.214.1-alpha.0.jsii.tgz",
 )

 __all__ = [

aws_cdk/aws_eks_v2_alpha/_jsii/aws-eks-v2-alpha@2.214.1-alpha.0.jsii.tgz: Binary file
{aws_cdk_aws_eks_v2_alpha-2.213.0a0.dist-info → aws_cdk_aws_eks_v2_alpha-2.214.1a0.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: aws-cdk.aws-eks-v2-alpha
-Version: 2.
+Version: 2.214.1a0
 Summary: The CDK Construct Library for AWS::EKS
 Home-page: https://github.com/aws/aws-cdk
 Author: Amazon Web Services

@@ -22,7 +22,7 @@ Requires-Python: ~=3.9
 Description-Content-Type: text/markdown
 License-File: LICENSE
 License-File: NOTICE
-Requires-Dist: aws-cdk-lib <3.0.0,>=2.
+Requires-Dist: aws-cdk-lib <3.0.0,>=2.214.1
 Requires-Dist: constructs <11.0.0,>=10.0.0
 Requires-Dist: jsii <2.0.0,>=1.113.0
 Requires-Dist: publication >=0.0.3

@@ -62,7 +62,7 @@ Here is the minimal example of defining an AWS EKS cluster

 ```python
 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -102,7 +102,7 @@ Creating a new cluster is done using the `Cluster` constructs. The only required

 ```python
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -110,7 +110,7 @@ You can also use `FargateCluster` to provision a cluster that uses only fargate

 ```python
 eks.FargateCluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -119,22 +119,22 @@ be created by default. It will only be deployed when `kubectlProviderOptions`
 property is used.**

 ```python
-from aws_cdk.
+from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


 eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=
+        kubectl_layer=KubectlV33Layer(self, "kubectl")
     )
 )
 ```

-
+### EKS Auto Mode

 [Amazon EKS Auto Mode](https://aws.amazon.com/eks/auto-mode/) extends AWS management of Kubernetes clusters beyond the cluster itself, allowing AWS to set up and manage the infrastructure that enables the smooth operation of your workloads.

-
+#### Using Auto Mode

 While `aws-eks` uses `DefaultCapacityType.NODEGROUP` by default, `aws-eks-v2` uses `DefaultCapacityType.AUTOMODE` as the default capacity type.

@@ -143,7 +143,7 @@ Auto Mode is enabled by default when creating a new cluster without specifying a
 ```python
 # Create EKS cluster with Auto Mode implicitly enabled
 cluster = eks.Cluster(self, "EksAutoCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -152,12 +152,12 @@ You can also explicitly enable Auto Mode using `defaultCapacityType`:
 ```python
 # Create EKS cluster with Auto Mode explicitly enabled
 cluster = eks.Cluster(self, "EksAutoCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.AUTOMODE
 )
 ```

-
+#### Node Pools

 When Auto Mode is enabled, the cluster comes with two default node pools:

@@ -168,7 +168,7 @@ These node pools are managed automatically by EKS. You can configure which node

 ```python
 cluster = eks.Cluster(self, "EksAutoCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.AUTOMODE,
     compute=eks.ComputeConfig(
         node_pools=["system", "general-purpose"]

@@ -178,13 +178,13 @@ cluster = eks.Cluster(self, "EksAutoCluster",

 For more information, see [Create a Node Pool for EKS Auto Mode](https://docs.aws.amazon.com/eks/latest/userguide/create-node-pool.html).

-
+#### Disabling Default Node Pools

 You can disable the default node pools entirely by setting an empty array for `nodePools`. This is useful when you want to use Auto Mode features but manage your compute resources separately:

 ```python
 cluster = eks.Cluster(self, "EksAutoCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.AUTOMODE,
     compute=eks.ComputeConfig(
         node_pools=[]

@@ -201,7 +201,7 @@ If you prefer to manage your own node groups instead of using Auto Mode, you can
 ```python
 # Create EKS cluster with traditional managed node group
 cluster = eks.Cluster(self, "EksCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.NODEGROUP,
     default_capacity=3,  # Number of instances
     default_capacity_instance=ec2.InstanceType.of(ec2.InstanceClass.T3, ec2.InstanceSize.LARGE)

@@ -212,7 +212,7 @@ You can also create a cluster with no initial capacity and add node groups later

 ```python
 cluster = eks.Cluster(self, "EksCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.NODEGROUP,
     default_capacity=0
 )

@@ -233,7 +233,7 @@ You can combine Auto Mode with traditional node groups for specific workload req

 ```python
 cluster = eks.Cluster(self, "Cluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.AUTOMODE,
     compute=eks.ComputeConfig(
         node_pools=["system", "general-purpose"]

@@ -272,7 +272,7 @@ By default, when using `DefaultCapacityType.NODEGROUP`, this library will alloca

 ```python
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.NODEGROUP
 )
 ```

@@ -281,7 +281,7 @@ At cluster instantiation time, you can customize the number of instances and the

 ```python
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.NODEGROUP,
     default_capacity=5,
     default_capacity_instance=ec2.InstanceType.of(ec2.InstanceClass.M5, ec2.InstanceSize.SMALL)

@@ -294,7 +294,7 @@ Additional customizations are available post instantiation. To apply them, set t

 ```python
 cluster = eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.NODEGROUP,
     default_capacity=0
 )

@@ -347,7 +347,7 @@ The following code defines an Amazon EKS cluster with a default Fargate Profile

 ```python
 cluster = eks.FargateCluster(self, "MyCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -366,7 +366,7 @@ You can configure the [cluster endpoint access](https://docs.aws.amazon.com/eks/

 ```python
 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     endpoint_access=eks.EndpointAccess.PRIVATE
 )
 ```

@@ -388,7 +388,7 @@ To deploy the controller on your EKS cluster, configure the `albController` prop

 ```python
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     alb_controller=eks.AlbControllerOptions(
         version=eks.AlbControllerVersion.V2_8_2
     )

@@ -430,7 +430,7 @@ You can specify the VPC of the cluster using the `vpc` and `vpcSubnets` properti


 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     vpc=vpc,
     vpc_subnets=[ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS)]
 )

@@ -474,13 +474,13 @@ To create a `Kubectl Handler`, use `kubectlProviderOptions` when creating the cl
 `kubectlLayer` is the only required property in `kubectlProviderOptions`.

 ```python
-from aws_cdk.
+from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


 eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=
+        kubectl_layer=KubectlV33Layer(self, "kubectl")
     )
 )
 ```

@@ -490,9 +490,6 @@ eks.Cluster(self, "hello-eks",
 If you want to use an existing kubectl provider function, for example with tight trusted entities on your IAM Roles - you can import the existing provider and then use the imported provider when importing the cluster:

 ```python
-from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
-
-
 handler_role = iam.Role.from_role_arn(self, "HandlerRole", "arn:aws:iam::123456789012:role/lambda-role")
 # get the serivceToken from the custom resource provider
 function_arn = lambda_.Function.from_function_name(self, "ProviderOnEventFunc", "ProviderframeworkonEvent-XXX").function_arn

@@ -512,13 +509,13 @@ cluster = eks.Cluster.from_cluster_attributes(self, "Cluster",
 You can configure the environment of this function by specifying it at cluster instantiation. For example, this can be useful in order to configure an http proxy:

 ```python
-from aws_cdk.
+from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=
+        kubectl_layer=KubectlV33Layer(self, "kubectl"),
         environment={
             "http_proxy": "http://proxy.myproxy.com"
         }

@@ -539,13 +536,13 @@ Depending on which version of kubernetes you're targeting, you will need to use
 the `@aws-cdk/lambda-layer-kubectl-vXY` packages.

 ```python
-from aws_cdk.
+from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=
+        kubectl_layer=KubectlV33Layer(self, "kubectl")
     )
 )
 ```

@@ -555,15 +552,15 @@ cluster = eks.Cluster(self, "hello-eks",
 By default, the kubectl provider is configured with 1024MiB of memory. You can use the `memory` option to specify the memory size for the AWS Lambda function:

 ```python
-from aws_cdk.
+from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


 eks.Cluster(self, "MyCluster",
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=
+        kubectl_layer=KubectlV33Layer(self, "kubectl"),
         memory=Size.gibibytes(4)
     ),
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -596,7 +593,7 @@ When you create a cluster, you can specify a `mastersRole`. The `Cluster` constr
 # role: iam.Role

 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     masters_role=role
 )
 ```

@@ -617,7 +614,7 @@ You can use the `secretsEncryptionKey` to configure which key the cluster will u
 secrets_key = kms.Key(self, "SecretsKey")
 cluster = eks.Cluster(self, "MyCluster",
     secrets_encryption_key=secrets_key,
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -627,7 +624,7 @@ You can also use a similar configuration for running a cluster built using the F
 secrets_key = kms.Key(self, "SecretsKey")
 cluster = eks.FargateCluster(self, "MyFargateCluster",
     secrets_encryption_key=secrets_key,
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -670,7 +667,7 @@ eks.AccessPolicy.from_access_policy_name("AmazonEKSAdminPolicy",
 Use `grantAccess()` to grant the AccessPolicy to an IAM principal:

 ```python
-from aws_cdk.
+from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer
 # vpc: ec2.Vpc


@@ -685,9 +682,9 @@ eks_admin_role = iam.Role(self, "EKSAdminRole",
 cluster = eks.Cluster(self, "Cluster",
     vpc=vpc,
     masters_role=cluster_admin_role,
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=
+        kubectl_layer=KubectlV33Layer(self, "kubectl"),
         memory=Size.gibibytes(4)
     )
 )

@@ -872,7 +869,7 @@ when a cluster is defined:

 ```python
 eks.Cluster(self, "MyCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     prune=False
 )
 ```

@@ -1191,7 +1188,7 @@ property. For example:
 ```python
 cluster = eks.Cluster(self, "Cluster",
     # ...
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_33,
     cluster_logging=[eks.ClusterLoggingTypes.API, eks.ClusterLoggingTypes.AUTHENTICATOR, eks.ClusterLoggingTypes.SCHEDULER
     ]
 )
aws_cdk_aws_eks_v2_alpha-2.214.1a0.dist-info/RECORD (added)

@@ -0,0 +1,10 @@
+aws_cdk/aws_eks_v2_alpha/__init__.py,sha256=lo8DHWdFVc1Ghp-TytkqHaw3PB6ZxAAkGxBORvx8y8Y,703369
+aws_cdk/aws_eks_v2_alpha/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+aws_cdk/aws_eks_v2_alpha/_jsii/__init__.py,sha256=8T4IrFA0BE0Rd4dd-LrgIuc-xKFi1cPdGa0YsA6i_8g,1485
+aws_cdk/aws_eks_v2_alpha/_jsii/aws-eks-v2-alpha@2.214.1-alpha.0.jsii.tgz,sha256=CswteNzStxf8LxKpieQcTI7Gs99rnOg8bIhnljPU_UY,404762
+aws_cdk_aws_eks_v2_alpha-2.214.1a0.dist-info/LICENSE,sha256=y47tc38H0C4DpGljYUZDl8XxidQjNxxGLq-K4jwv6Xc,11391
+aws_cdk_aws_eks_v2_alpha-2.214.1a0.dist-info/METADATA,sha256=I_5vlQqkS23fU9ensC8XT2-lpYiyMc4gvu8c7HRX164,42796
+aws_cdk_aws_eks_v2_alpha-2.214.1a0.dist-info/NOTICE,sha256=6Jdq-MQvHIyOFx_9SdfwJrEmcxlScjONPAJru73PESY,919
+aws_cdk_aws_eks_v2_alpha-2.214.1a0.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
+aws_cdk_aws_eks_v2_alpha-2.214.1a0.dist-info/top_level.txt,sha256=1TALAKbuUGsMSrfKWEf268lySCmcqSEO6cDYe_XlLHM,8
+aws_cdk_aws_eks_v2_alpha-2.214.1a0.dist-info/RECORD,,

aws_cdk/aws_eks_v2_alpha/_jsii/aws-eks-v2-alpha@2.213.0-alpha.0.jsii.tgz: Binary file

aws_cdk_aws_eks_v2_alpha-2.213.0a0.dist-info/RECORD (removed)

@@ -1,10 +0,0 @@
-aws_cdk/aws_eks_v2_alpha/__init__.py,sha256=Gq0B6-NuECKu9MLinHdRKyZlljocUoF-oycj5YvIyNA,702556
-aws_cdk/aws_eks_v2_alpha/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-aws_cdk/aws_eks_v2_alpha/_jsii/__init__.py,sha256=RjII-mpt5RsmaUZri-PRGTW3cD5l1z5yVyF26dE-gKw,1485
-aws_cdk/aws_eks_v2_alpha/_jsii/aws-eks-v2-alpha@2.213.0-alpha.0.jsii.tgz,sha256=W95wbsTEYFfZgZ9FfYk7yh5YSHjERJm1lxSj1qo6MIU,403860
-aws_cdk_aws_eks_v2_alpha-2.213.0a0.dist-info/LICENSE,sha256=y47tc38H0C4DpGljYUZDl8XxidQjNxxGLq-K4jwv6Xc,11391
-aws_cdk_aws_eks_v2_alpha-2.213.0a0.dist-info/METADATA,sha256=v51YZ96r8QXvGvDX4WyzpbOeufBSvEoVsUkWjbALB0s,42855
-aws_cdk_aws_eks_v2_alpha-2.213.0a0.dist-info/NOTICE,sha256=6Jdq-MQvHIyOFx_9SdfwJrEmcxlScjONPAJru73PESY,919
-aws_cdk_aws_eks_v2_alpha-2.213.0a0.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
-aws_cdk_aws_eks_v2_alpha-2.213.0a0.dist-info/top_level.txt,sha256=1TALAKbuUGsMSrfKWEf268lySCmcqSEO6cDYe_XlLHM,8
-aws_cdk_aws_eks_v2_alpha-2.213.0a0.dist-info/RECORD,,
{aws_cdk_aws_eks_v2_alpha-2.213.0a0.dist-info → aws_cdk_aws_eks_v2_alpha-2.214.1a0.dist-info}/LICENSE RENAMED
File without changes

{aws_cdk_aws_eks_v2_alpha-2.213.0a0.dist-info → aws_cdk_aws_eks_v2_alpha-2.214.1a0.dist-info}/NOTICE RENAMED
File without changes

{aws_cdk_aws_eks_v2_alpha-2.213.0a0.dist-info → aws_cdk_aws_eks_v2_alpha-2.214.1a0.dist-info}/WHEEL RENAMED
File without changes

{aws_cdk_aws_eks_v2_alpha-2.213.0a0.dist-info → aws_cdk_aws_eks_v2_alpha-2.214.1a0.dist-info}/top_level.txt RENAMED
File without changes