aws-cdk.aws-eks-v2-alpha 2.212.0a0__tar.gz → 2.214.0a0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of aws-cdk.aws-eks-v2-alpha might be problematic.

Files changed (18)
  1. {aws_cdk_aws_eks_v2_alpha-2.212.0a0/src/aws_cdk.aws_eks_v2_alpha.egg-info → aws_cdk_aws_eks_v2_alpha-2.214.0a0}/PKG-INFO +46 -49
  2. {aws_cdk_aws_eks_v2_alpha-2.212.0a0 → aws_cdk_aws_eks_v2_alpha-2.214.0a0}/README.md +44 -47
  3. {aws_cdk_aws_eks_v2_alpha-2.212.0a0 → aws_cdk_aws_eks_v2_alpha-2.214.0a0}/setup.py +3 -3
  4. {aws_cdk_aws_eks_v2_alpha-2.212.0a0 → aws_cdk_aws_eks_v2_alpha-2.214.0a0}/src/aws_cdk/aws_eks_v2_alpha/__init__.py +83 -71
  5. {aws_cdk_aws_eks_v2_alpha-2.212.0a0 → aws_cdk_aws_eks_v2_alpha-2.214.0a0}/src/aws_cdk/aws_eks_v2_alpha/_jsii/__init__.py +2 -2
  6. aws_cdk_aws_eks_v2_alpha-2.214.0a0/src/aws_cdk/aws_eks_v2_alpha/_jsii/aws-eks-v2-alpha@2.214.0-alpha.0.jsii.tgz +0 -0
  7. {aws_cdk_aws_eks_v2_alpha-2.212.0a0 → aws_cdk_aws_eks_v2_alpha-2.214.0a0/src/aws_cdk.aws_eks_v2_alpha.egg-info}/PKG-INFO +46 -49
  8. {aws_cdk_aws_eks_v2_alpha-2.212.0a0 → aws_cdk_aws_eks_v2_alpha-2.214.0a0}/src/aws_cdk.aws_eks_v2_alpha.egg-info/SOURCES.txt +1 -1
  9. {aws_cdk_aws_eks_v2_alpha-2.212.0a0 → aws_cdk_aws_eks_v2_alpha-2.214.0a0}/src/aws_cdk.aws_eks_v2_alpha.egg-info/requires.txt +1 -1
  10. aws_cdk_aws_eks_v2_alpha-2.212.0a0/src/aws_cdk/aws_eks_v2_alpha/_jsii/aws-eks-v2-alpha@2.212.0-alpha.0.jsii.tgz +0 -0
  11. {aws_cdk_aws_eks_v2_alpha-2.212.0a0 → aws_cdk_aws_eks_v2_alpha-2.214.0a0}/LICENSE +0 -0
  12. {aws_cdk_aws_eks_v2_alpha-2.212.0a0 → aws_cdk_aws_eks_v2_alpha-2.214.0a0}/MANIFEST.in +0 -0
  13. {aws_cdk_aws_eks_v2_alpha-2.212.0a0 → aws_cdk_aws_eks_v2_alpha-2.214.0a0}/NOTICE +0 -0
  14. {aws_cdk_aws_eks_v2_alpha-2.212.0a0 → aws_cdk_aws_eks_v2_alpha-2.214.0a0}/pyproject.toml +0 -0
  15. {aws_cdk_aws_eks_v2_alpha-2.212.0a0 → aws_cdk_aws_eks_v2_alpha-2.214.0a0}/setup.cfg +0 -0
  16. {aws_cdk_aws_eks_v2_alpha-2.212.0a0 → aws_cdk_aws_eks_v2_alpha-2.214.0a0}/src/aws_cdk/aws_eks_v2_alpha/py.typed +0 -0
  17. {aws_cdk_aws_eks_v2_alpha-2.212.0a0 → aws_cdk_aws_eks_v2_alpha-2.214.0a0}/src/aws_cdk.aws_eks_v2_alpha.egg-info/dependency_links.txt +0 -0
  18. {aws_cdk_aws_eks_v2_alpha-2.212.0a0 → aws_cdk_aws_eks_v2_alpha-2.214.0a0}/src/aws_cdk.aws_eks_v2_alpha.egg-info/top_level.txt +0 -0
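Beyond the replaced `.jsii.tgz` bundle, the substantive changes are the package version bump to 2.214.0a0, the `aws-cdk-lib` requirement raised from `>=2.212.0` to `>=2.214.0`, and the documentation examples moving from Kubernetes 1.32 to 1.33 (`KubernetesVersion.V1_32` → `V1_33`, `KubectlV32Layer` → `KubectlV33Layer`, with the EKS Auto Mode headings demoted one level). As a rough sketch of what the updated minimal example looks like in a consumer app, assuming the `aws-cdk.lambda-layer-kubectl-v33` package is installed; the stack and app scaffolding around the README snippet is illustrative, not part of this diff:

```python
from aws_cdk import App, Stack
from constructs import Construct
import aws_cdk.aws_eks_v2_alpha as eks
# The kubectl layer ships separately; v33 matches the Kubernetes 1.33 examples in this release.
from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


class HelloEksStack(Stack):
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        # Mirrors the updated README example: a cluster on Kubernetes 1.33 with an
        # explicit kubectl provider (only created because kubectl_provider_options is set).
        eks.Cluster(self, "hello-eks",
            version=eks.KubernetesVersion.V1_33,
            kubectl_provider_options=eks.KubectlProviderOptions(
                kubectl_layer=KubectlV33Layer(self, "kubectl")
            )
        )


app = App()
HelloEksStack(app, "HelloEksStack")
app.synth()
```

The same `V1_32` → `V1_33` substitution applies to every other snippet in the diffs below (Auto Mode, Fargate, ALB controller, endpoint access, and so on).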
{aws_cdk_aws_eks_v2_alpha-2.212.0a0/src/aws_cdk.aws_eks_v2_alpha.egg-info → aws_cdk_aws_eks_v2_alpha-2.214.0a0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: aws-cdk.aws-eks-v2-alpha
-Version: 2.212.0a0
+Version: 2.214.0a0
 Summary: The CDK Construct Library for AWS::EKS
 Home-page: https://github.com/aws/aws-cdk
 Author: Amazon Web Services
@@ -22,7 +22,7 @@ Requires-Python: ~=3.9
 Description-Content-Type: text/markdown
 License-File: LICENSE
 License-File: NOTICE
-Requires-Dist: aws-cdk-lib<3.0.0,>=2.212.0
+Requires-Dist: aws-cdk-lib<3.0.0,>=2.214.0
 Requires-Dist: constructs<11.0.0,>=10.0.0
 Requires-Dist: jsii<2.0.0,>=1.113.0
 Requires-Dist: publication>=0.0.3
@@ -62,7 +62,7 @@ Here is the minimal example of defining an AWS EKS cluster

 ```python
 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_32
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -102,7 +102,7 @@ Creating a new cluster is done using the `Cluster` constructs. The only required

 ```python
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_32
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -110,7 +110,7 @@ You can also use `FargateCluster` to provision a cluster that uses only fargate

 ```python
 eks.FargateCluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_32
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -119,22 +119,22 @@ be created by default. It will only be deployed when `kubectlProviderOptions`
 property is used.**

 ```python
-from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


 eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV32Layer(self, "kubectl")
+        kubectl_layer=KubectlV33Layer(self, "kubectl")
     )
 )
 ```

-## EKS Auto Mode
+### EKS Auto Mode

 [Amazon EKS Auto Mode](https://aws.amazon.com/eks/auto-mode/) extends AWS management of Kubernetes clusters beyond the cluster itself, allowing AWS to set up and manage the infrastructure that enables the smooth operation of your workloads.

-### Using Auto Mode
+#### Using Auto Mode

 While `aws-eks` uses `DefaultCapacityType.NODEGROUP` by default, `aws-eks-v2` uses `DefaultCapacityType.AUTOMODE` as the default capacity type.

@@ -143,7 +143,7 @@ Auto Mode is enabled by default when creating a new cluster without specifying a
 ```python
 # Create EKS cluster with Auto Mode implicitly enabled
 cluster = eks.Cluster(self, "EksAutoCluster",
-    version=eks.KubernetesVersion.V1_32
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -152,12 +152,12 @@ You can also explicitly enable Auto Mode using `defaultCapacityType`:
 ```python
 # Create EKS cluster with Auto Mode explicitly enabled
 cluster = eks.Cluster(self, "EksAutoCluster",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.AUTOMODE
 )
 ```

-### Node Pools
+#### Node Pools

 When Auto Mode is enabled, the cluster comes with two default node pools:

@@ -168,7 +168,7 @@ These node pools are managed automatically by EKS. You can configure which node

 ```python
 cluster = eks.Cluster(self, "EksAutoCluster",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.AUTOMODE,
     compute=eks.ComputeConfig(
         node_pools=["system", "general-purpose"]
@@ -178,13 +178,13 @@ cluster = eks.Cluster(self, "EksAutoCluster",

 For more information, see [Create a Node Pool for EKS Auto Mode](https://docs.aws.amazon.com/eks/latest/userguide/create-node-pool.html).

-### Disabling Default Node Pools
+#### Disabling Default Node Pools

 You can disable the default node pools entirely by setting an empty array for `nodePools`. This is useful when you want to use Auto Mode features but manage your compute resources separately:

 ```python
 cluster = eks.Cluster(self, "EksAutoCluster",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.AUTOMODE,
     compute=eks.ComputeConfig(
         node_pools=[]
@@ -201,7 +201,7 @@ If you prefer to manage your own node groups instead of using Auto Mode, you can
 ```python
 # Create EKS cluster with traditional managed node group
 cluster = eks.Cluster(self, "EksCluster",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.NODEGROUP,
     default_capacity=3, # Number of instances
     default_capacity_instance=ec2.InstanceType.of(ec2.InstanceClass.T3, ec2.InstanceSize.LARGE)
@@ -212,7 +212,7 @@ You can also create a cluster with no initial capacity and add node groups later

 ```python
 cluster = eks.Cluster(self, "EksCluster",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.NODEGROUP,
     default_capacity=0
 )
@@ -233,7 +233,7 @@ You can combine Auto Mode with traditional node groups for specific workload req

 ```python
 cluster = eks.Cluster(self, "Cluster",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.AUTOMODE,
     compute=eks.ComputeConfig(
         node_pools=["system", "general-purpose"]
@@ -272,7 +272,7 @@ By default, when using `DefaultCapacityType.NODEGROUP`, this library will alloca

 ```python
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.NODEGROUP
 )
 ```
@@ -281,7 +281,7 @@ At cluster instantiation time, you can customize the number of instances and the

 ```python
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.NODEGROUP,
     default_capacity=5,
     default_capacity_instance=ec2.InstanceType.of(ec2.InstanceClass.M5, ec2.InstanceSize.SMALL)
@@ -294,7 +294,7 @@ Additional customizations are available post instantiation. To apply them, set t

 ```python
 cluster = eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.NODEGROUP,
     default_capacity=0
 )
@@ -347,7 +347,7 @@ The following code defines an Amazon EKS cluster with a default Fargate Profile

 ```python
 cluster = eks.FargateCluster(self, "MyCluster",
-    version=eks.KubernetesVersion.V1_32
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -366,7 +366,7 @@ You can configure the [cluster endpoint access](https://docs.aws.amazon.com/eks/

 ```python
 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     endpoint_access=eks.EndpointAccess.PRIVATE
 )
 ```
@@ -388,7 +388,7 @@ To deploy the controller on your EKS cluster, configure the `albController` prop

 ```python
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     alb_controller=eks.AlbControllerOptions(
         version=eks.AlbControllerVersion.V2_8_2
     )
@@ -430,7 +430,7 @@ You can specify the VPC of the cluster using the `vpc` and `vpcSubnets` properti


 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     vpc=vpc,
     vpc_subnets=[ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS)]
 )
@@ -474,13 +474,13 @@ To create a `Kubectl Handler`, use `kubectlProviderOptions` when creating the cl
 `kubectlLayer` is the only required property in `kubectlProviderOptions`.

 ```python
-from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


 eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV32Layer(self, "kubectl")
+        kubectl_layer=KubectlV33Layer(self, "kubectl")
     )
 )
 ```
@@ -490,9 +490,6 @@ eks.Cluster(self, "hello-eks",
 If you want to use an existing kubectl provider function, for example with tight trusted entities on your IAM Roles - you can import the existing provider and then use the imported provider when importing the cluster:

 ```python
-from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
-
-
 handler_role = iam.Role.from_role_arn(self, "HandlerRole", "arn:aws:iam::123456789012:role/lambda-role")
 # get the serivceToken from the custom resource provider
 function_arn = lambda_.Function.from_function_name(self, "ProviderOnEventFunc", "ProviderframeworkonEvent-XXX").function_arn
@@ -512,13 +509,13 @@ cluster = eks.Cluster.from_cluster_attributes(self, "Cluster",
 You can configure the environment of this function by specifying it at cluster instantiation. For example, this can be useful in order to configure an http proxy:

 ```python
-from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV32Layer(self, "kubectl"),
+        kubectl_layer=KubectlV33Layer(self, "kubectl"),
         environment={
             "http_proxy": "http://proxy.myproxy.com"
         }
@@ -539,13 +536,13 @@ Depending on which version of kubernetes you're targeting, you will need to use
 the `@aws-cdk/lambda-layer-kubectl-vXY` packages.

 ```python
-from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV32Layer(self, "kubectl")
+        kubectl_layer=KubectlV33Layer(self, "kubectl")
     )
 )
 ```
@@ -555,15 +552,15 @@ cluster = eks.Cluster(self, "hello-eks",
 By default, the kubectl provider is configured with 1024MiB of memory. You can use the `memory` option to specify the memory size for the AWS Lambda function:

 ```python
-from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


 eks.Cluster(self, "MyCluster",
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV32Layer(self, "kubectl"),
+        kubectl_layer=KubectlV33Layer(self, "kubectl"),
         memory=Size.gibibytes(4)
     ),
-    version=eks.KubernetesVersion.V1_32
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -596,7 +593,7 @@ When you create a cluster, you can specify a `mastersRole`. The `Cluster` constr
 # role: iam.Role

 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     masters_role=role
 )
 ```
@@ -617,7 +614,7 @@ You can use the `secretsEncryptionKey` to configure which key the cluster will u
 secrets_key = kms.Key(self, "SecretsKey")
 cluster = eks.Cluster(self, "MyCluster",
     secrets_encryption_key=secrets_key,
-    version=eks.KubernetesVersion.V1_32
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -627,7 +624,7 @@ You can also use a similar configuration for running a cluster built using the F
 secrets_key = kms.Key(self, "SecretsKey")
 cluster = eks.FargateCluster(self, "MyFargateCluster",
     secrets_encryption_key=secrets_key,
-    version=eks.KubernetesVersion.V1_32
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -670,7 +667,7 @@ eks.AccessPolicy.from_access_policy_name("AmazonEKSAdminPolicy",
 Use `grantAccess()` to grant the AccessPolicy to an IAM principal:

 ```python
-from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer
 # vpc: ec2.Vpc


@@ -685,9 +682,9 @@ eks_admin_role = iam.Role(self, "EKSAdminRole",
 cluster = eks.Cluster(self, "Cluster",
     vpc=vpc,
     masters_role=cluster_admin_role,
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV32Layer(self, "kubectl"),
+        kubectl_layer=KubectlV33Layer(self, "kubectl"),
         memory=Size.gibibytes(4)
     )
 )
@@ -872,7 +869,7 @@ when a cluster is defined:

 ```python
 eks.Cluster(self, "MyCluster",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     prune=False
 )
 ```
@@ -1191,7 +1188,7 @@ property. For example:
 ```python
 cluster = eks.Cluster(self, "Cluster",
     # ...
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     cluster_logging=[eks.ClusterLoggingTypes.API, eks.ClusterLoggingTypes.AUTHENTICATOR, eks.ClusterLoggingTypes.SCHEDULER
     ]
 )

{aws_cdk_aws_eks_v2_alpha-2.212.0a0 → aws_cdk_aws_eks_v2_alpha-2.214.0a0}/README.md

@@ -32,7 +32,7 @@ Here is the minimal example of defining an AWS EKS cluster

 ```python
 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_32
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -72,7 +72,7 @@ Creating a new cluster is done using the `Cluster` constructs. The only required

 ```python
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_32
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -80,7 +80,7 @@ You can also use `FargateCluster` to provision a cluster that uses only fargate

 ```python
 eks.FargateCluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_32
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -89,22 +89,22 @@ be created by default. It will only be deployed when `kubectlProviderOptions`
 property is used.**

 ```python
-from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


 eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV32Layer(self, "kubectl")
+        kubectl_layer=KubectlV33Layer(self, "kubectl")
     )
 )
 ```

-## EKS Auto Mode
+### EKS Auto Mode

 [Amazon EKS Auto Mode](https://aws.amazon.com/eks/auto-mode/) extends AWS management of Kubernetes clusters beyond the cluster itself, allowing AWS to set up and manage the infrastructure that enables the smooth operation of your workloads.

-### Using Auto Mode
+#### Using Auto Mode

 While `aws-eks` uses `DefaultCapacityType.NODEGROUP` by default, `aws-eks-v2` uses `DefaultCapacityType.AUTOMODE` as the default capacity type.

@@ -113,7 +113,7 @@ Auto Mode is enabled by default when creating a new cluster without specifying a
 ```python
 # Create EKS cluster with Auto Mode implicitly enabled
 cluster = eks.Cluster(self, "EksAutoCluster",
-    version=eks.KubernetesVersion.V1_32
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -122,12 +122,12 @@ You can also explicitly enable Auto Mode using `defaultCapacityType`:
 ```python
 # Create EKS cluster with Auto Mode explicitly enabled
 cluster = eks.Cluster(self, "EksAutoCluster",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.AUTOMODE
 )
 ```

-### Node Pools
+#### Node Pools

 When Auto Mode is enabled, the cluster comes with two default node pools:

@@ -138,7 +138,7 @@ These node pools are managed automatically by EKS. You can configure which node

 ```python
 cluster = eks.Cluster(self, "EksAutoCluster",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.AUTOMODE,
     compute=eks.ComputeConfig(
         node_pools=["system", "general-purpose"]
@@ -148,13 +148,13 @@ cluster = eks.Cluster(self, "EksAutoCluster",

 For more information, see [Create a Node Pool for EKS Auto Mode](https://docs.aws.amazon.com/eks/latest/userguide/create-node-pool.html).

-### Disabling Default Node Pools
+#### Disabling Default Node Pools

 You can disable the default node pools entirely by setting an empty array for `nodePools`. This is useful when you want to use Auto Mode features but manage your compute resources separately:

 ```python
 cluster = eks.Cluster(self, "EksAutoCluster",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.AUTOMODE,
     compute=eks.ComputeConfig(
         node_pools=[]
@@ -171,7 +171,7 @@ If you prefer to manage your own node groups instead of using Auto Mode, you can
 ```python
 # Create EKS cluster with traditional managed node group
 cluster = eks.Cluster(self, "EksCluster",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.NODEGROUP,
     default_capacity=3, # Number of instances
     default_capacity_instance=ec2.InstanceType.of(ec2.InstanceClass.T3, ec2.InstanceSize.LARGE)
@@ -182,7 +182,7 @@ You can also create a cluster with no initial capacity and add node groups later

 ```python
 cluster = eks.Cluster(self, "EksCluster",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.NODEGROUP,
     default_capacity=0
 )
@@ -203,7 +203,7 @@ You can combine Auto Mode with traditional node groups for specific workload req

 ```python
 cluster = eks.Cluster(self, "Cluster",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.AUTOMODE,
     compute=eks.ComputeConfig(
         node_pools=["system", "general-purpose"]
@@ -242,7 +242,7 @@ By default, when using `DefaultCapacityType.NODEGROUP`, this library will alloca

 ```python
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.NODEGROUP
 )
 ```
@@ -251,7 +251,7 @@ At cluster instantiation time, you can customize the number of instances and the

 ```python
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.NODEGROUP,
     default_capacity=5,
     default_capacity_instance=ec2.InstanceType.of(ec2.InstanceClass.M5, ec2.InstanceSize.SMALL)
@@ -264,7 +264,7 @@ Additional customizations are available post instantiation. To apply them, set t

 ```python
 cluster = eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     default_capacity_type=eks.DefaultCapacityType.NODEGROUP,
     default_capacity=0
 )
@@ -317,7 +317,7 @@ The following code defines an Amazon EKS cluster with a default Fargate Profile

 ```python
 cluster = eks.FargateCluster(self, "MyCluster",
-    version=eks.KubernetesVersion.V1_32
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -336,7 +336,7 @@ You can configure the [cluster endpoint access](https://docs.aws.amazon.com/eks/

 ```python
 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     endpoint_access=eks.EndpointAccess.PRIVATE
 )
 ```
@@ -358,7 +358,7 @@ To deploy the controller on your EKS cluster, configure the `albController` prop

 ```python
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     alb_controller=eks.AlbControllerOptions(
         version=eks.AlbControllerVersion.V2_8_2
     )
@@ -400,7 +400,7 @@ You can specify the VPC of the cluster using the `vpc` and `vpcSubnets` properti


 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     vpc=vpc,
     vpc_subnets=[ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS)]
 )
@@ -444,13 +444,13 @@ To create a `Kubectl Handler`, use `kubectlProviderOptions` when creating the cl
 `kubectlLayer` is the only required property in `kubectlProviderOptions`.

 ```python
-from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


 eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV32Layer(self, "kubectl")
+        kubectl_layer=KubectlV33Layer(self, "kubectl")
     )
 )
 ```
@@ -460,9 +460,6 @@ eks.Cluster(self, "hello-eks",
 If you want to use an existing kubectl provider function, for example with tight trusted entities on your IAM Roles - you can import the existing provider and then use the imported provider when importing the cluster:

 ```python
-from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
-
-
 handler_role = iam.Role.from_role_arn(self, "HandlerRole", "arn:aws:iam::123456789012:role/lambda-role")
 # get the serivceToken from the custom resource provider
 function_arn = lambda_.Function.from_function_name(self, "ProviderOnEventFunc", "ProviderframeworkonEvent-XXX").function_arn
@@ -482,13 +479,13 @@ cluster = eks.Cluster.from_cluster_attributes(self, "Cluster",
 You can configure the environment of this function by specifying it at cluster instantiation. For example, this can be useful in order to configure an http proxy:

 ```python
-from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV32Layer(self, "kubectl"),
+        kubectl_layer=KubectlV33Layer(self, "kubectl"),
         environment={
             "http_proxy": "http://proxy.myproxy.com"
         }
@@ -509,13 +506,13 @@ Depending on which version of kubernetes you're targeting, you will need to use
 the `@aws-cdk/lambda-layer-kubectl-vXY` packages.

 ```python
-from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV32Layer(self, "kubectl")
+        kubectl_layer=KubectlV33Layer(self, "kubectl")
     )
 )
 ```
@@ -525,15 +522,15 @@ cluster = eks.Cluster(self, "hello-eks",
 By default, the kubectl provider is configured with 1024MiB of memory. You can use the `memory` option to specify the memory size for the AWS Lambda function:

 ```python
-from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


 eks.Cluster(self, "MyCluster",
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV32Layer(self, "kubectl"),
+        kubectl_layer=KubectlV33Layer(self, "kubectl"),
         memory=Size.gibibytes(4)
     ),
-    version=eks.KubernetesVersion.V1_32
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -566,7 +563,7 @@ When you create a cluster, you can specify a `mastersRole`. The `Cluster` constr
 # role: iam.Role

 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     masters_role=role
 )
 ```
@@ -587,7 +584,7 @@ You can use the `secretsEncryptionKey` to configure which key the cluster will u
 secrets_key = kms.Key(self, "SecretsKey")
 cluster = eks.Cluster(self, "MyCluster",
     secrets_encryption_key=secrets_key,
-    version=eks.KubernetesVersion.V1_32
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -597,7 +594,7 @@ You can also use a similar configuration for running a cluster built using the F
 secrets_key = kms.Key(self, "SecretsKey")
 cluster = eks.FargateCluster(self, "MyFargateCluster",
     secrets_encryption_key=secrets_key,
-    version=eks.KubernetesVersion.V1_32
+    version=eks.KubernetesVersion.V1_33
 )
 ```

@@ -640,7 +637,7 @@ eks.AccessPolicy.from_access_policy_name("AmazonEKSAdminPolicy",
 Use `grantAccess()` to grant the AccessPolicy to an IAM principal:

 ```python
-from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer
 # vpc: ec2.Vpc


@@ -655,9 +652,9 @@ eks_admin_role = iam.Role(self, "EKSAdminRole",
 cluster = eks.Cluster(self, "Cluster",
     vpc=vpc,
     masters_role=cluster_admin_role,
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV32Layer(self, "kubectl"),
+        kubectl_layer=KubectlV33Layer(self, "kubectl"),
         memory=Size.gibibytes(4)
     )
 )
@@ -842,7 +839,7 @@ when a cluster is defined:

 ```python
 eks.Cluster(self, "MyCluster",
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     prune=False
 )
 ```
@@ -1161,7 +1158,7 @@ property. For example:
 ```python
 cluster = eks.Cluster(self, "Cluster",
     # ...
-    version=eks.KubernetesVersion.V1_32,
+    version=eks.KubernetesVersion.V1_33,
     cluster_logging=[eks.ClusterLoggingTypes.API, eks.ClusterLoggingTypes.AUTHENTICATOR, eks.ClusterLoggingTypes.SCHEDULER
     ]
 )
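
As a quick post-upgrade sanity check, the Kubernetes 1.33 constant referenced throughout the updated examples should resolve against the installed module. A minimal sketch, assuming the `version` string property behaves as it does in the stable `aws_eks` module and that Node.js is available for the jsii runtime:

```python
import aws_cdk.aws_eks_v2_alpha as eks

# With 2.214.0a0 installed, the constant used by the new examples should exist.
assert eks.KubernetesVersion.V1_33.version == "1.33"
print("aws-eks-v2-alpha targets Kubernetes", eks.KubernetesVersion.V1_33.version)
```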