aws-cdk.aws-eks-v2-alpha 2.206.0a0__py3-none-any.whl → 2.231.0a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aws_cdk/aws_eks_v2_alpha/__init__.py +288 -149
- aws_cdk/aws_eks_v2_alpha/_jsii/__init__.py +2 -2
- aws_cdk/aws_eks_v2_alpha/_jsii/aws-eks-v2-alpha@2.231.0-alpha.0.jsii.tgz +0 -0
- {aws_cdk_aws_eks_v2_alpha-2.206.0a0.dist-info → aws_cdk_aws_eks_v2_alpha-2.231.0a0.dist-info}/METADATA +161 -82
- aws_cdk_aws_eks_v2_alpha-2.231.0a0.dist-info/RECORD +10 -0
- aws_cdk/aws_eks_v2_alpha/_jsii/aws-eks-v2-alpha@2.206.0-alpha.0.jsii.tgz +0 -0
- aws_cdk_aws_eks_v2_alpha-2.206.0a0.dist-info/RECORD +0 -10
- {aws_cdk_aws_eks_v2_alpha-2.206.0a0.dist-info → aws_cdk_aws_eks_v2_alpha-2.231.0a0.dist-info}/LICENSE +0 -0
- {aws_cdk_aws_eks_v2_alpha-2.206.0a0.dist-info → aws_cdk_aws_eks_v2_alpha-2.231.0a0.dist-info}/NOTICE +0 -0
- {aws_cdk_aws_eks_v2_alpha-2.206.0a0.dist-info → aws_cdk_aws_eks_v2_alpha-2.231.0a0.dist-info}/WHEEL +0 -0
- {aws_cdk_aws_eks_v2_alpha-2.206.0a0.dist-info → aws_cdk_aws_eks_v2_alpha-2.231.0a0.dist-info}/top_level.txt +0 -0
aws_cdk/aws_eks_v2_alpha/_jsii/__init__.py

@@ -33,9 +33,9 @@ import constructs._jsii
 
 __jsii_assembly__ = jsii.JSIIAssembly.load(
     "@aws-cdk/aws-eks-v2-alpha",
-    "2.206.0-alpha.0",
+    "2.231.0-alpha.0",
     __name__[0:-6],
-    "aws-eks-v2-alpha@2.206.0-alpha.0.jsii.tgz",
+    "aws-eks-v2-alpha@2.231.0-alpha.0.jsii.tgz",
 )
 
 __all__ = [

aws_cdk/aws_eks_v2_alpha/_jsii/aws-eks-v2-alpha@2.231.0-alpha.0.jsii.tgz
Binary file (no text diff shown)

{aws_cdk_aws_eks_v2_alpha-2.206.0a0.dist-info → aws_cdk_aws_eks_v2_alpha-2.231.0a0.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: aws-cdk.aws-eks-v2-alpha
-Version: 2.206.0a0
+Version: 2.231.0a0
 Summary: The CDK Construct Library for AWS::EKS
 Home-page: https://github.com/aws/aws-cdk
 Author: Amazon Web Services
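
The version bump above is the only change in this first METADATA hunk. As a quick sanity check after upgrading (a minimal sketch using only the Python standard library; not part of the package diff), the installed build can be read back at runtime:

```python
# Minimal sketch: confirm the installed build matches the new METADATA above.
from importlib.metadata import version

print(version("aws-cdk.aws-eks-v2-alpha"))  # expect "2.231.0a0" after upgrading
```
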
@@ -22,24 +22,24 @@ Requires-Python: ~=3.9
 Description-Content-Type: text/markdown
 License-File: LICENSE
 License-File: NOTICE
-Requires-Dist: aws-cdk-lib<3.0.0,>=2.
-Requires-Dist: constructs<11.0.0,>=10.0.0
-Requires-Dist: jsii<2.0.0,>=1.
-Requires-Dist: publication>=0.0.3
-Requires-Dist: typeguard<4.3.0,>=2.13.3
+Requires-Dist: aws-cdk-lib <3.0.0,>=2.231.0
+Requires-Dist: constructs <11.0.0,>=10.0.0
+Requires-Dist: jsii <2.0.0,>=1.119.0
+Requires-Dist: publication >=0.0.3
+Requires-Dist: typeguard <4.3.0,>=2.13.3
 
 # Amazon EKS V2 Construct Library
 
 <!--BEGIN STABILITY BANNER-->---
 
 
-[stability banner badge image, not preserved in this diff view]
 
-> The APIs of higher level constructs in this module are
->
-> not subject to
-> announced in
-> your source code when upgrading to a newer version of this package.
+> The APIs of higher level constructs in this module are in **developer preview** before they
+> become stable. We will only make breaking changes to address unforeseen API issues. Therefore,
+> these APIs are not subject to [Semantic Versioning](https://semver.org/), and breaking changes
+> will be announced in release notes. This means that while you may use them, you may need to
+> update your source code when upgrading to a newer version of this package.
 
 ---
 <!--END STABILITY BANNER-->
@@ -62,39 +62,88 @@ Here is the minimal example of defining an AWS EKS cluster
 
 ```python
 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_34
 )
 ```
 
 ## Architecture
 
-```text
-[previous architecture diagram removed; its content was not preserved in this diff view]
+```text
+                                                    +-----------------+
+                                        kubectl     |                 |
+                                      +------------>| Kubectl Handler |
+                                      |             |   (Optional)    |
+                                      |             +-----------------+
+--------------------------------------+--------------------------------------+
+|                           EKS Cluster (Auto Mode)                          |
+|                              AWS::EKS::Cluster                             |
+|                                                                            |
+|  +----------------------------------------------------------------------+  |
+|  |             Auto Mode Compute (Managed by EKS) (Default)             |  |
+|  |                                                                      |  |
+|  |  - Automatically provisions EC2 instances                            |  |
+|  |  - Auto scaling based on pod requirements                            |  |
+|  |  - No manual node group configuration needed                         |  |
+|  |                                                                      |  |
+|  +----------------------------------------------------------------------+  |
+|                                                                            |
+------------------------------------------------------------------------------
 ```
 
 In a nutshell:
 
-*
-
-*
-
-
-cluster
+* **[Auto Mode](#eks-auto-mode)** (Default) – The fully managed capacity mode in EKS.
+  EKS automatically provisions and scales EC2 capacity based on pod requirements.
+  It manages internal *system* and *general-purpose* NodePools, handles networking and storage setup, and removes the need for user-managed node groups or Auto Scaling Groups.
+
+  ```python
+  cluster = eks.Cluster(self, "AutoModeCluster",
+      version=eks.KubernetesVersion.V1_34
+  )
+  ```
+* **[Managed Node Groups](#managed-node-groups)** – The semi-managed capacity mode.
+  EKS provisions and manages EC2 nodes on your behalf but you configure the instance types, scaling ranges, and update strategy.
+  AWS handles node health, draining, and rolling updates while you retain control over scaling and cost optimization.
+
+  You can also define *Fargate Profiles* that determine which pods or namespaces run on Fargate infrastructure.
+
+  ```python
+  cluster = eks.Cluster(self, "ManagedNodeCluster",
+      version=eks.KubernetesVersion.V1_34,
+      default_capacity_type=eks.DefaultCapacityType.NODEGROUP
+  )
+
+  # Add a Fargate Profile for specific workloads (e.g., default namespace)
+  cluster.add_fargate_profile("FargateProfile",
+      selectors=[eks.Selector(namespace="default")
+      ]
+  )
+  ```
+* **[Fargate Mode](#fargate-profiles)** – The Fargate capacity mode.
+  EKS runs your pods directly on AWS Fargate without provisioning EC2 nodes.
+
+  ```python
+  cluster = eks.FargateCluster(self, "FargateCluster",
+      version=eks.KubernetesVersion.V1_34
+  )
+  ```
+* **[Self-Managed Nodes](#self-managed-capacity)** – The fully manual capacity mode.
+  You create and manage EC2 instances (via an Auto Scaling Group) and connect them to the cluster manually.
+  This provides maximum flexibility for custom AMIs or configurations but also the highest operational overhead.
+
+  ```python
+  cluster = eks.Cluster(self, "SelfManagedCluster",
+      version=eks.KubernetesVersion.V1_34
+  )
+
+  # Add self-managed Auto Scaling Group
+  cluster.add_auto_scaling_group_capacity("self-managed-asg",
+      instance_type=ec2.InstanceType.of(ec2.InstanceClass.T3, ec2.InstanceSize.MEDIUM),
+      min_capacity=1,
+      max_capacity=5
+  )
+  ```
+* **[Kubectl Handler](#kubectl-support) (Optional)** – A Lambda-backed custom resource created by the AWS CDK to execute `kubectl` commands (like `apply` or `patch`) during deployment.
+  Regardless of the capacity mode, this handler may still be created to apply Kubernetes manifests as part of CDK provisioning.
 
 ## Provisioning cluster
 
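
The Kubectl Handler described in the last bullet of the hunk above is what applies raw Kubernetes manifests during deployment. A minimal sketch of that flow (assuming `cluster` was created with `kubectl_provider_options`, as shown later in this README; the manifest content is illustrative):

```python
# Sketch: apply a Kubernetes manifest through the cluster's kubectl handler.
# Assumes `cluster` was created with kubectl_provider_options configured.
cluster.add_manifest("AppNamespace", {
    "apiVersion": "v1",
    "kind": "Namespace",
    "metadata": {"name": "my-app"},
})
```
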
@@ -102,7 +151,7 @@ Creating a new cluster is done using the `Cluster` constructs. The only required
 
 ```python
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_34
 )
 ```
 
@@ -110,7 +159,7 @@ You can also use `FargateCluster` to provision a cluster that uses only fargate
 
 ```python
 eks.FargateCluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_34
 )
 ```
 
@@ -119,22 +168,22 @@ be created by default. It will only be deployed when `kubectlProviderOptions`
 property is used.**
 
 ```python
-from aws_cdk.
+from aws_cdk.lambda_layer_kubectl_v34 import KubectlV34Layer
 
 
 eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_34,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=
+        kubectl_layer=KubectlV34Layer(self, "kubectl")
     )
 )
 ```
 
-
+### EKS Auto Mode
 
 [Amazon EKS Auto Mode](https://aws.amazon.com/eks/auto-mode/) extends AWS management of Kubernetes clusters beyond the cluster itself, allowing AWS to set up and manage the infrastructure that enables the smooth operation of your workloads.
 
-
+#### Using Auto Mode
 
 While `aws-eks` uses `DefaultCapacityType.NODEGROUP` by default, `aws-eks-v2` uses `DefaultCapacityType.AUTOMODE` as the default capacity type.
 
@@ -143,7 +192,7 @@ Auto Mode is enabled by default when creating a new cluster without specifying a
 ```python
 # Create EKS cluster with Auto Mode implicitly enabled
 cluster = eks.Cluster(self, "EksAutoCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_34
 )
 ```
 
@@ -152,12 +201,12 @@ You can also explicitly enable Auto Mode using `defaultCapacityType`:
 ```python
 # Create EKS cluster with Auto Mode explicitly enabled
 cluster = eks.Cluster(self, "EksAutoCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_34,
     default_capacity_type=eks.DefaultCapacityType.AUTOMODE
 )
 ```
 
-
+#### Node Pools
 
 When Auto Mode is enabled, the cluster comes with two default node pools:
 
@@ -168,7 +217,7 @@ These node pools are managed automatically by EKS. You can configure which node
 
 ```python
 cluster = eks.Cluster(self, "EksAutoCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_34,
     default_capacity_type=eks.DefaultCapacityType.AUTOMODE,
     compute=eks.ComputeConfig(
         node_pools=["system", "general-purpose"]
@@ -178,13 +227,13 @@ cluster = eks.Cluster(self, "EksAutoCluster",
 
 For more information, see [Create a Node Pool for EKS Auto Mode](https://docs.aws.amazon.com/eks/latest/userguide/create-node-pool.html).
 
-
+#### Disabling Default Node Pools
 
 You can disable the default node pools entirely by setting an empty array for `nodePools`. This is useful when you want to use Auto Mode features but manage your compute resources separately:
 
 ```python
 cluster = eks.Cluster(self, "EksAutoCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_34,
     default_capacity_type=eks.DefaultCapacityType.AUTOMODE,
     compute=eks.ComputeConfig(
         node_pools=[]
@@ -201,7 +250,7 @@ If you prefer to manage your own node groups instead of using Auto Mode, you can
 ```python
 # Create EKS cluster with traditional managed node group
 cluster = eks.Cluster(self, "EksCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_34,
     default_capacity_type=eks.DefaultCapacityType.NODEGROUP,
     default_capacity=3,  # Number of instances
     default_capacity_instance=ec2.InstanceType.of(ec2.InstanceClass.T3, ec2.InstanceSize.LARGE)
@@ -212,7 +261,7 @@ You can also create a cluster with no initial capacity and add node groups later
 
 ```python
 cluster = eks.Cluster(self, "EksCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_34,
     default_capacity_type=eks.DefaultCapacityType.NODEGROUP,
     default_capacity=0
 )
@@ -233,7 +282,7 @@ You can combine Auto Mode with traditional node groups for specific workload req
 
 ```python
 cluster = eks.Cluster(self, "Cluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_34,
     default_capacity_type=eks.DefaultCapacityType.AUTOMODE,
     compute=eks.ComputeConfig(
         node_pools=["system", "general-purpose"]
@@ -272,7 +321,7 @@ By default, when using `DefaultCapacityType.NODEGROUP`, this library will alloca
 
 ```python
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_34,
     default_capacity_type=eks.DefaultCapacityType.NODEGROUP
 )
 ```
@@ -281,7 +330,7 @@ At cluster instantiation time, you can customize the number of instances and the
 
 ```python
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_34,
     default_capacity_type=eks.DefaultCapacityType.NODEGROUP,
     default_capacity=5,
     default_capacity_instance=ec2.InstanceType.of(ec2.InstanceClass.M5, ec2.InstanceSize.SMALL)
@@ -294,7 +343,7 @@ Additional customizations are available post instantiation. To apply them, set t
 
 ```python
 cluster = eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_34,
     default_capacity_type=eks.DefaultCapacityType.NODEGROUP,
     default_capacity=0
 )
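
The hunk above sets `default_capacity=0` so that capacity can be added after instantiation. A hedged sketch of the usual follow-up, adding a managed node group afterwards (the node group name, instance type, and sizes here are illustrative, not taken from the diff):

```python
# Sketch: add a managed node group after creating the cluster with default_capacity=0.
cluster.add_nodegroup_capacity("custom-node-group",
    instance_types=[ec2.InstanceType("m5.large")],
    min_size=2,
    max_size=4,
    disk_size=100,
)
```
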
@@ -347,7 +396,7 @@ The following code defines an Amazon EKS cluster with a default Fargate Profile
 
 ```python
 cluster = eks.FargateCluster(self, "MyCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_34
 )
 ```
 
@@ -358,6 +407,39 @@ pods running on Fargate. For ingress, we recommend that you use the [ALB Ingress
 Controller](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html)
 on Amazon EKS (minimum version v1.1.4).
 
+### Self-managed capacity
+
+Self-managed capacity gives you the most control over your worker nodes by allowing you to create and manage your own EC2 Auto Scaling Groups. This approach provides maximum flexibility for custom AMIs, instance configurations, and scaling policies, but requires more operational overhead.
+
+You can add self-managed capacity to any cluster using the `addAutoScalingGroupCapacity` method:
+
+```python
+cluster = eks.Cluster(self, "Cluster",
+    version=eks.KubernetesVersion.V1_34
+)
+
+cluster.add_auto_scaling_group_capacity("self-managed-nodes",
+    instance_type=ec2.InstanceType.of(ec2.InstanceClass.T3, ec2.InstanceSize.MEDIUM),
+    min_capacity=1,
+    max_capacity=10,
+    desired_capacity=3
+)
+```
+
+You can specify custom subnets for the Auto Scaling Group:
+
+```python
+# vpc: ec2.Vpc
+# cluster: eks.Cluster
+
+
+cluster.add_auto_scaling_group_capacity("custom-subnet-nodes",
+    vpc_subnets=ec2.SubnetSelection(subnets=vpc.private_subnets),
+    instance_type=ec2.InstanceType.of(ec2.InstanceClass.T3, ec2.InstanceSize.MEDIUM),
+    min_capacity=2
+)
+```
+
 ### Endpoint Access
 
 When you create a new cluster, Amazon EKS creates an endpoint for the managed Kubernetes API server that you use to communicate with your cluster (using Kubernetes management tools such as `kubectl`)
@@ -366,7 +448,7 @@ You can configure the [cluster endpoint access](https://docs.aws.amazon.com/eks/
 
 ```python
 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_34,
     endpoint_access=eks.EndpointAccess.PRIVATE
 )
 ```
@@ -388,7 +470,7 @@ To deploy the controller on your EKS cluster, configure the `albController` prop
 
 ```python
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_34,
     alb_controller=eks.AlbControllerOptions(
         version=eks.AlbControllerVersion.V2_8_2
     )
@@ -430,7 +512,7 @@ You can specify the VPC of the cluster using the `vpc` and `vpcSubnets` properti
 
 
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_34,
     vpc=vpc,
     vpc_subnets=[ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS)]
 )
@@ -474,13 +556,13 @@ To create a `Kubectl Handler`, use `kubectlProviderOptions` when creating the cl
 `kubectlLayer` is the only required property in `kubectlProviderOptions`.
 
 ```python
-from aws_cdk.
+from aws_cdk.lambda_layer_kubectl_v34 import KubectlV34Layer
 
 
 eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_34,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=
+        kubectl_layer=KubectlV34Layer(self, "kubectl")
     )
 )
 ```
@@ -490,9 +572,6 @@ eks.Cluster(self, "hello-eks",
 If you want to use an existing kubectl provider function, for example with tight trusted entities on your IAM Roles - you can import the existing provider and then use the imported provider when importing the cluster:
 
 ```python
-from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
-
-
 handler_role = iam.Role.from_role_arn(self, "HandlerRole", "arn:aws:iam::123456789012:role/lambda-role")
 # get the serivceToken from the custom resource provider
 function_arn = lambda_.Function.from_function_name(self, "ProviderOnEventFunc", "ProviderframeworkonEvent-XXX").function_arn
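
The hunk above ends before the imported provider is actually wired up. The snippet below sketches the likely continuation; the attribute names `service_token` and `role` are my assumption about `KubectlProviderAttributes` and should be verified against the released API:

```python
# Sketch only: attribute names below are assumptions, not confirmed by this diff.
kubectl_provider = eks.KubectlProvider.from_kubectl_provider_attributes(self, "KubectlProvider",
    service_token=function_arn,  # from the custom resource provider shown above
    role=handler_role,
)

cluster = eks.Cluster.from_cluster_attributes(self, "Cluster",
    cluster_name="cluster",
    kubectl_provider=kubectl_provider,
)
```
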
@@ -512,13 +591,13 @@ cluster = eks.Cluster.from_cluster_attributes(self, "Cluster",
 You can configure the environment of this function by specifying it at cluster instantiation. For example, this can be useful in order to configure an http proxy:
 
 ```python
-from aws_cdk.
+from aws_cdk.lambda_layer_kubectl_v34 import KubectlV34Layer
 
 
 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_34,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=
+        kubectl_layer=KubectlV34Layer(self, "kubectl"),
         environment={
             "http_proxy": "http://proxy.myproxy.com"
         }
@@ -539,13 +618,13 @@ Depending on which version of kubernetes you're targeting, you will need to use
 the `@aws-cdk/lambda-layer-kubectl-vXY` packages.
 
 ```python
-from aws_cdk.
+from aws_cdk.lambda_layer_kubectl_v34 import KubectlV34Layer
 
 
 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_34,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=
+        kubectl_layer=KubectlV34Layer(self, "kubectl")
     )
 )
 ```
@@ -555,15 +634,15 @@ cluster = eks.Cluster(self, "hello-eks",
 By default, the kubectl provider is configured with 1024MiB of memory. You can use the `memory` option to specify the memory size for the AWS Lambda function:
 
 ```python
-from aws_cdk.
+from aws_cdk.lambda_layer_kubectl_v34 import KubectlV34Layer
 
 
 eks.Cluster(self, "MyCluster",
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=
+        kubectl_layer=KubectlV34Layer(self, "kubectl"),
         memory=Size.gibibytes(4)
     ),
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_34
 )
 ```
 
@@ -596,7 +675,7 @@ When you create a cluster, you can specify a `mastersRole`. The `Cluster` constr
 # role: iam.Role
 
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_34,
     masters_role=role
 )
 ```
@@ -617,7 +696,7 @@ You can use the `secretsEncryptionKey` to configure which key the cluster will u
 secrets_key = kms.Key(self, "SecretsKey")
 cluster = eks.Cluster(self, "MyCluster",
     secrets_encryption_key=secrets_key,
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_34
 )
 ```
 
@@ -627,7 +706,7 @@ You can also use a similar configuration for running a cluster built using the F
 secrets_key = kms.Key(self, "SecretsKey")
 cluster = eks.FargateCluster(self, "MyFargateCluster",
     secrets_encryption_key=secrets_key,
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_34
 )
 ```
 
@@ -670,7 +749,7 @@ eks.AccessPolicy.from_access_policy_name("AmazonEKSAdminPolicy",
 Use `grantAccess()` to grant the AccessPolicy to an IAM principal:
 
 ```python
-from aws_cdk.
+from aws_cdk.lambda_layer_kubectl_v34 import KubectlV34Layer
 # vpc: ec2.Vpc
 
 
@@ -685,9 +764,9 @@ eks_admin_role = iam.Role(self, "EKSAdminRole",
 cluster = eks.Cluster(self, "Cluster",
     vpc=vpc,
     masters_role=cluster_admin_role,
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_34,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=
+        kubectl_layer=KubectlV34Layer(self, "kubectl"),
         memory=Size.gibibytes(4)
     )
 )
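
The hunk above stops at the cluster definition; the `grantAccess()` call it leads up to is not shown in this diff. A hedged sketch of what granting cluster-wide admin access typically looks like (the access policy name and option names are assumptions to verify against the released API):

```python
# Sketch only: grant the admin role cluster-wide access via an EKS access entry.
# The policy name and option names are assumptions, not taken from this diff.
cluster.grant_access("ClusterAdminAccess", eks_admin_role.role_arn, [
    eks.AccessPolicy.from_access_policy_name("AmazonEKSClusterAdminPolicy",
        access_scope_type=eks.AccessScopeType.CLUSTER,
    ),
])
```
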
@@ -872,7 +951,7 @@ when a cluster is defined:
 
 ```python
 eks.Cluster(self, "MyCluster",
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_34,
    prune=False
 )
 ```
@@ -1191,7 +1270,7 @@ property. For example:
 ```python
 cluster = eks.Cluster(self, "Cluster",
     # ...
-    version=eks.KubernetesVersion.
+    version=eks.KubernetesVersion.V1_34,
     cluster_logging=[eks.ClusterLoggingTypes.API, eks.ClusterLoggingTypes.AUTHENTICATOR, eks.ClusterLoggingTypes.SCHEDULER
     ]
 )

aws_cdk_aws_eks_v2_alpha-2.231.0a0.dist-info/RECORD

@@ -0,0 +1,10 @@
+aws_cdk/aws_eks_v2_alpha/__init__.py,sha256=16jajvIJRWfYaXBpr94H7uF28X1ZUpGum4i9dP8uWCM,709522
+aws_cdk/aws_eks_v2_alpha/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+aws_cdk/aws_eks_v2_alpha/_jsii/__init__.py,sha256=UQksEa1klkdc3aXP_5ma5fZE3J1c8Uzt5_xr_JCeHE4,1485
+aws_cdk/aws_eks_v2_alpha/_jsii/aws-eks-v2-alpha@2.231.0-alpha.0.jsii.tgz,sha256=obQo74nh_E94tvInrakG04aDuQhDlpXhDnmamh2AMe8,423381
+aws_cdk_aws_eks_v2_alpha-2.231.0a0.dist-info/LICENSE,sha256=y47tc38H0C4DpGljYUZDl8XxidQjNxxGLq-K4jwv6Xc,11391
+aws_cdk_aws_eks_v2_alpha-2.231.0a0.dist-info/METADATA,sha256=uoD8AA_qEGj7OeX6uZj55FeJW5AbAVA7QhAI9ky7ZRs,46874
+aws_cdk_aws_eks_v2_alpha-2.231.0a0.dist-info/NOTICE,sha256=6Jdq-MQvHIyOFx_9SdfwJrEmcxlScjONPAJru73PESY,919
+aws_cdk_aws_eks_v2_alpha-2.231.0a0.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
+aws_cdk_aws_eks_v2_alpha-2.231.0a0.dist-info/top_level.txt,sha256=1TALAKbuUGsMSrfKWEf268lySCmcqSEO6cDYe_XlLHM,8
+aws_cdk_aws_eks_v2_alpha-2.231.0a0.dist-info/RECORD,,
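
Each RECORD line above follows the wheel format `path,sha256=<urlsafe base64 digest, unpadded>,size`. A small standalone sketch for spot-checking an installed file against its RECORD entry (the example path is illustrative):

```python
# Sketch: recompute a wheel RECORD-style entry for a file and compare it to the
# "path,sha256=...,size" lines listed above.
import base64
import hashlib
from pathlib import Path

def record_entry(path: Path) -> str:
    data = path.read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode()},{len(data)}"

# Example (adjust to your site-packages location):
# print(record_entry(Path("aws_cdk/aws_eks_v2_alpha/py.typed")))
```
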

aws_cdk/aws_eks_v2_alpha/_jsii/aws-eks-v2-alpha@2.206.0-alpha.0.jsii.tgz
Binary file (no text diff shown)

aws_cdk_aws_eks_v2_alpha-2.206.0a0.dist-info/RECORD

@@ -1,10 +0,0 @@
-aws_cdk/aws_eks_v2_alpha/__init__.py,sha256=u2qM3QzOTqxgEuc28Wz12mm6tgbR2J2kMskId_4jOu4,702556
-aws_cdk/aws_eks_v2_alpha/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-aws_cdk/aws_eks_v2_alpha/_jsii/__init__.py,sha256=g3FVYJ-X5mpQwyGxllEvo3uOUdcBIh2Z9hxbz1tpcYM,1485
-aws_cdk/aws_eks_v2_alpha/_jsii/aws-eks-v2-alpha@2.206.0-alpha.0.jsii.tgz,sha256=Jc1z6DjGNDGDxJlz8rrRg-KZuF9_36ENibxU7iAcjAM,403101
-aws_cdk_aws_eks_v2_alpha-2.206.0a0.dist-info/LICENSE,sha256=y47tc38H0C4DpGljYUZDl8XxidQjNxxGLq-K4jwv6Xc,11391
-aws_cdk_aws_eks_v2_alpha-2.206.0a0.dist-info/METADATA,sha256=EEryIpF4eEVBPAVxhcyLU4p5sH1-legfxi18_OzCzxY,42850
-aws_cdk_aws_eks_v2_alpha-2.206.0a0.dist-info/NOTICE,sha256=6Jdq-MQvHIyOFx_9SdfwJrEmcxlScjONPAJru73PESY,919
-aws_cdk_aws_eks_v2_alpha-2.206.0a0.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
-aws_cdk_aws_eks_v2_alpha-2.206.0a0.dist-info/top_level.txt,sha256=1TALAKbuUGsMSrfKWEf268lySCmcqSEO6cDYe_XlLHM,8
-aws_cdk_aws_eks_v2_alpha-2.206.0a0.dist-info/RECORD,,

{aws_cdk_aws_eks_v2_alpha-2.206.0a0.dist-info → aws_cdk_aws_eks_v2_alpha-2.231.0a0.dist-info}/LICENSE
RENAMED, file without changes

{aws_cdk_aws_eks_v2_alpha-2.206.0a0.dist-info → aws_cdk_aws_eks_v2_alpha-2.231.0a0.dist-info}/NOTICE
RENAMED, file without changes

{aws_cdk_aws_eks_v2_alpha-2.206.0a0.dist-info → aws_cdk_aws_eks_v2_alpha-2.231.0a0.dist-info}/WHEEL
RENAMED, file without changes

{aws_cdk_aws_eks_v2_alpha-2.206.0a0.dist-info → aws_cdk_aws_eks_v2_alpha-2.231.0a0.dist-info}/top_level.txt
RENAMED, file without changes