aws-cdk.aws-eks-v2-alpha: 2.178.1a0.tar.gz → 2.179.0a0.tar.gz
This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of aws-cdk.aws-eks-v2-alpha might be problematic.
- {aws_cdk_aws_eks_v2_alpha-2.178.1a0/src/aws_cdk.aws_eks_v2_alpha.egg-info → aws_cdk_aws_eks_v2_alpha-2.179.0a0}/PKG-INFO +36 -36
- {aws_cdk_aws_eks_v2_alpha-2.178.1a0 → aws_cdk_aws_eks_v2_alpha-2.179.0a0}/README.md +34 -34
- {aws_cdk_aws_eks_v2_alpha-2.178.1a0 → aws_cdk_aws_eks_v2_alpha-2.179.0a0}/setup.py +3 -3
- {aws_cdk_aws_eks_v2_alpha-2.178.1a0 → aws_cdk_aws_eks_v2_alpha-2.179.0a0}/src/aws_cdk/aws_eks_v2_alpha/__init__.py +63 -50
- {aws_cdk_aws_eks_v2_alpha-2.178.1a0 → aws_cdk_aws_eks_v2_alpha-2.179.0a0}/src/aws_cdk/aws_eks_v2_alpha/_jsii/__init__.py +2 -2
- aws_cdk_aws_eks_v2_alpha-2.179.0a0/src/aws_cdk/aws_eks_v2_alpha/_jsii/aws-eks-v2-alpha@2.179.0-alpha.0.jsii.tgz +0 -0
- {aws_cdk_aws_eks_v2_alpha-2.178.1a0 → aws_cdk_aws_eks_v2_alpha-2.179.0a0/src/aws_cdk.aws_eks_v2_alpha.egg-info}/PKG-INFO +36 -36
- {aws_cdk_aws_eks_v2_alpha-2.178.1a0 → aws_cdk_aws_eks_v2_alpha-2.179.0a0}/src/aws_cdk.aws_eks_v2_alpha.egg-info/SOURCES.txt +1 -1
- {aws_cdk_aws_eks_v2_alpha-2.178.1a0 → aws_cdk_aws_eks_v2_alpha-2.179.0a0}/src/aws_cdk.aws_eks_v2_alpha.egg-info/requires.txt +1 -1
- aws_cdk_aws_eks_v2_alpha-2.178.1a0/src/aws_cdk/aws_eks_v2_alpha/_jsii/aws-eks-v2-alpha@2.178.1-alpha.0.jsii.tgz +0 -0
- {aws_cdk_aws_eks_v2_alpha-2.178.1a0 → aws_cdk_aws_eks_v2_alpha-2.179.0a0}/LICENSE +0 -0
- {aws_cdk_aws_eks_v2_alpha-2.178.1a0 → aws_cdk_aws_eks_v2_alpha-2.179.0a0}/MANIFEST.in +0 -0
- {aws_cdk_aws_eks_v2_alpha-2.178.1a0 → aws_cdk_aws_eks_v2_alpha-2.179.0a0}/NOTICE +0 -0
- {aws_cdk_aws_eks_v2_alpha-2.178.1a0 → aws_cdk_aws_eks_v2_alpha-2.179.0a0}/pyproject.toml +0 -0
- {aws_cdk_aws_eks_v2_alpha-2.178.1a0 → aws_cdk_aws_eks_v2_alpha-2.179.0a0}/setup.cfg +0 -0
- {aws_cdk_aws_eks_v2_alpha-2.178.1a0 → aws_cdk_aws_eks_v2_alpha-2.179.0a0}/src/aws_cdk/aws_eks_v2_alpha/py.typed +0 -0
- {aws_cdk_aws_eks_v2_alpha-2.178.1a0 → aws_cdk_aws_eks_v2_alpha-2.179.0a0}/src/aws_cdk.aws_eks_v2_alpha.egg-info/dependency_links.txt +0 -0
- {aws_cdk_aws_eks_v2_alpha-2.178.1a0 → aws_cdk_aws_eks_v2_alpha-2.179.0a0}/src/aws_cdk.aws_eks_v2_alpha.egg-info/top_level.txt +0 -0
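The substantive change in this release is the addition of `KubernetesVersion.V1_32`, plus a matching update of every documentation example from the v1.31 kubectl layer to `KubectlV32Layer`; everything else is the usual version-number churn. As a minimal sketch of what consuming the new version looks like (the `App`/`Stack` scaffolding and stack name here are illustrative, not part of the diff, and it assumes `aws-cdk.lambda-layer-kubectl-v32` is installed alongside):

```python
import aws_cdk as cdk
import aws_cdk.aws_eks_v2_alpha as eks
from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer

app = cdk.App()
stack = cdk.Stack(app, "EksV132Stack")  # hypothetical stack name

# Kubernetes 1.32 pairs with the v32 kubectl layer, per the new V1_32
# docstring added in src/aws_cdk/aws_eks_v2_alpha/__init__.py below.
eks.Cluster(stack, "hello-eks",
    version=eks.KubernetesVersion.V1_32,
    kubectl_provider_options=eks.KubectlProviderOptions(
        kubectl_layer=KubectlV32Layer(stack, "kubectl")
    )
)

app.synth()
```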
{aws_cdk_aws_eks_v2_alpha-2.178.1a0/src/aws_cdk.aws_eks_v2_alpha.egg-info → aws_cdk_aws_eks_v2_alpha-2.179.0a0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: aws-cdk.aws-eks-v2-alpha
-Version: 2.178.1a0
+Version: 2.179.0a0
 Summary: The CDK Construct Library for AWS::EKS
 Home-page: https://github.com/aws/aws-cdk
 Author: Amazon Web Services
@@ -23,7 +23,7 @@ Requires-Python: ~=3.8
 Description-Content-Type: text/markdown
 License-File: LICENSE
 License-File: NOTICE
-Requires-Dist: aws-cdk-lib<3.0.0,>=2.178.1
+Requires-Dist: aws-cdk-lib<3.0.0,>=2.179.0
 Requires-Dist: constructs<11.0.0,>=10.0.0
 Requires-Dist: jsii<2.0.0,>=1.106.0
 Requires-Dist: publication>=0.0.3
@@ -63,7 +63,7 @@ Here is the minimal example of defining an AWS EKS cluster
 
 ```python
 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_31
+    version=eks.KubernetesVersion.V1_32
 )
 ```
 
@@ -103,7 +103,7 @@ Creating a new cluster is done using the `Cluster` constructs. The only required
 
 ```python
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_31
+    version=eks.KubernetesVersion.V1_32
 )
 ```
 
@@ -111,7 +111,7 @@ You can also use `FargateCluster` to provision a cluster that uses only fargate
 
 ```python
 eks.FargateCluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_31
+    version=eks.KubernetesVersion.V1_32
 )
 ```
 
@@ -120,13 +120,13 @@ be created by default. It will only be deployed when `kubectlProviderOptions`
 property is used.**
 
 ```python
-from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
 
 
 eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV31Layer(self, "kubectl")
+        kubectl_layer=KubectlV32Layer(self, "kubectl")
     )
 )
 ```
@@ -134,7 +134,7 @@ eks.Cluster(self, "hello-eks",
 ### Managed node groups
 
 Amazon EKS managed node groups automate the provisioning and lifecycle management of nodes (Amazon EC2 instances) for Amazon EKS Kubernetes clusters.
-With Amazon EKS managed node groups, you don’t need to separately provision or register the Amazon EC2 instances that provide compute capacity to run your Kubernetes applications. You can create, update, or terminate nodes for your cluster with a single operation. Nodes run using the latest Amazon EKS optimized AMIs in your AWS account while node updates and terminations gracefully drain nodes to ensure that your applications stay available.
+With Amazon EKS managed node groups, you don't need to separately provision or register the Amazon EC2 instances that provide compute capacity to run your Kubernetes applications. You can create, update, or terminate nodes for your cluster with a single operation. Nodes run using the latest Amazon EKS optimized AMIs in your AWS account while node updates and terminations gracefully drain nodes to ensure that your applications stay available.
 
 > For more details visit [Amazon EKS Managed Node Groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html).
 
@@ -146,7 +146,7 @@ At cluster instantiation time, you can customize the number of instances and the
 
 ```python
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     default_capacity=5,
     default_capacity_instance=ec2.InstanceType.of(ec2.InstanceClass.M5, ec2.InstanceSize.SMALL)
 )
@@ -158,7 +158,7 @@ Additional customizations are available post instantiation. To apply them, set t
 
 ```python
 cluster = eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     default_capacity=0
 )
 
@@ -210,7 +210,7 @@ The following code defines an Amazon EKS cluster with a default Fargate Profile
 
 ```python
 cluster = eks.FargateCluster(self, "MyCluster",
-    version=eks.KubernetesVersion.V1_31
+    version=eks.KubernetesVersion.V1_32
 )
 ```
 
@@ -229,7 +229,7 @@ You can configure the [cluster endpoint access](https://docs.aws.amazon.com/eks/
 
 ```python
 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     endpoint_access=eks.EndpointAccess.PRIVATE
 )
 ```
@@ -251,7 +251,7 @@ To deploy the controller on your EKS cluster, configure the `albController` prop
 
 ```python
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     alb_controller=eks.AlbControllerOptions(
         version=eks.AlbControllerVersion.V2_8_2
     )
@@ -293,7 +293,7 @@ You can specify the VPC of the cluster using the `vpc` and `vpcSubnets` properti
 
 
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     vpc=vpc,
     vpc_subnets=[ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS)]
 )
@@ -337,13 +337,13 @@ To create a `Kubectl Handler`, use `kubectlProviderOptions` when creating the cl
 `kubectlLayer` is the only required property in `kubectlProviderOptions`.
 
 ```python
-from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
 
 
 eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV31Layer(self, "kubectl")
+        kubectl_layer=KubectlV32Layer(self, "kubectl")
     )
 )
 ```
@@ -353,7 +353,7 @@ eks.Cluster(self, "hello-eks",
 If you want to use an existing kubectl provider function, for example with tight trusted entities on your IAM Roles - you can import the existing provider and then use the imported provider when importing the cluster:
 
 ```python
-from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
 
 
 handler_role = iam.Role.from_role_arn(self, "HandlerRole", "arn:aws:iam::123456789012:role/lambda-role")
@@ -375,13 +375,13 @@ cluster = eks.Cluster.from_cluster_attributes(self, "Cluster",
 You can configure the environment of this function by specifying it at cluster instantiation. For example, this can be useful in order to configure an http proxy:
 
 ```python
-from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
 
 
 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV31Layer(self, "kubectl"),
+        kubectl_layer=KubectlV32Layer(self, "kubectl"),
         environment={
             "http_proxy": "http://proxy.myproxy.com"
         }
@@ -402,13 +402,13 @@ Depending on which version of kubernetes you're targeting, you will need to use
 the `@aws-cdk/lambda-layer-kubectl-vXY` packages.
 
 ```python
-from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
 
 
 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV31Layer(self, "kubectl")
+        kubectl_layer=KubectlV32Layer(self, "kubectl")
     )
 )
 ```
@@ -418,15 +418,15 @@ cluster = eks.Cluster(self, "hello-eks",
 By default, the kubectl provider is configured with 1024MiB of memory. You can use the `memory` option to specify the memory size for the AWS Lambda function:
 
 ```python
-from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
 
 
 eks.Cluster(self, "MyCluster",
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV31Layer(self, "kubectl"),
+        kubectl_layer=KubectlV32Layer(self, "kubectl"),
         memory=Size.gibibytes(4)
     ),
-    version=eks.KubernetesVersion.V1_31
+    version=eks.KubernetesVersion.V1_32
 )
 ```
 
@@ -459,7 +459,7 @@ When you create a cluster, you can specify a `mastersRole`. The `Cluster` constr
 # role: iam.Role
 
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     masters_role=role
 )
 ```
@@ -480,7 +480,7 @@ You can use the `secretsEncryptionKey` to configure which key the cluster will u
 secrets_key = kms.Key(self, "SecretsKey")
 cluster = eks.Cluster(self, "MyCluster",
     secrets_encryption_key=secrets_key,
-    version=eks.KubernetesVersion.V1_31
+    version=eks.KubernetesVersion.V1_32
 )
 ```
 
@@ -490,7 +490,7 @@ You can also use a similar configuration for running a cluster built using the F
 secrets_key = kms.Key(self, "SecretsKey")
 cluster = eks.FargateCluster(self, "MyFargateCluster",
     secrets_encryption_key=secrets_key,
-    version=eks.KubernetesVersion.V1_31
+    version=eks.KubernetesVersion.V1_32
 )
 ```
 
@@ -533,7 +533,7 @@ eks.AccessPolicy.from_access_policy_name("AmazonEKSAdminPolicy",
 Use `grantAccess()` to grant the AccessPolicy to an IAM principal:
 
 ```python
-from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
 # vpc: ec2.Vpc
 
 
@@ -548,9 +548,9 @@ eks_admin_role = iam.Role(self, "EKSAdminRole",
 cluster = eks.Cluster(self, "Cluster",
     vpc=vpc,
     masters_role=cluster_admin_role,
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV31Layer(self, "kubectl"),
+        kubectl_layer=KubectlV32Layer(self, "kubectl"),
         memory=Size.gibibytes(4)
     )
 )
@@ -735,7 +735,7 @@ when a cluster is defined:
 
 ```python
 eks.Cluster(self, "MyCluster",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     prune=False
 )
 ```
@@ -1051,7 +1051,7 @@ property. For example:
 ```python
 cluster = eks.Cluster(self, "Cluster",
     # ...
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     cluster_logging=[eks.ClusterLoggingTypes.API, eks.ClusterLoggingTypes.AUTHENTICATOR, eks.ClusterLoggingTypes.SCHEDULER
     ]
 )

{aws_cdk_aws_eks_v2_alpha-2.178.1a0 → aws_cdk_aws_eks_v2_alpha-2.179.0a0}/README.md

@@ -32,7 +32,7 @@ Here is the minimal example of defining an AWS EKS cluster
 
 ```python
 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_31
+    version=eks.KubernetesVersion.V1_32
 )
 ```
 
@@ -72,7 +72,7 @@ Creating a new cluster is done using the `Cluster` constructs. The only required
 
 ```python
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_31
+    version=eks.KubernetesVersion.V1_32
 )
 ```
 
@@ -80,7 +80,7 @@ You can also use `FargateCluster` to provision a cluster that uses only fargate
 
 ```python
 eks.FargateCluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_31
+    version=eks.KubernetesVersion.V1_32
 )
 ```
 
@@ -89,13 +89,13 @@ be created by default. It will only be deployed when `kubectlProviderOptions`
 property is used.**
 
 ```python
-from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
 
 
 eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV31Layer(self, "kubectl")
+        kubectl_layer=KubectlV32Layer(self, "kubectl")
     )
 )
 ```
@@ -103,7 +103,7 @@ eks.Cluster(self, "hello-eks",
 ### Managed node groups
 
 Amazon EKS managed node groups automate the provisioning and lifecycle management of nodes (Amazon EC2 instances) for Amazon EKS Kubernetes clusters.
-With Amazon EKS managed node groups, you don’t need to separately provision or register the Amazon EC2 instances that provide compute capacity to run your Kubernetes applications. You can create, update, or terminate nodes for your cluster with a single operation. Nodes run using the latest Amazon EKS optimized AMIs in your AWS account while node updates and terminations gracefully drain nodes to ensure that your applications stay available.
+With Amazon EKS managed node groups, you don't need to separately provision or register the Amazon EC2 instances that provide compute capacity to run your Kubernetes applications. You can create, update, or terminate nodes for your cluster with a single operation. Nodes run using the latest Amazon EKS optimized AMIs in your AWS account while node updates and terminations gracefully drain nodes to ensure that your applications stay available.
 
 > For more details visit [Amazon EKS Managed Node Groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html).
 
@@ -115,7 +115,7 @@ At cluster instantiation time, you can customize the number of instances and the
 
 ```python
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     default_capacity=5,
     default_capacity_instance=ec2.InstanceType.of(ec2.InstanceClass.M5, ec2.InstanceSize.SMALL)
 )
@@ -127,7 +127,7 @@ Additional customizations are available post instantiation. To apply them, set t
 
 ```python
 cluster = eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     default_capacity=0
 )
 
@@ -179,7 +179,7 @@ The following code defines an Amazon EKS cluster with a default Fargate Profile
 
 ```python
 cluster = eks.FargateCluster(self, "MyCluster",
-    version=eks.KubernetesVersion.V1_31
+    version=eks.KubernetesVersion.V1_32
 )
 ```
 
@@ -198,7 +198,7 @@ You can configure the [cluster endpoint access](https://docs.aws.amazon.com/eks/
 
 ```python
 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     endpoint_access=eks.EndpointAccess.PRIVATE
 )
 ```
@@ -220,7 +220,7 @@ To deploy the controller on your EKS cluster, configure the `albController` prop
 
 ```python
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     alb_controller=eks.AlbControllerOptions(
         version=eks.AlbControllerVersion.V2_8_2
     )
@@ -262,7 +262,7 @@ You can specify the VPC of the cluster using the `vpc` and `vpcSubnets` properti
 
 
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     vpc=vpc,
     vpc_subnets=[ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS)]
 )
@@ -306,13 +306,13 @@ To create a `Kubectl Handler`, use `kubectlProviderOptions` when creating the cl
 `kubectlLayer` is the only required property in `kubectlProviderOptions`.
 
 ```python
-from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
 
 
 eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV31Layer(self, "kubectl")
+        kubectl_layer=KubectlV32Layer(self, "kubectl")
     )
 )
 ```
@@ -322,7 +322,7 @@ eks.Cluster(self, "hello-eks",
 If you want to use an existing kubectl provider function, for example with tight trusted entities on your IAM Roles - you can import the existing provider and then use the imported provider when importing the cluster:
 
 ```python
-from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
 
 
 handler_role = iam.Role.from_role_arn(self, "HandlerRole", "arn:aws:iam::123456789012:role/lambda-role")
@@ -344,13 +344,13 @@ cluster = eks.Cluster.from_cluster_attributes(self, "Cluster",
 You can configure the environment of this function by specifying it at cluster instantiation. For example, this can be useful in order to configure an http proxy:
 
 ```python
-from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
 
 
 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV31Layer(self, "kubectl"),
+        kubectl_layer=KubectlV32Layer(self, "kubectl"),
         environment={
             "http_proxy": "http://proxy.myproxy.com"
         }
@@ -371,13 +371,13 @@ Depending on which version of kubernetes you're targeting, you will need to use
 the `@aws-cdk/lambda-layer-kubectl-vXY` packages.
 
 ```python
-from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
 
 
 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV31Layer(self, "kubectl")
+        kubectl_layer=KubectlV32Layer(self, "kubectl")
     )
 )
 ```
@@ -387,15 +387,15 @@ cluster = eks.Cluster(self, "hello-eks",
 By default, the kubectl provider is configured with 1024MiB of memory. You can use the `memory` option to specify the memory size for the AWS Lambda function:
 
 ```python
-from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
 
 
 eks.Cluster(self, "MyCluster",
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV31Layer(self, "kubectl"),
+        kubectl_layer=KubectlV32Layer(self, "kubectl"),
         memory=Size.gibibytes(4)
     ),
-    version=eks.KubernetesVersion.V1_31
+    version=eks.KubernetesVersion.V1_32
 )
 ```
 
@@ -428,7 +428,7 @@ When you create a cluster, you can specify a `mastersRole`. The `Cluster` constr
 # role: iam.Role
 
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     masters_role=role
 )
 ```
@@ -449,7 +449,7 @@ You can use the `secretsEncryptionKey` to configure which key the cluster will u
 secrets_key = kms.Key(self, "SecretsKey")
 cluster = eks.Cluster(self, "MyCluster",
     secrets_encryption_key=secrets_key,
-    version=eks.KubernetesVersion.V1_31
+    version=eks.KubernetesVersion.V1_32
 )
 ```
 
@@ -459,7 +459,7 @@ You can also use a similar configuration for running a cluster built using the F
 secrets_key = kms.Key(self, "SecretsKey")
 cluster = eks.FargateCluster(self, "MyFargateCluster",
     secrets_encryption_key=secrets_key,
-    version=eks.KubernetesVersion.V1_31
+    version=eks.KubernetesVersion.V1_32
 )
 ```
 
@@ -502,7 +502,7 @@ eks.AccessPolicy.from_access_policy_name("AmazonEKSAdminPolicy",
 Use `grantAccess()` to grant the AccessPolicy to an IAM principal:
 
 ```python
-from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
 # vpc: ec2.Vpc
 
 
@@ -517,9 +517,9 @@ eks_admin_role = iam.Role(self, "EKSAdminRole",
 cluster = eks.Cluster(self, "Cluster",
     vpc=vpc,
     masters_role=cluster_admin_role,
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV31Layer(self, "kubectl"),
+        kubectl_layer=KubectlV32Layer(self, "kubectl"),
         memory=Size.gibibytes(4)
     )
 )
@@ -704,7 +704,7 @@ when a cluster is defined:
 
 ```python
 eks.Cluster(self, "MyCluster",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     prune=False
 )
 ```
@@ -1020,7 +1020,7 @@ property. For example:
 ```python
 cluster = eks.Cluster(self, "Cluster",
     # ...
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     cluster_logging=[eks.ClusterLoggingTypes.API, eks.ClusterLoggingTypes.AUTHENTICATOR, eks.ClusterLoggingTypes.SCHEDULER
     ]
 )

{aws_cdk_aws_eks_v2_alpha-2.178.1a0 → aws_cdk_aws_eks_v2_alpha-2.179.0a0}/setup.py

@@ -5,7 +5,7 @@ kwargs = json.loads(
     """
 {
     "name": "aws-cdk.aws-eks-v2-alpha",
-    "version": "2.178.1.a0",
+    "version": "2.179.0.a0",
     "description": "The CDK Construct Library for AWS::EKS",
     "license": "Apache-2.0",
     "url": "https://github.com/aws/aws-cdk",
@@ -26,7 +26,7 @@ kwargs = json.loads(
     ],
     "package_data": {
         "aws_cdk.aws_eks_v2_alpha._jsii": [
-            "aws-eks-v2-alpha@2.178.1-alpha.0.jsii.tgz"
+            "aws-eks-v2-alpha@2.179.0-alpha.0.jsii.tgz"
         ],
         "aws_cdk.aws_eks_v2_alpha": [
             "py.typed"
@@ -34,7 +34,7 @@ kwargs = json.loads(
     },
    "python_requires": "~=3.8",
     "install_requires": [
-        "aws-cdk-lib>=2.178.1, <3.0.0",
+        "aws-cdk-lib>=2.179.0, <3.0.0",
        "constructs>=10.0.0, <11.0.0",
        "jsii>=1.106.0, <2.0.0",
        "publication>=0.0.3",
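Note the lockstep pinning above: the alpha package now requires `aws-cdk-lib>=2.179.0, <3.0.0`, i.e. at least the core release it was built against. A quick environment sanity check (my own helper, not part of the package; assumes the `packaging` distribution is installed):

```python
from importlib.metadata import version
from packaging.version import Version

# The alpha line tracks aws-cdk-lib release-for-release, so a mismatch
# here usually means a stale core library in the environment.
assert version("aws-cdk.aws-eks-v2-alpha") == "2.179.0a0"
assert Version(version("aws-cdk-lib")) >= Version("2.179.0")
```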

{aws_cdk_aws_eks_v2_alpha-2.178.1a0 → aws_cdk_aws_eks_v2_alpha-2.179.0a0}/src/aws_cdk/aws_eks_v2_alpha/__init__.py

@@ -33,7 +33,7 @@ Here is the minimal example of defining an AWS EKS cluster
 
 ```python
 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_31
+    version=eks.KubernetesVersion.V1_32
 )
 ```
 
@@ -73,7 +73,7 @@ Creating a new cluster is done using the `Cluster` constructs. The only required
 
 ```python
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_31
+    version=eks.KubernetesVersion.V1_32
 )
 ```
 
@@ -81,7 +81,7 @@ You can also use `FargateCluster` to provision a cluster that uses only fargate
 
 ```python
 eks.FargateCluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_31
+    version=eks.KubernetesVersion.V1_32
 )
 ```
 
@@ -90,13 +90,13 @@ be created by default. It will only be deployed when `kubectlProviderOptions`
 property is used.**
 
 ```python
-from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
 
 
 eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV31Layer(self, "kubectl")
+        kubectl_layer=KubectlV32Layer(self, "kubectl")
     )
 )
 ```
@@ -104,7 +104,7 @@ eks.Cluster(self, "hello-eks",
 ### Managed node groups
 
 Amazon EKS managed node groups automate the provisioning and lifecycle management of nodes (Amazon EC2 instances) for Amazon EKS Kubernetes clusters.
-With Amazon EKS managed node groups, you don’t need to separately provision or register the Amazon EC2 instances that provide compute capacity to run your Kubernetes applications. You can create, update, or terminate nodes for your cluster with a single operation. Nodes run using the latest Amazon EKS optimized AMIs in your AWS account while node updates and terminations gracefully drain nodes to ensure that your applications stay available.
+With Amazon EKS managed node groups, you don't need to separately provision or register the Amazon EC2 instances that provide compute capacity to run your Kubernetes applications. You can create, update, or terminate nodes for your cluster with a single operation. Nodes run using the latest Amazon EKS optimized AMIs in your AWS account while node updates and terminations gracefully drain nodes to ensure that your applications stay available.
 
 > For more details visit [Amazon EKS Managed Node Groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html).
 
@@ -116,7 +116,7 @@ At cluster instantiation time, you can customize the number of instances and the
 
 ```python
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     default_capacity=5,
     default_capacity_instance=ec2.InstanceType.of(ec2.InstanceClass.M5, ec2.InstanceSize.SMALL)
 )
@@ -128,7 +128,7 @@ Additional customizations are available post instantiation. To apply them, set t
 
 ```python
 cluster = eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     default_capacity=0
 )
 
@@ -180,7 +180,7 @@ The following code defines an Amazon EKS cluster with a default Fargate Profile
 
 ```python
 cluster = eks.FargateCluster(self, "MyCluster",
-    version=eks.KubernetesVersion.V1_31
+    version=eks.KubernetesVersion.V1_32
 )
 ```
 
@@ -199,7 +199,7 @@ You can configure the [cluster endpoint access](https://docs.aws.amazon.com/eks/
 
 ```python
 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     endpoint_access=eks.EndpointAccess.PRIVATE
 )
 ```
@@ -221,7 +221,7 @@ To deploy the controller on your EKS cluster, configure the `albController` prop
 
 ```python
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     alb_controller=eks.AlbControllerOptions(
         version=eks.AlbControllerVersion.V2_8_2
     )
@@ -263,7 +263,7 @@ You can specify the VPC of the cluster using the `vpc` and `vpcSubnets` properti
 
 
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     vpc=vpc,
     vpc_subnets=[ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS)]
 )
@@ -307,13 +307,13 @@ To create a `Kubectl Handler`, use `kubectlProviderOptions` when creating the cl
 `kubectlLayer` is the only required property in `kubectlProviderOptions`.
 
 ```python
-from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
 
 
 eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV31Layer(self, "kubectl")
+        kubectl_layer=KubectlV32Layer(self, "kubectl")
     )
 )
 ```
@@ -323,7 +323,7 @@ eks.Cluster(self, "hello-eks",
 If you want to use an existing kubectl provider function, for example with tight trusted entities on your IAM Roles - you can import the existing provider and then use the imported provider when importing the cluster:
 
 ```python
-from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
 
 
 handler_role = iam.Role.from_role_arn(self, "HandlerRole", "arn:aws:iam::123456789012:role/lambda-role")
@@ -345,13 +345,13 @@ cluster = eks.Cluster.from_cluster_attributes(self, "Cluster",
 You can configure the environment of this function by specifying it at cluster instantiation. For example, this can be useful in order to configure an http proxy:
 
 ```python
-from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
 
 
 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV31Layer(self, "kubectl"),
+        kubectl_layer=KubectlV32Layer(self, "kubectl"),
         environment={
             "http_proxy": "http://proxy.myproxy.com"
         }
@@ -372,13 +372,13 @@ Depending on which version of kubernetes you're targeting, you will need to use
 the `@aws-cdk/lambda-layer-kubectl-vXY` packages.
 
 ```python
-from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
 
 
 cluster = eks.Cluster(self, "hello-eks",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV31Layer(self, "kubectl")
+        kubectl_layer=KubectlV32Layer(self, "kubectl")
     )
 )
 ```
@@ -388,15 +388,15 @@ cluster = eks.Cluster(self, "hello-eks",
 By default, the kubectl provider is configured with 1024MiB of memory. You can use the `memory` option to specify the memory size for the AWS Lambda function:
 
 ```python
-from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
 
 
 eks.Cluster(self, "MyCluster",
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV31Layer(self, "kubectl"),
+        kubectl_layer=KubectlV32Layer(self, "kubectl"),
         memory=Size.gibibytes(4)
     ),
-    version=eks.KubernetesVersion.V1_31
+    version=eks.KubernetesVersion.V1_32
 )
 ```
 
@@ -429,7 +429,7 @@ When you create a cluster, you can specify a `mastersRole`. The `Cluster` constr
 # role: iam.Role
 
 eks.Cluster(self, "HelloEKS",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     masters_role=role
 )
 ```
@@ -450,7 +450,7 @@ You can use the `secretsEncryptionKey` to configure which key the cluster will u
 secrets_key = kms.Key(self, "SecretsKey")
 cluster = eks.Cluster(self, "MyCluster",
     secrets_encryption_key=secrets_key,
-    version=eks.KubernetesVersion.V1_31
+    version=eks.KubernetesVersion.V1_32
 )
 ```
 
@@ -460,7 +460,7 @@ You can also use a similar configuration for running a cluster built using the F
 secrets_key = kms.Key(self, "SecretsKey")
 cluster = eks.FargateCluster(self, "MyFargateCluster",
     secrets_encryption_key=secrets_key,
-    version=eks.KubernetesVersion.V1_31
+    version=eks.KubernetesVersion.V1_32
 )
 ```
 
@@ -503,7 +503,7 @@ eks.AccessPolicy.from_access_policy_name("AmazonEKSAdminPolicy",
 Use `grantAccess()` to grant the AccessPolicy to an IAM principal:
 
 ```python
-from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
 # vpc: ec2.Vpc
 
 
@@ -518,9 +518,9 @@ eks_admin_role = iam.Role(self, "EKSAdminRole",
 cluster = eks.Cluster(self, "Cluster",
     vpc=vpc,
     masters_role=cluster_admin_role,
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     kubectl_provider_options=eks.KubectlProviderOptions(
-        kubectl_layer=KubectlV31Layer(self, "kubectl"),
+        kubectl_layer=KubectlV32Layer(self, "kubectl"),
         memory=Size.gibibytes(4)
     )
 )
@@ -705,7 +705,7 @@ when a cluster is defined:
 
 ```python
 eks.Cluster(self, "MyCluster",
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     prune=False
 )
 ```
@@ -1021,7 +1021,7 @@ property. For example:
 ```python
 cluster = eks.Cluster(self, "Cluster",
     # ...
-    version=eks.KubernetesVersion.V1_31,
+    version=eks.KubernetesVersion.V1_32,
     cluster_logging=[eks.ClusterLoggingTypes.API, eks.ClusterLoggingTypes.AUTHENTICATOR, eks.ClusterLoggingTypes.SCHEDULER
     ]
 )
@@ -2025,7 +2025,7 @@ class AlbControllerOptions:
     Example::
 
         eks.Cluster(self, "HelloEKS",
-            version=eks.KubernetesVersion.V1_31,
+            version=eks.KubernetesVersion.V1_32,
             alb_controller=eks.AlbControllerOptions(
                 version=eks.AlbControllerVersion.V2_8_2
             )
@@ -2237,7 +2237,7 @@ class AlbControllerVersion(
    Example::

        eks.Cluster(self, "HelloEKS",
-            version=eks.KubernetesVersion.V1_31,
+            version=eks.KubernetesVersion.V1_32,
            alb_controller=eks.AlbControllerOptions(
                version=eks.AlbControllerVersion.V2_8_2
            )
@@ -3657,7 +3657,7 @@ class ClusterAttributes:
 
     Example::
 
-        from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+        from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
 
 
         handler_role = iam.Role.from_role_arn(self, "HandlerRole", "arn:aws:iam::123456789012:role/lambda-role")
@@ -4317,7 +4317,7 @@ class ClusterLoggingTypes(enum.Enum):
 
         cluster = eks.Cluster(self, "Cluster",
             # ...
-            version=eks.KubernetesVersion.V1_31,
+            version=eks.KubernetesVersion.V1_32,
             cluster_logging=[eks.ClusterLoggingTypes.API, eks.ClusterLoggingTypes.AUTHENTICATOR, eks.ClusterLoggingTypes.SCHEDULER
             ]
         )
@@ -4433,7 +4433,7 @@ class ClusterProps(ClusterCommonOptions):
     Example::
 
         cluster = eks.Cluster(self, "HelloEKS",
-            version=eks.KubernetesVersion.V1_31,
+            version=eks.KubernetesVersion.V1_32,
             default_capacity=0
         )
 
@@ -5025,7 +5025,7 @@ class EndpointAccess(
     Example::
 
         cluster = eks.Cluster(self, "hello-eks",
-            version=eks.KubernetesVersion.V1_31,
+            version=eks.KubernetesVersion.V1_32,
             endpoint_access=eks.EndpointAccess.PRIVATE
         )
     '''
@@ -5163,7 +5163,7 @@ class FargateClusterProps(ClusterCommonOptions):
     Example::
 
         cluster = eks.FargateCluster(self, "MyCluster",
-            version=eks.KubernetesVersion.V1_31
+            version=eks.KubernetesVersion.V1_32
         )
     '''
     if isinstance(alb_controller, dict):
@@ -7636,7 +7636,7 @@ class KubectlProvider(
 
     Example::
 
-        from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+        from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
 
 
         handler_role = iam.Role.from_role_arn(self, "HandlerRole", "arn:aws:iam::123456789012:role/lambda-role")
@@ -7786,7 +7786,7 @@ class KubectlProviderAttributes:
 
     Example::
 
-        from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+        from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
 
 
         handler_role = iam.Role.from_role_arn(self, "HandlerRole", "arn:aws:iam::123456789012:role/lambda-role")
@@ -7886,13 +7886,13 @@ class KubectlProviderOptions:
 
     Example::
 
-        from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+        from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
 
 
         cluster = eks.Cluster(self, "hello-eks",
-            version=eks.KubernetesVersion.V1_31,
+            version=eks.KubernetesVersion.V1_32,
             kubectl_provider_options=eks.KubectlProviderOptions(
-                kubectl_layer=KubectlV31Layer(self, "kubectl"),
+                kubectl_layer=KubectlV32Layer(self, "kubectl"),
                 environment={
                     "http_proxy": "http://proxy.myproxy.com"
                 }
@@ -9168,7 +9168,7 @@ class KubernetesVersion(
     Example::
 
         cluster = eks.Cluster(self, "hello-eks",
-            version=eks.KubernetesVersion.V1_31,
+            version=eks.KubernetesVersion.V1_32,
             endpoint_access=eks.EndpointAccess.PRIVATE
         )
     '''
@@ -9278,6 +9278,19 @@ class KubernetesVersion(
         '''
         return typing.cast("KubernetesVersion", jsii.sget(cls, "V1_31"))
 
+    @jsii.python.classproperty
+    @jsii.member(jsii_name="V1_32")
+    def V1_32(cls) -> "KubernetesVersion":
+        '''(experimental) Kubernetes version 1.32.
+
+        When creating a ``Cluster`` with this version, you need to also specify the
+        ``kubectlLayer`` property with a ``KubectlV32Layer`` from
+        ``@aws-cdk/lambda-layer-kubectl-v32``.
+
+        :stability: experimental
+        '''
+        return typing.cast("KubernetesVersion", jsii.sget(cls, "V1_32"))
+
     @builtins.property
     @jsii.member(jsii_name="version")
     def version(self) -> builtins.str:
@@ -9808,7 +9821,7 @@ class NodegroupOptions:
     Example::
 
         cluster = eks.Cluster(self, "HelloEKS",
-            version=eks.KubernetesVersion.V1_31,
+            version=eks.KubernetesVersion.V1_32,
             default_capacity=0
         )
 
@@ -12037,7 +12050,7 @@ class Cluster(
     Example::
 
         cluster = eks.Cluster(self, "HelloEKS",
-            version=eks.KubernetesVersion.V1_31,
+            version=eks.KubernetesVersion.V1_32,
             default_capacity=0
         )
 
@@ -12931,7 +12944,7 @@ class FargateCluster(
     Example::
 
         cluster = eks.FargateCluster(self, "MyCluster",
-            version=eks.KubernetesVersion.V1_31
+            version=eks.KubernetesVersion.V1_32
         )
     '''
 
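`V1_32` lands as a jsii classproperty rather than a plain Python enum member, so from user code it reads like an ordinary class attribute that returns a `KubernetesVersion` instance; under the hood `jsii.sget` fetches the static value from the embedded JavaScript assembly. A sketch of the access pattern (the printed value is assumed from the property name, not taken from this diff):

```python
import aws_cdk.aws_eks_v2_alpha as eks

# Resolves through jsii.sget(cls, "V1_32") in the generated binding.
k8s = eks.KubernetesVersion.V1_32
print(k8s.version)  # expected: "1.32"
```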

{aws_cdk_aws_eks_v2_alpha-2.178.1a0 → aws_cdk_aws_eks_v2_alpha-2.179.0a0}/src/aws_cdk/aws_eks_v2_alpha/_jsii/__init__.py

@@ -33,9 +33,9 @@ import constructs._jsii
 
 __jsii_assembly__ = jsii.JSIIAssembly.load(
     "@aws-cdk/aws-eks-v2-alpha",
-    "2.178.1-alpha.0",
+    "2.179.0-alpha.0",
     __name__[0:-6],
-    "aws-eks-v2-alpha@2.178.1-alpha.0.jsii.tgz",
+    "aws-eks-v2-alpha@2.179.0-alpha.0.jsii.tgz",
 )
 
 __all__ = [
src/aws_cdk/aws_eks_v2_alpha/_jsii/aws-eks-v2-alpha@2.179.0-alpha.0.jsii.tgz: binary file (contents not shown).
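Two version grammars are in play here: the PyPI metadata uses PEP 440 (`2.179.0a0`) while the embedded jsii assembly keeps the npm-style semver of the upstream JavaScript package (`2.179.0-alpha.0`). A toy converter, purely illustrative, just to make the mapping explicit:

```python
import re

def pep440_alpha_to_semver(v: str) -> str:
    """Map e.g. '2.179.0a0' -> '2.179.0-alpha.0'; pass through otherwise."""
    m = re.fullmatch(r"(\d+\.\d+\.\d+)a(\d+)", v)
    return f"{m.group(1)}-alpha.{m.group(2)}" if m else v

assert pep440_alpha_to_semver("2.179.0a0") == "2.179.0-alpha.0"
```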

{aws_cdk_aws_eks_v2_alpha-2.178.1a0 → aws_cdk_aws_eks_v2_alpha-2.179.0a0/src/aws_cdk.aws_eks_v2_alpha.egg-info}/PKG-INFO

(Identical hunks to the PKG-INFO diff at the top of this page; the egg-info copy mirrors the sdist metadata.)

{aws_cdk_aws_eks_v2_alpha-2.178.1a0 → aws_cdk_aws_eks_v2_alpha-2.179.0a0}/src/aws_cdk.aws_eks_v2_alpha.egg-info/SOURCES.txt

@@ -12,4 +12,4 @@ src/aws_cdk.aws_eks_v2_alpha.egg-info/top_level.txt
 src/aws_cdk/aws_eks_v2_alpha/__init__.py
 src/aws_cdk/aws_eks_v2_alpha/py.typed
 src/aws_cdk/aws_eks_v2_alpha/_jsii/__init__.py
-src/aws_cdk/aws_eks_v2_alpha/_jsii/aws-eks-v2-alpha@2.178.1-alpha.0.jsii.tgz
+src/aws_cdk/aws_eks_v2_alpha/_jsii/aws-eks-v2-alpha@2.179.0-alpha.0.jsii.tgz

src/aws_cdk/aws_eks_v2_alpha/_jsii/aws-eks-v2-alpha@2.178.1-alpha.0.jsii.tgz: binary file (contents not shown).