aws-cdk-lib 2.201.0__py3-none-any.whl → 2.203.0__py3-none-any.whl

This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of aws-cdk-lib might be problematic.

Files changed (69)
  1. aws_cdk/__init__.py +70 -71
  2. aws_cdk/_jsii/__init__.py +1 -1
  3. aws_cdk/_jsii/{aws-cdk-lib@2.201.0.jsii.tgz → aws-cdk-lib@2.203.0.jsii.tgz} +0 -0
  4. aws_cdk/aws_accessanalyzer/__init__.py +310 -4
  5. aws_cdk/aws_aiops/__init__.py +964 -0
  6. aws_cdk/aws_amplify/__init__.py +127 -0
  7. aws_cdk/aws_arczonalshift/__init__.py +8 -8
  8. aws_cdk/aws_athena/__init__.py +12 -11
  9. aws_cdk/aws_b2bi/__init__.py +782 -3
  10. aws_cdk/aws_backup/__init__.py +22 -0
  11. aws_cdk/aws_batch/__init__.py +53 -1
  12. aws_cdk/aws_bedrock/__init__.py +123 -9
  13. aws_cdk/aws_cleanrooms/__init__.py +157 -154
  14. aws_cdk/aws_cloudformation/__init__.py +28 -28
  15. aws_cdk/aws_cloudfront/__init__.py +92 -57
  16. aws_cdk/aws_cloudfront/experimental/__init__.py +42 -3
  17. aws_cdk/aws_cloudwatch/__init__.py +228 -2
  18. aws_cdk/aws_connect/__init__.py +120 -8
  19. aws_cdk/aws_connectcampaignsv2/__init__.py +25 -4
  20. aws_cdk/aws_customerprofiles/__init__.py +150 -30
  21. aws_cdk/aws_datazone/__init__.py +23 -4
  22. aws_cdk/aws_deadline/__init__.py +4 -4
  23. aws_cdk/aws_dsql/__init__.py +148 -0
  24. aws_cdk/aws_ec2/__init__.py +321 -19
  25. aws_cdk/aws_ecr/__init__.py +3 -3
  26. aws_cdk/aws_ecs/__init__.py +48 -13
  27. aws_cdk/aws_efs/__init__.py +17 -6
  28. aws_cdk/aws_eks/__init__.py +180 -158
  29. aws_cdk/aws_elasticloadbalancingv2/__init__.py +4 -2
  30. aws_cdk/aws_emrserverless/__init__.py +118 -0
  31. aws_cdk/aws_fsx/__init__.py +891 -0
  32. aws_cdk/aws_glue/__init__.py +58 -24
  33. aws_cdk/aws_iam/__init__.py +11 -11
  34. aws_cdk/aws_inspectorv2/__init__.py +442 -3
  35. aws_cdk/aws_kendra/__init__.py +10 -5
  36. aws_cdk/aws_kms/__init__.py +24 -12
  37. aws_cdk/aws_lambda/__init__.py +938 -36
  38. aws_cdk/aws_lambda_event_sources/__init__.py +638 -1
  39. aws_cdk/aws_lambda_nodejs/__init__.py +37 -3
  40. aws_cdk/aws_lex/__init__.py +703 -0
  41. aws_cdk/aws_logs/__init__.py +144 -0
  42. aws_cdk/aws_mediatailor/__init__.py +399 -0
  43. aws_cdk/aws_mpa/__init__.py +1475 -0
  44. aws_cdk/aws_msk/__init__.py +21 -2
  45. aws_cdk/aws_mwaa/__init__.py +45 -2
  46. aws_cdk/aws_networkfirewall/__init__.py +4 -2
  47. aws_cdk/aws_networkmanager/__init__.py +51 -3
  48. aws_cdk/aws_opsworkscm/__init__.py +44 -2
  49. aws_cdk/aws_rds/__init__.py +175 -42
  50. aws_cdk/aws_redshiftserverless/__init__.py +632 -0
  51. aws_cdk/aws_route53resolver/__init__.py +58 -10
  52. aws_cdk/aws_s3/__init__.py +19 -1
  53. aws_cdk/aws_s3tables/__init__.py +230 -0
  54. aws_cdk/aws_sagemaker/__init__.py +14 -10
  55. aws_cdk/aws_securityhub/__init__.py +2887 -56
  56. aws_cdk/aws_synthetics/__init__.py +21 -0
  57. aws_cdk/aws_vpclattice/__init__.py +6 -4
  58. aws_cdk/aws_wafv2/__init__.py +849 -18
  59. aws_cdk/aws_workspacesinstances/__init__.py +3243 -0
  60. aws_cdk/cloud_assembly_schema/__init__.py +200 -4
  61. aws_cdk/cx_api/__init__.py +29 -14
  62. aws_cdk/pipelines/__init__.py +178 -41
  63. aws_cdk/triggers/__init__.py +41 -4
  64. {aws_cdk_lib-2.201.0.dist-info → aws_cdk_lib-2.203.0.dist-info}/METADATA +3 -3
  65. {aws_cdk_lib-2.201.0.dist-info → aws_cdk_lib-2.203.0.dist-info}/RECORD +69 -66
  66. {aws_cdk_lib-2.201.0.dist-info → aws_cdk_lib-2.203.0.dist-info}/LICENSE +0 -0
  67. {aws_cdk_lib-2.201.0.dist-info → aws_cdk_lib-2.203.0.dist-info}/NOTICE +0 -0
  68. {aws_cdk_lib-2.201.0.dist-info → aws_cdk_lib-2.203.0.dist-info}/WHEEL +0 -0
  69. {aws_cdk_lib-2.201.0.dist-info → aws_cdk_lib-2.203.0.dist-info}/top_level.txt +0 -0
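
Most of the aws_eks churn below is one mechanical change: the README and docstring examples move from Kubernetes 1.32 with the v32 kubectl layer to 1.33 with the v33 layer. A minimal sketch of what following the updated examples looks like, assuming the separate aws-cdk.lambda-layer-kubectl-v33 package is installed and the code runs inside a Stack:

```python
from aws_cdk import aws_eks as eks
# the v33 layer ships in its own package, aws-cdk.lambda-layer-kubectl-v33
from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer  # previously ..._v32 / KubectlV32Layer

# provision a cluster pinned to the new version/layer pair
cluster = eks.Cluster(self, "hello-eks",
    # bumped from eks.KubernetesVersion.V1_32 in the 2.201.0 examples
    version=eks.KubernetesVersion.V1_33,
    kubectl_layer=KubectlV33Layer(self, "kubectl"),
)
```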
@@ -79,13 +79,13 @@ This example defines an Amazon EKS cluster with the following configuration:
  * A Kubernetes pod with a container based on the [paulbouwer/hello-kubernetes](https://github.com/paulbouwer/hello-kubernetes) image.

  ```python
- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


  # provisioning a cluster
  cluster = eks.Cluster(self, "hello-eks",
- version=eks.KubernetesVersion.V1_32,
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ version=eks.KubernetesVersion.V1_33,
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )

  # apply a kubernetes manifest to the cluster
@@ -149,24 +149,24 @@ A more detailed breakdown of each is provided further down this README.
  Creating a new cluster is done using the `Cluster` or `FargateCluster` constructs. The only required properties are the kubernetes `version` and `kubectlLayer`.

  ```python
- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


  eks.Cluster(self, "HelloEKS",
- version=eks.KubernetesVersion.V1_32,
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ version=eks.KubernetesVersion.V1_33,
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  ```

  You can also use `FargateCluster` to provision a cluster that uses only fargate workers.

  ```python
- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


  eks.FargateCluster(self, "HelloEKS",
- version=eks.KubernetesVersion.V1_32,
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ version=eks.KubernetesVersion.V1_33,
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  ```

@@ -189,14 +189,14 @@ By default, this library will allocate a managed node group with 2 *m5.large* in
  At cluster instantiation time, you can customize the number of instances and their type:

  ```python
- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


  eks.Cluster(self, "HelloEKS",
- version=eks.KubernetesVersion.V1_32,
+ version=eks.KubernetesVersion.V1_33,
  default_capacity=5,
  default_capacity_instance=ec2.InstanceType.of(ec2.InstanceClass.M5, ec2.InstanceSize.SMALL),
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  ```

@@ -205,13 +205,13 @@ To access the node group that was created on your behalf, you can use `cluster.d
  Additional customizations are available post instantiation. To apply them, set the default capacity to 0, and use the `cluster.addNodegroupCapacity` method:

  ```python
- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


  cluster = eks.Cluster(self, "HelloEKS",
- version=eks.KubernetesVersion.V1_32,
+ version=eks.KubernetesVersion.V1_33,
  default_capacity=0,
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )

  cluster.add_nodegroup_capacity("custom-node-group",
@@ -290,7 +290,7 @@ Node groups are available with IPv6 configured networks. For custom roles assig
  > For more details visit [Configuring the Amazon VPC CNI plugin for Kubernetes to use IAM roles for service accounts](https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html#cni-iam-role-create-role)

  ```python
- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


  ipv6_management = iam.PolicyDocument(
@@ -315,9 +315,9 @@ eks_cluster_node_group_role = iam.Role(self, "eksClusterNodeGroupRole",
  )

  cluster = eks.Cluster(self, "HelloEKS",
- version=eks.KubernetesVersion.V1_32,
+ version=eks.KubernetesVersion.V1_33,
  default_capacity=0,
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )

  cluster.add_nodegroup_capacity("custom-node-group",
@@ -426,13 +426,13 @@ has been changed. As a workaround, you need to add a temporary policy to the clu
  successful replacement. Consider this example if you are renaming the cluster from `foo` to `bar`:

  ```python
- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


  cluster = eks.Cluster(self, "cluster-to-rename",
  cluster_name="foo", # rename this to 'bar'
- kubectl_layer=KubectlV32Layer(self, "kubectl"),
- version=eks.KubernetesVersion.V1_32
+ kubectl_layer=KubectlV33Layer(self, "kubectl"),
+ version=eks.KubernetesVersion.V1_33
  )

  # allow the cluster admin role to delete the cluster 'foo'
@@ -485,12 +485,12 @@ To create an EKS cluster that **only** uses Fargate capacity, you can use `Farga
  The following code defines an Amazon EKS cluster with a default Fargate Profile that matches all pods from the "kube-system" and "default" namespaces. It is also configured to [run CoreDNS on Fargate](https://docs.aws.amazon.com/eks/latest/userguide/fargate-getting-started.html#fargate-gs-coredns).

  ```python
- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


  cluster = eks.FargateCluster(self, "MyCluster",
- version=eks.KubernetesVersion.V1_32,
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ version=eks.KubernetesVersion.V1_33,
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  ```

@@ -570,13 +570,13 @@ To disable bootstrapping altogether (i.e. to fully customize user-data), set `bo
  You can also configure the cluster to use an auto-scaling group as the default capacity:

  ```python
- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


  cluster = eks.Cluster(self, "HelloEKS",
- version=eks.KubernetesVersion.V1_32,
+ version=eks.KubernetesVersion.V1_33,
  default_capacity_type=eks.DefaultCapacityType.EC2,
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  ```

@@ -683,13 +683,13 @@ AWS Identity and Access Management (IAM) and native Kubernetes [Role Based Acces
  You can configure the [cluster endpoint access](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) by using the `endpointAccess` property:

  ```python
- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


  cluster = eks.Cluster(self, "hello-eks",
- version=eks.KubernetesVersion.V1_32,
+ version=eks.KubernetesVersion.V1_33,
  endpoint_access=eks.EndpointAccess.PRIVATE, # No access outside of your VPC.
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  ```

@@ -709,33 +709,33 @@ From the docs:
  To deploy the controller on your EKS cluster, configure the `albController` property:

  ```python
- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


  eks.Cluster(self, "HelloEKS",
- version=eks.KubernetesVersion.V1_32,
+ version=eks.KubernetesVersion.V1_33,
  alb_controller=eks.AlbControllerOptions(
  version=eks.AlbControllerVersion.V2_8_2
  ),
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  ```

  To provide additional Helm chart values supported by `albController` in CDK, use the `additionalHelmChartValues` property. For example, the following code snippet shows how to set the `enableWafV2` flag:

  ```python
- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


  eks.Cluster(self, "HelloEKS",
- version=eks.KubernetesVersion.V1_32,
+ version=eks.KubernetesVersion.V1_33,
  alb_controller=eks.AlbControllerOptions(
  version=eks.AlbControllerVersion.V2_8_2,
  additional_helm_chart_values=eks.AlbControllerHelmChartOptions(
  enable_wafv2=False
  )
  ),
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  ```

@@ -772,16 +772,16 @@ if cluster.alb_controller:
  You can specify the VPC of the cluster using the `vpc` and `vpcSubnets` properties:

  ```python
- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer

  # vpc: ec2.Vpc


  eks.Cluster(self, "HelloEKS",
- version=eks.KubernetesVersion.V1_32,
+ version=eks.KubernetesVersion.V1_33,
  vpc=vpc,
  vpc_subnets=[ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS)],
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  ```

@@ -825,12 +825,12 @@ The `ClusterHandler` is a set of Lambda functions (`onEventHandler`, `isComplete
  You can configure the environment of the Cluster Handler functions by specifying it at cluster instantiation. For example, this can be useful in order to configure an http proxy:

  ```python
- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer

  # proxy_instance_security_group: ec2.SecurityGroup

  cluster = eks.Cluster(self, "hello-eks",
- version=eks.KubernetesVersion.V1_32,
+ version=eks.KubernetesVersion.V1_33,
  cluster_handler_environment={
  "https_proxy": "http://proxy.myproxy.com"
  },
@@ -839,7 +839,7 @@ cluster = eks.Cluster(self, "hello-eks",
  # Cluster Handler Lambdas so that it can reach the proxy.
  #
  cluster_handler_security_group=proxy_instance_security_group,
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  ```

@@ -848,7 +848,7 @@ cluster = eks.Cluster(self, "hello-eks",
  You can optionally choose to configure your cluster to use IPv6 using the [`ipFamily`](https://docs.aws.amazon.com/eks/latest/APIReference/API_KubernetesNetworkConfigRequest.html#AmazonEKS-Type-KubernetesNetworkConfigRequest-ipFamily) definition for your cluster. Note that this will require the underlying subnets to have an associated IPv6 CIDR.

  ```python
- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer
  # vpc: ec2.Vpc


@@ -873,11 +873,11 @@ for subnet in subnets:
  subnetcount = subnetcount + 1

  cluster = eks.Cluster(self, "hello-eks",
- version=eks.KubernetesVersion.V1_32,
+ version=eks.KubernetesVersion.V1_33,
  vpc=vpc,
  ip_family=eks.IpFamily.IP_V6,
  vpc_subnets=[ec2.SubnetSelection(subnets=vpc.public_subnets)],
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  ```

@@ -908,15 +908,15 @@ cluster = eks.Cluster.from_cluster_attributes(self, "Cluster",
  You can configure the environment of this function by specifying it at cluster instantiation. For example, this can be useful in order to configure an http proxy:

  ```python
- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


  cluster = eks.Cluster(self, "hello-eks",
- version=eks.KubernetesVersion.V1_32,
+ version=eks.KubernetesVersion.V1_33,
  kubectl_environment={
  "http_proxy": "http://proxy.myproxy.com"
  },
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  ```

@@ -933,12 +933,12 @@ Depending on which version of kubernetes you're targeting, you will need to use
  the `@aws-cdk/lambda-layer-kubectl-vXY` packages.

  ```python
- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


  cluster = eks.Cluster(self, "hello-eks",
- version=eks.KubernetesVersion.V1_32,
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ version=eks.KubernetesVersion.V1_33,
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  ```

@@ -974,7 +974,7 @@ cluster1 = eks.Cluster(self, "MyCluster",
  kubectl_layer=layer,
  vpc=vpc,
  cluster_name="cluster-name",
- version=eks.KubernetesVersion.V1_32
+ version=eks.KubernetesVersion.V1_33
  )

  # or
@@ -990,7 +990,7 @@ cluster2 = eks.Cluster.from_cluster_attributes(self, "MyCluster",
  By default, the kubectl provider is configured with 1024MiB of memory. You can use the `kubectlMemory` option to specify the memory size for the AWS Lambda function:

  ```python
- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer

  # or
  # vpc: ec2.Vpc
@@ -998,8 +998,8 @@ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer

  eks.Cluster(self, "MyCluster",
  kubectl_memory=Size.gibibytes(4),
- version=eks.KubernetesVersion.V1_32,
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ version=eks.KubernetesVersion.V1_33,
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  eks.Cluster.from_cluster_attributes(self, "MyCluster",
  kubectl_memory=Size.gibibytes(4),
@@ -1034,14 +1034,14 @@ cluster.add_auto_scaling_group_capacity("self-ng-arm",
  When you create a cluster, you can specify a `mastersRole`. The `Cluster` construct will associate this role with the `system:masters` [RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) group, giving it super-user access to the cluster.

  ```python
- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer

  # role: iam.Role

  eks.Cluster(self, "HelloEKS",
- version=eks.KubernetesVersion.V1_32,
+ version=eks.KubernetesVersion.V1_33,
  masters_role=role,
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  ```

@@ -1087,28 +1087,28 @@ You can use the `secretsEncryptionKey` to configure which key the cluster will u
  > This setting can only be specified when the cluster is created and cannot be updated.

  ```python
- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


  secrets_key = kms.Key(self, "SecretsKey")
  cluster = eks.Cluster(self, "MyCluster",
  secrets_encryption_key=secrets_key,
- version=eks.KubernetesVersion.V1_32,
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ version=eks.KubernetesVersion.V1_33,
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  ```

  You can also use a similar configuration for running a cluster built using the FargateCluster construct.

  ```python
- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


  secrets_key = kms.Key(self, "SecretsKey")
  cluster = eks.FargateCluster(self, "MyFargateCluster",
  secrets_encryption_key=secrets_key,
- version=eks.KubernetesVersion.V1_32,
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ version=eks.KubernetesVersion.V1_33,
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  ```

@@ -1127,12 +1127,12 @@ When you create an Amazon EKS cluster, you can configure it to leverage the [EKS
  Once you have identified the on-premises node and pod (optional) CIDRs you will use for your hybrid nodes and the workloads running on them, you can specify them during cluster creation using the `remoteNodeNetworks` and `remotePodNetworks` (optional) properties:

  ```python
- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


  eks.Cluster(self, "Cluster",
- version=eks.KubernetesVersion.V1_32,
- kubectl_layer=KubectlV32Layer(self, "KubectlLayer"),
+ version=eks.KubernetesVersion.V1_33,
+ kubectl_layer=KubectlV33Layer(self, "KubectlLayer"),
  remote_node_networks=[eks.RemoteNodeNetwork(
  cidrs=["10.0.0.0/16"]
  )
@@ -1185,7 +1185,7 @@ To access the Kubernetes resources from the console, make sure your viewing prin
  in the `aws-auth` ConfigMap. Some options to consider:

  ```python
- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer
  # cluster: eks.Cluster
  # your_current_role: iam.Role
  # vpc: ec2.Vpc
@@ -1203,7 +1203,7 @@ your_current_role.add_to_policy(iam.PolicyStatement(

  ```python
  # Option 2: create your custom mastersRole with scoped assumeBy arn as the Cluster prop. Switch to this role from the AWS console.
- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer
  # vpc: ec2.Vpc


@@ -1213,8 +1213,8 @@ masters_role = iam.Role(self, "MastersRole",

  cluster = eks.Cluster(self, "EksCluster",
  vpc=vpc,
- version=eks.KubernetesVersion.V1_32,
- kubectl_layer=KubectlV32Layer(self, "KubectlLayer"),
+ version=eks.KubernetesVersion.V1_33,
+ kubectl_layer=KubectlV33Layer(self, "KubectlLayer"),
  masters_role=masters_role
  )

@@ -1253,14 +1253,14 @@ AWS IAM principals from both Amazon EKS access entry APIs and the aws-auth confi
  To specify the `authenticationMode`:

  ```python
- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer
  # vpc: ec2.Vpc


  eks.Cluster(self, "Cluster",
  vpc=vpc,
- version=eks.KubernetesVersion.V1_32,
- kubectl_layer=KubectlV32Layer(self, "KubectlLayer"),
+ version=eks.KubernetesVersion.V1_33,
+ kubectl_layer=KubectlV33Layer(self, "KubectlLayer"),
  authentication_mode=eks.AuthenticationMode.API_AND_CONFIG_MAP
  )
  ```
@@ -1305,7 +1305,7 @@ eks.AccessPolicy.from_access_policy_name("AmazonEKSAdminPolicy",
  Use `grantAccess()` to grant the AccessPolicy to an IAM principal:

  ```python
- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer
  # vpc: ec2.Vpc


@@ -1324,8 +1324,8 @@ eks_admin_view_role = iam.Role(self, "EKSAdminViewRole",
  cluster = eks.Cluster(self, "Cluster",
  vpc=vpc,
  masters_role=cluster_admin_role,
- version=eks.KubernetesVersion.V1_32,
- kubectl_layer=KubectlV32Layer(self, "KubectlLayer"),
+ version=eks.KubernetesVersion.V1_33,
+ kubectl_layer=KubectlV33Layer(self, "KubectlLayer"),
  authentication_mode=eks.AuthenticationMode.API_AND_CONFIG_MAP
  )

@@ -1656,13 +1656,13 @@ Pruning is enabled by default but can be disabled through the `prune` option
  when a cluster is defined:

  ```python
- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


  eks.Cluster(self, "MyCluster",
- version=eks.KubernetesVersion.V1_32,
+ version=eks.KubernetesVersion.V1_33,
  prune=False,
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  ```

@@ -2061,15 +2061,15 @@ You can enable logging for each one separately using the `clusterLogging`
  property. For example:

  ```python
- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


  cluster = eks.Cluster(self, "Cluster",
  # ...
- version=eks.KubernetesVersion.V1_32,
+ version=eks.KubernetesVersion.V1_33,
  cluster_logging=[eks.ClusterLoggingTypes.API, eks.ClusterLoggingTypes.AUTHENTICATOR, eks.ClusterLoggingTypes.SCHEDULER
  ],
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  ```

@@ -3065,18 +3065,18 @@ class AlbControllerHelmChartOptions:

  Example::

- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


  eks.Cluster(self, "HelloEKS",
- version=eks.KubernetesVersion.V1_32,
+ version=eks.KubernetesVersion.V1_33,
  alb_controller=eks.AlbControllerOptions(
  version=eks.AlbControllerVersion.V2_8_2,
  additional_helm_chart_values=eks.AlbControllerHelmChartOptions(
  enable_wafv2=False
  )
  ),
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  '''
  if __debug__:
@@ -3149,15 +3149,15 @@ class AlbControllerOptions:

  Example::

- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


  eks.Cluster(self, "HelloEKS",
- version=eks.KubernetesVersion.V1_32,
+ version=eks.KubernetesVersion.V1_33,
  alb_controller=eks.AlbControllerOptions(
  version=eks.AlbControllerVersion.V2_8_2
  ),
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  '''
  if isinstance(additional_helm_chart_values, dict):
@@ -3385,15 +3385,15 @@ class AlbControllerVersion(

  Example::

- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


  eks.Cluster(self, "HelloEKS",
- version=eks.KubernetesVersion.V1_32,
+ version=eks.KubernetesVersion.V1_33,
  alb_controller=eks.AlbControllerOptions(
  version=eks.AlbControllerVersion.V2_8_2
  ),
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  '''

@@ -3669,14 +3669,14 @@ class AuthenticationMode(enum.Enum):

  Example::

- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer
  # vpc: ec2.Vpc


  eks.Cluster(self, "Cluster",
  vpc=vpc,
- version=eks.KubernetesVersion.V1_32,
- kubectl_layer=KubectlV32Layer(self, "KubectlLayer"),
+ version=eks.KubernetesVersion.V1_33,
+ kubectl_layer=KubectlV33Layer(self, "KubectlLayer"),
  authentication_mode=eks.AuthenticationMode.API_AND_CONFIG_MAP
  )
  '''
@@ -5587,7 +5587,7 @@ class CfnAddon(
  :param cluster_name: The name of your cluster.
  :param addon_version: The version of the add-on.
  :param configuration_values: The configuration values that you provided.
- :param pod_identity_associations: An array of Pod Identity Assocations owned by the Addon. Each EKS Pod Identity association maps a role to a service account in a namespace in the cluster. For more information, see `Attach an IAM Role to an Amazon EKS add-on using Pod Identity <https://docs.aws.amazon.com/eks/latest/userguide/add-ons-iam.html>`_ in the *Amazon EKS User Guide* .
+ :param pod_identity_associations: An array of EKS Pod Identity associations owned by the add-on. Each association maps a role to a service account in a namespace in the cluster. For more information, see `Attach an IAM Role to an Amazon EKS add-on using EKS Pod Identity <https://docs.aws.amazon.com/eks/latest/userguide/add-ons-iam.html>`_ in the *Amazon EKS User Guide* .
  :param preserve_on_delete: Specifying this option preserves the add-on software on your cluster but Amazon EKS stops managing any settings for the add-on. If an IAM account is associated with the add-on, it isn't removed.
  :param resolve_conflicts: How to resolve field value conflicts for an Amazon EKS add-on. Conflicts are handled based on the value you choose: - *None* – If the self-managed version of the add-on is installed on your cluster, Amazon EKS doesn't change the value. Creation of the add-on might fail. - *Overwrite* – If the self-managed version of the add-on is installed on your cluster and the Amazon EKS default value is different than the existing value, Amazon EKS changes the value to the Amazon EKS default value. - *Preserve* – This is similar to the NONE option. If the self-managed version of the add-on is installed on your cluster Amazon EKS doesn't change the add-on resource properties. Creation of the add-on might fail if conflicts are detected. This option works differently during the update operation. For more information, see ```UpdateAddon`` <https://docs.aws.amazon.com/eks/latest/APIReference/API_UpdateAddon.html>`_ . If you don't currently have the self-managed version of the add-on installed on your cluster, the Amazon EKS add-on is installed. Amazon EKS sets all values to default values, regardless of the option that you specify.
  :param service_account_role_arn: The Amazon Resource Name (ARN) of an existing IAM role to bind to the add-on's service account. The role must be assigned the IAM permissions required by the add-on. If you don't specify an existing IAM role, then the add-on uses the permissions assigned to the node IAM role. For more information, see `Amazon EKS node IAM role <https://docs.aws.amazon.com/eks/latest/userguide/create-node-role.html>`_ in the *Amazon EKS User Guide* . .. epigraph:: To specify an existing IAM role, you must have an IAM OpenID Connect (OIDC) provider created for your cluster. For more information, see `Enabling IAM roles for service accounts on your cluster <https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html>`_ in the *Amazon EKS User Guide* .
@@ -5718,7 +5718,7 @@ class CfnAddon(
  def pod_identity_associations(
  self,
  ) -> typing.Optional[typing.Union[_IResolvable_da3f097b, typing.List[typing.Union[_IResolvable_da3f097b, "CfnAddon.PodIdentityAssociationProperty"]]]]:
- '''An array of Pod Identity Assocations owned by the Addon.'''
+ '''An array of EKS Pod Identity associations owned by the add-on.'''
  return typing.cast(typing.Optional[typing.Union[_IResolvable_da3f097b, typing.List[typing.Union[_IResolvable_da3f097b, "CfnAddon.PodIdentityAssociationProperty"]]]], jsii.get(self, "podIdentityAssociations"))

  @pod_identity_associations.setter
@@ -5802,7 +5802,7 @@ class CfnAddon(
  ) -> None:
  '''Amazon EKS Pod Identity associations provide the ability to manage credentials for your applications, similar to the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances.

- :param role_arn: The Amazon Resource Name (ARN) of the IAM role to associate with the service account. The EKS Pod Identity agent manages credentials to assume this role for applications in the containers in the pods that use this service account.
+ :param role_arn: The Amazon Resource Name (ARN) of the IAM role to associate with the service account. The EKS Pod Identity agent manages credentials to assume this role for applications in the containers in the Pods that use this service account.
  :param service_account: The name of the Kubernetes service account inside the cluster to associate the IAM credentials with.

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-eks-addon-podidentityassociation.html
@@ -5832,7 +5832,7 @@ class CfnAddon(
  def role_arn(self) -> builtins.str:
  '''The Amazon Resource Name (ARN) of the IAM role to associate with the service account.

- The EKS Pod Identity agent manages credentials to assume this role for applications in the containers in the pods that use this service account.
+ The EKS Pod Identity agent manages credentials to assume this role for applications in the containers in the Pods that use this service account.

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-eks-addon-podidentityassociation.html#cfn-eks-addon-podidentityassociation-rolearn
  '''
@@ -5897,7 +5897,7 @@ class CfnAddonProps:
  :param cluster_name: The name of your cluster.
  :param addon_version: The version of the add-on.
  :param configuration_values: The configuration values that you provided.
- :param pod_identity_associations: An array of Pod Identity Assocations owned by the Addon. Each EKS Pod Identity association maps a role to a service account in a namespace in the cluster. For more information, see `Attach an IAM Role to an Amazon EKS add-on using Pod Identity <https://docs.aws.amazon.com/eks/latest/userguide/add-ons-iam.html>`_ in the *Amazon EKS User Guide* .
+ :param pod_identity_associations: An array of EKS Pod Identity associations owned by the add-on. Each association maps a role to a service account in a namespace in the cluster. For more information, see `Attach an IAM Role to an Amazon EKS add-on using EKS Pod Identity <https://docs.aws.amazon.com/eks/latest/userguide/add-ons-iam.html>`_ in the *Amazon EKS User Guide* .
  :param preserve_on_delete: Specifying this option preserves the add-on software on your cluster but Amazon EKS stops managing any settings for the add-on. If an IAM account is associated with the add-on, it isn't removed.
  :param resolve_conflicts: How to resolve field value conflicts for an Amazon EKS add-on. Conflicts are handled based on the value you choose: - *None* – If the self-managed version of the add-on is installed on your cluster, Amazon EKS doesn't change the value. Creation of the add-on might fail. - *Overwrite* – If the self-managed version of the add-on is installed on your cluster and the Amazon EKS default value is different than the existing value, Amazon EKS changes the value to the Amazon EKS default value. - *Preserve* – This is similar to the NONE option. If the self-managed version of the add-on is installed on your cluster Amazon EKS doesn't change the add-on resource properties. Creation of the add-on might fail if conflicts are detected. This option works differently during the update operation. For more information, see ```UpdateAddon`` <https://docs.aws.amazon.com/eks/latest/APIReference/API_UpdateAddon.html>`_ . If you don't currently have the self-managed version of the add-on installed on your cluster, the Amazon EKS add-on is installed. Amazon EKS sets all values to default values, regardless of the option that you specify.
  :param service_account_role_arn: The Amazon Resource Name (ARN) of an existing IAM role to bind to the add-on's service account. The role must be assigned the IAM permissions required by the add-on. If you don't specify an existing IAM role, then the add-on uses the permissions assigned to the node IAM role. For more information, see `Amazon EKS node IAM role <https://docs.aws.amazon.com/eks/latest/userguide/create-node-role.html>`_ in the *Amazon EKS User Guide* . .. epigraph:: To specify an existing IAM role, you must have an IAM OpenID Connect (OIDC) provider created for your cluster. For more information, see `Enabling IAM roles for service accounts on your cluster <https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html>`_ in the *Amazon EKS User Guide* .
@@ -6004,11 +6004,11 @@ class CfnAddonProps:
  def pod_identity_associations(
  self,
  ) -> typing.Optional[typing.Union[_IResolvable_da3f097b, typing.List[typing.Union[_IResolvable_da3f097b, CfnAddon.PodIdentityAssociationProperty]]]]:
- '''An array of Pod Identity Assocations owned by the Addon.
+ '''An array of EKS Pod Identity associations owned by the add-on.

- Each EKS Pod Identity association maps a role to a service account in a namespace in the cluster.
+ Each association maps a role to a service account in a namespace in the cluster.

- For more information, see `Attach an IAM Role to an Amazon EKS add-on using Pod Identity <https://docs.aws.amazon.com/eks/latest/userguide/add-ons-iam.html>`_ in the *Amazon EKS User Guide* .
+ For more information, see `Attach an IAM Role to an Amazon EKS add-on using EKS Pod Identity <https://docs.aws.amazon.com/eks/latest/userguide/add-ons-iam.html>`_ in the *Amazon EKS User Guide* .

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-eks-addon.html#cfn-eks-addon-podidentityassociations
  '''
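
The reworded `pod_identity_associations` docstrings above only change terminology; the L1 API shape is unchanged. A minimal sketch of the construct they describe (the add-on name, cluster name, role ARN, and service account below are placeholders, not values from this release):

```python
from aws_cdk import aws_eks as eks

eks.CfnAddon(self, "MyAddon",
    addon_name="vpc-cni",        # placeholder add-on
    cluster_name="my-cluster",   # placeholder cluster
    pod_identity_associations=[
        # each association maps an IAM role to a service account in the cluster
        eks.CfnAddon.PodIdentityAssociationProperty(
            role_arn="arn:aws:iam::123456789012:role/MyAddonRole",
            service_account="my-service-account",
        )
    ],
)
```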
@@ -6228,7 +6228,7 @@ class CfnCluster(
  :param resources_vpc_config: The VPC configuration that's used by the cluster control plane. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see `Cluster VPC Considerations <https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html>`_ and `Cluster Security Group Considerations <https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html>`_ in the *Amazon EKS User Guide* . You must specify at least two subnets. You can specify up to five security groups, but we recommend that you use a dedicated security group for your cluster control plane.
  :param role_arn: The Amazon Resource Name (ARN) of the IAM role that provides permissions for the Kubernetes control plane to make calls to AWS API operations on your behalf. For more information, see `Amazon EKS Service IAM Role <https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html>`_ in the **Amazon EKS User Guide** .
  :param access_config: The access configuration for the cluster.
- :param bootstrap_self_managed_addons: If you set this value to ``False`` when creating a cluster, the default networking add-ons will not be installed. The default networking addons include vpc-cni, coredns, and kube-proxy. Use this option when you plan to install third-party alternative add-ons or self-manage the default networking add-ons.
+ :param bootstrap_self_managed_addons: If you set this value to ``False`` when creating a cluster, the default networking add-ons will not be installed. The default networking add-ons include ``vpc-cni`` , ``coredns`` , and ``kube-proxy`` . Use this option when you plan to install third-party alternative add-ons or self-manage the default networking add-ons.
  :param compute_config: Indicates the current configuration of the compute capability on your EKS Auto Mode cluster. For example, if the capability is enabled or disabled. If the compute capability is enabled, EKS Auto Mode will create and delete EC2 Managed Instances in your AWS account. For more information, see EKS Auto Mode compute capability in the *Amazon EKS User Guide* .
  :param encryption_config: The encryption configuration for the cluster.
  :param force: Set this value to ``true`` to override upgrade-blocking readiness checks when updating a cluster. Default: - false
@@ -7596,8 +7596,8 @@ class CfnCluster(

  You can add, change, or remove this configuration after the cluster is created.

- :param remote_node_networks: The list of network CIDRs that can contain hybrid nodes. These CIDR blocks define the expected IP address range of the hybrid nodes that join the cluster. These blocks are typically determined by your network administrator. Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, ``10.2.0.0/16`` ). It must satisfy the following requirements: - Each block must be within an ``IPv4`` RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported. - Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range. - Each block must have a route to the VPC that uses the VPC CIDR blocks, not public IPs or Elastic IPs. There are many options including AWS Transit Gateway , AWS Site-to-Site VPN , or AWS Direct Connect . - Each host must allow outbound connection to the EKS cluster control plane on TCP ports ``443`` and ``10250`` . - Each host must allow inbound connection from the EKS cluster control plane on TCP port 10250 for logs, exec and port-forward operations. - Each host must allow TCP and UDP network connectivity to and from other hosts that are running ``CoreDNS`` on UDP port ``53`` for service and pod DNS names.
- :param remote_pod_networks: The list of network CIDRs that can contain pods that run Kubernetes webhooks on hybrid nodes. These CIDR blocks are determined by configuring your Container Network Interface (CNI) plugin. We recommend the Calico CNI or Cilium CNI. Note that the Amazon VPC CNI plugin for Kubernetes isn't available for on-premises and edge locations. Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, ``10.2.0.0/16`` ). It must satisfy the following requirements: - Each block must be within an ``IPv4`` RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported. - Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.
+ :param remote_node_networks: The list of network CIDRs that can contain hybrid nodes. These CIDR blocks define the expected IP address range of the hybrid nodes that join the cluster. These blocks are typically determined by your network administrator. Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, ``10.2.0.0/16`` ). It must satisfy the following requirements: - Each block must be within an ``IPv4`` RFC-1918 network range. Minimum allowed size is /32, maximum allowed size is /8. Publicly-routable addresses aren't supported. - Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range. - Each block must have a route to the VPC that uses the VPC CIDR blocks, not public IPs or Elastic IPs. There are many options including AWS Transit Gateway , AWS Site-to-Site VPN , or AWS Direct Connect . - Each host must allow outbound connection to the EKS cluster control plane on TCP ports ``443`` and ``10250`` . - Each host must allow inbound connection from the EKS cluster control plane on TCP port 10250 for logs, exec and port-forward operations. - Each host must allow TCP and UDP network connectivity to and from other hosts that are running ``CoreDNS`` on UDP port ``53`` for service and pod DNS names.
+ :param remote_pod_networks: The list of network CIDRs that can contain pods that run Kubernetes webhooks on hybrid nodes. These CIDR blocks are determined by configuring your Container Network Interface (CNI) plugin. We recommend the Calico CNI or Cilium CNI. Note that the Amazon VPC CNI plugin for Kubernetes isn't available for on-premises and edge locations. Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, ``10.2.0.0/16`` ). It must satisfy the following requirements: - Each block must be within an ``IPv4`` RFC-1918 network range. Minimum allowed size is /32, maximum allowed size is /8. Publicly-routable addresses aren't supported. - Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-eks-cluster-remotenetworkconfig.html
  :exampleMetadata: fixture=_generated
@@ -7641,7 +7641,7 @@ class CfnCluster(

  It must satisfy the following requirements:

- - Each block must be within an ``IPv4`` RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported.
+ - Each block must be within an ``IPv4`` RFC-1918 network range. Minimum allowed size is /32, maximum allowed size is /8. Publicly-routable addresses aren't supported.
  - Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.
  - Each block must have a route to the VPC that uses the VPC CIDR blocks, not public IPs or Elastic IPs. There are many options including AWS Transit Gateway , AWS Site-to-Site VPN , or AWS Direct Connect .
  - Each host must allow outbound connection to the EKS cluster control plane on TCP ports ``443`` and ``10250`` .
@@ -7666,7 +7666,7 @@ class CfnCluster(

  It must satisfy the following requirements:

- - Each block must be within an ``IPv4`` RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported.
+ - Each block must be within an ``IPv4`` RFC-1918 network range. Minimum allowed size is /32, maximum allowed size is /8. Publicly-routable addresses aren't supported.
  - Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-eks-cluster-remotenetworkconfig.html#cfn-eks-cluster-remotenetworkconfig-remotepodnetworks
@@ -7700,14 +7700,14 @@ class CfnCluster(

  It must satisfy the following requirements:

- - Each block must be within an ``IPv4`` RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported.
+ - Each block must be within an ``IPv4`` RFC-1918 network range. Minimum allowed size is /32, maximum allowed size is /8. Publicly-routable addresses aren't supported.
  - Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.
  - Each block must have a route to the VPC that uses the VPC CIDR blocks, not public IPs or Elastic IPs. There are many options including AWS Transit Gateway , AWS Site-to-Site VPN , or AWS Direct Connect .
  - Each host must allow outbound connection to the EKS cluster control plane on TCP ports ``443`` and ``10250`` .
  - Each host must allow inbound connection from the EKS cluster control plane on TCP port 10250 for logs, exec and port-forward operations.
  - Each host must allow TCP and UDP network connectivity to and from other hosts that are running ``CoreDNS`` on UDP port ``53`` for service and pod DNS names.

- :param cidrs: A network CIDR that can contain hybrid nodes. These CIDR blocks define the expected IP address range of the hybrid nodes that join the cluster. These blocks are typically determined by your network administrator. Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, ``10.2.0.0/16`` ). It must satisfy the following requirements: - Each block must be within an ``IPv4`` RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported. - Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range. - Each block must have a route to the VPC that uses the VPC CIDR blocks, not public IPs or Elastic IPs. There are many options including AWS Transit Gateway , AWS Site-to-Site VPN , or AWS Direct Connect . - Each host must allow outbound connection to the EKS cluster control plane on TCP ports ``443`` and ``10250`` . - Each host must allow inbound connection from the EKS cluster control plane on TCP port 10250 for logs, exec and port-forward operations. - Each host must allow TCP and UDP network connectivity to and from other hosts that are running ``CoreDNS`` on UDP port ``53`` for service and pod DNS names.
+ :param cidrs: A network CIDR that can contain hybrid nodes. These CIDR blocks define the expected IP address range of the hybrid nodes that join the cluster. These blocks are typically determined by your network administrator. Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, ``10.2.0.0/16`` ). It must satisfy the following requirements: - Each block must be within an ``IPv4`` RFC-1918 network range. Minimum allowed size is /32, maximum allowed size is /8. Publicly-routable addresses aren't supported. - Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range. - Each block must have a route to the VPC that uses the VPC CIDR blocks, not public IPs or Elastic IPs. There are many options including AWS Transit Gateway , AWS Site-to-Site VPN , or AWS Direct Connect . - Each host must allow outbound connection to the EKS cluster control plane on TCP ports ``443`` and ``10250`` . - Each host must allow inbound connection from the EKS cluster control plane on TCP port 10250 for logs, exec and port-forward operations. - Each host must allow TCP and UDP network connectivity to and from other hosts that are running ``CoreDNS`` on UDP port ``53`` for service and pod DNS names.

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-eks-cluster-remotenodenetwork.html
  :exampleMetadata: fixture=_generated
@@ -7739,7 +7739,7 @@ class CfnCluster(

  It must satisfy the following requirements:

- - Each block must be within an ``IPv4`` RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported.
+ - Each block must be within an ``IPv4`` RFC-1918 network range. Minimum allowed size is /32, maximum allowed size is /8. Publicly-routable addresses aren't supported.
  - Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.
  - Each block must have a route to the VPC that uses the VPC CIDR blocks, not public IPs or Elastic IPs. There are many options including AWS Transit Gateway , AWS Site-to-Site VPN , or AWS Direct Connect .
  - Each host must allow outbound connection to the EKS cluster control plane on TCP ports ``443`` and ``10250`` .
@@ -7778,10 +7778,10 @@ class CfnCluster(

  It must satisfy the following requirements:

- - Each block must be within an ``IPv4`` RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported.
+ - Each block must be within an ``IPv4`` RFC-1918 network range. Minimum allowed size is /32, maximum allowed size is /8. Publicly-routable addresses aren't supported.
  - Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

- :param cidrs: A network CIDR that can contain pods that run Kubernetes webhooks on hybrid nodes. These CIDR blocks are determined by configuring your Container Network Interface (CNI) plugin. We recommend the Calico CNI or Cilium CNI. Note that the Amazon VPC CNI plugin for Kubernetes isn't available for on-premises and edge locations. Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, ``10.2.0.0/16`` ). It must satisfy the following requirements: - Each block must be within an ``IPv4`` RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported. - Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.
+ :param cidrs: A network CIDR that can contain pods that run Kubernetes webhooks on hybrid nodes. These CIDR blocks are determined by configuring your Container Network Interface (CNI) plugin. We recommend the Calico CNI or Cilium CNI. Note that the Amazon VPC CNI plugin for Kubernetes isn't available for on-premises and edge locations. Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, ``10.2.0.0/16`` ). It must satisfy the following requirements: - Each block must be within an ``IPv4`` RFC-1918 network range. Minimum allowed size is /32, maximum allowed size is /8. Publicly-routable addresses aren't supported. - Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-eks-cluster-remotepodnetwork.html
  :exampleMetadata: fixture=_generated
@@ -7813,7 +7813,7 @@ class CfnCluster(

  It must satisfy the following requirements:

- - Each block must be within an ``IPv4`` RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported.
+ - Each block must be within an ``IPv4`` RFC-1918 network range. Minimum allowed size is /32, maximum allowed size is /8. Publicly-routable addresses aren't supported.
  - Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-eks-cluster-remotepodnetwork.html#cfn-eks-cluster-remotepodnetwork-cidrs
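
The /24 to /32 edits above relax the documented minimum CIDR size for hybrid-node and webhook-pod networks. For orientation, a sketch of where these CIDR lists sit in the L1 property model described by the docstrings (the CIDR values are placeholders):

```python
from aws_cdk import aws_eks as eks

# remote networks for EKS Hybrid Nodes; per the updated docs, any RFC-1918
# block from /32 up to /8 is valid
remote_network_config = eks.CfnCluster.RemoteNetworkConfigProperty(
    remote_node_networks=[eks.CfnCluster.RemoteNodeNetworkProperty(
        cidrs=["10.2.0.0/16"],  # placeholder on-premises node range
    )],
    remote_pod_networks=[eks.CfnCluster.RemotePodNetworkProperty(
        cidrs=["10.3.0.0/16"],  # placeholder webhook pod range
    )],
)
```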
@@ -8213,7 +8213,7 @@ class CfnClusterProps:
  :param resources_vpc_config: The VPC configuration that's used by the cluster control plane. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see `Cluster VPC Considerations <https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html>`_ and `Cluster Security Group Considerations <https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html>`_ in the *Amazon EKS User Guide* . You must specify at least two subnets. You can specify up to five security groups, but we recommend that you use a dedicated security group for your cluster control plane.
  :param role_arn: The Amazon Resource Name (ARN) of the IAM role that provides permissions for the Kubernetes control plane to make calls to AWS API operations on your behalf. For more information, see `Amazon EKS Service IAM Role <https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html>`_ in the **Amazon EKS User Guide** .
  :param access_config: The access configuration for the cluster.
- :param bootstrap_self_managed_addons: If you set this value to ``False`` when creating a cluster, the default networking add-ons will not be installed. The default networking addons include vpc-cni, coredns, and kube-proxy. Use this option when you plan to install third-party alternative add-ons or self-manage the default networking add-ons.
+ :param bootstrap_self_managed_addons: If you set this value to ``False`` when creating a cluster, the default networking add-ons will not be installed. The default networking add-ons include ``vpc-cni`` , ``coredns`` , and ``kube-proxy`` . Use this option when you plan to install third-party alternative add-ons or self-manage the default networking add-ons.
  :param compute_config: Indicates the current configuration of the compute capability on your EKS Auto Mode cluster. For example, if the capability is enabled or disabled. If the compute capability is enabled, EKS Auto Mode will create and delete EC2 Managed Instances in your AWS account. For more information, see EKS Auto Mode compute capability in the *Amazon EKS User Guide* .
  :param encryption_config: The encryption configuration for the cluster.
  :param force: Set this value to ``true`` to override upgrade-blocking readiness checks when updating a cluster. Default: - false
@@ -8417,7 +8417,7 @@ class CfnClusterProps:
  ) -> typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]]:
  '''If you set this value to ``False`` when creating a cluster, the default networking add-ons will not be installed.

- The default networking addons include vpc-cni, coredns, and kube-proxy.
+ The default networking add-ons include ``vpc-cni`` , ``coredns`` , and ``kube-proxy`` .

  Use this option when you plan to install third-party alternative add-ons or self-manage the default networking add-ons.

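Illustrative sketch (not part of the package diff): opting out of the default networking add-ons on the L1 construct, as the docstring above describes. A minimal sketch assuming a surrounding ``Stack`` scope (``self``); the cluster name, role ARN, and subnet IDs are placeholders::

    from aws_cdk import aws_eks as eks

    cluster = eks.CfnCluster(self, "Cluster",
        name="my-cluster",
        role_arn="arn:aws:iam::111122223333:role/eks-cluster-role",
        resources_vpc_config=eks.CfnCluster.ResourcesVpcConfigProperty(
            subnet_ids=["subnet-aaaa", "subnet-bbbb"],
        ),
        # Skip installing vpc-cni, coredns, and kube-proxy, e.g. when
        # bringing a third-party CNI or self-managing the add-ons.
        bootstrap_self_managed_addons=False,
    )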
@@ -11241,12 +11241,12 @@ class CfnPodIdentityAssociation(
  :param scope: Scope in which this resource is defined.
  :param id: Construct identifier for this resource (unique in its scope).
  :param cluster_name: The name of the cluster that the association is in.
- :param namespace: The name of the Kubernetes namespace inside the cluster to create the association in. The service account and the pods that use the service account must be in this namespace.
- :param role_arn: The Amazon Resource Name (ARN) of the IAM role to associate with the service account. The EKS Pod Identity agent manages credentials to assume this role for applications in the containers in the pods that use this service account.
+ :param namespace: The name of the Kubernetes namespace inside the cluster to create the association in. The service account and the Pods that use the service account must be in this namespace.
+ :param role_arn: The Amazon Resource Name (ARN) of the IAM role to associate with the service account. The EKS Pod Identity agent manages credentials to assume this role for applications in the containers in the Pods that use this service account.
  :param service_account: The name of the Kubernetes service account inside the cluster to associate the IAM credentials with.
- :param disable_session_tags: The Disable Session Tags of the pod identity association.
+ :param disable_session_tags: The state of the automatic sessions tags. The value of *true* disables these tags. EKS Pod Identity adds a pre-defined set of session tags when it assumes the role. You can use these tags to author a single role that can work across resources by allowing access to AWS resources based on matching tags. By default, EKS Pod Identity attaches six tags, including tags for cluster name, namespace, and service account name. For the list of tags added by EKS Pod Identity, see `List of session tags added by EKS Pod Identity <https://docs.aws.amazon.com/eks/latest/userguide/pod-id-abac.html#pod-id-abac-tags>`_ in the *Amazon EKS User Guide* .
  :param tags: Metadata that assists with categorization and organization. Each tag consists of a key and an optional value. You define both. Tags don't propagate to any other cluster or AWS resources. The following basic restrictions apply to tags: - Maximum number of tags per resource – 50 - For each resource, each tag key must be unique, and each tag key can have only one value. - Maximum key length – 128 Unicode characters in UTF-8 - Maximum value length – 256 Unicode characters in UTF-8 - If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : /
- :param target_role_arn: The Target Role Arn of the pod identity association.
+ :param target_role_arn: The Amazon Resource Name (ARN) of the target IAM role to associate with the service account. This role is assumed by using the EKS Pod Identity association role, then the credentials for this role are injected into the Pod.
  '''
  if __debug__:
      type_hints = typing.get_type_hints(_typecheckingstub__be8311b6089cea26f85c63a586f0c5b063230a1b4a96ffcd4c6c983a331d8652)
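Illustrative sketch (not part of the package diff): creating a ``CfnPodIdentityAssociation`` with the two properties this release re-documents, ``disable_session_tags`` and ``target_role_arn``. Assumes a surrounding ``Stack`` scope; the names and ARNs are placeholders::

    from aws_cdk import aws_eks as eks

    assoc = eks.CfnPodIdentityAssociation(self, "PodIdentity",
        cluster_name="my-cluster",
        namespace="payments",  # the service account and its Pods live here
        role_arn="arn:aws:iam::111122223333:role/pod-identity-role",
        service_account="payments-sa",
        disable_session_tags=False,  # keep the automatic session tags
        # Optional role chaining: the association role assumes this target role.
        target_role_arn="arn:aws:iam::111122223333:role/payments-target-role",
    )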
@@ -11315,7 +11315,11 @@ class CfnPodIdentityAssociation(
  @builtins.property
  @jsii.member(jsii_name="attrExternalId")
  def attr_external_id(self) -> builtins.str:
- '''The External Id of the pod identity association.
+ '''The unique identifier for this EKS Pod Identity association for a target IAM role.
+
+ You put this value in the trust policy of the target role, in a ``Condition`` to match the ``sts.ExternalId`` . This ensures that the target role can only be assumed by this association. This prevents the *confused deputy problem* . For more information about the confused deputy problem, see `The confused deputy problem <https://docs.aws.amazon.com/IAM/latest/UserGuide/confused-deputy.html>`_ in the *IAM User Guide* .
+
+ If you want to use the same target role with multiple associations or other roles, use independent statements in the trust policy to allow ``sts:AssumeRole`` access from each role.

  :cloudformationAttribute: ExternalId
  '''
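Illustrative sketch (not part of the package diff): wiring ``attr_external_id`` into the target role's trust policy via ``sts:ExternalId``, as the new docstring above recommends. A minimal sketch assuming the ``assoc`` resource from the previous sketch; the association-role ARN is a placeholder::

    from aws_cdk import aws_iam as iam

    target_role = iam.Role(self, "TargetRole",
        assumed_by=iam.ArnPrincipal(
            "arn:aws:iam::111122223333:role/pod-identity-role"
        ).with_conditions({
            # Pin the trust policy to this one association, guarding
            # against the confused deputy problem described above.
            "StringEquals": {"sts:ExternalId": assoc.attr_external_id},
        }),
    )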
@@ -11389,7 +11393,10 @@ class CfnPodIdentityAssociation(
  def disable_session_tags(
  self,
  ) -> typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]]:
- '''The Disable Session Tags of the pod identity association.'''
+ '''The state of the automatic sessions tags.
+
+ The value of *true* disables these tags.
+ '''
  return typing.cast(typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]], jsii.get(self, "disableSessionTags"))

  @disable_session_tags.setter
@@ -11418,7 +11425,7 @@ class CfnPodIdentityAssociation(
  @builtins.property
  @jsii.member(jsii_name="targetRoleArn")
  def target_role_arn(self) -> typing.Optional[builtins.str]:
- '''The Target Role Arn of the pod identity association.'''
+ '''The Amazon Resource Name (ARN) of the target IAM role to associate with the service account.'''
  return typing.cast(typing.Optional[builtins.str], jsii.get(self, "targetRoleArn"))

  @target_role_arn.setter
@@ -11457,12 +11464,12 @@ class CfnPodIdentityAssociationProps:
  '''Properties for defining a ``CfnPodIdentityAssociation``.

  :param cluster_name: The name of the cluster that the association is in.
- :param namespace: The name of the Kubernetes namespace inside the cluster to create the association in. The service account and the pods that use the service account must be in this namespace.
- :param role_arn: The Amazon Resource Name (ARN) of the IAM role to associate with the service account. The EKS Pod Identity agent manages credentials to assume this role for applications in the containers in the pods that use this service account.
+ :param namespace: The name of the Kubernetes namespace inside the cluster to create the association in. The service account and the Pods that use the service account must be in this namespace.
+ :param role_arn: The Amazon Resource Name (ARN) of the IAM role to associate with the service account. The EKS Pod Identity agent manages credentials to assume this role for applications in the containers in the Pods that use this service account.
  :param service_account: The name of the Kubernetes service account inside the cluster to associate the IAM credentials with.
- :param disable_session_tags: The Disable Session Tags of the pod identity association.
+ :param disable_session_tags: The state of the automatic sessions tags. The value of *true* disables these tags. EKS Pod Identity adds a pre-defined set of session tags when it assumes the role. You can use these tags to author a single role that can work across resources by allowing access to AWS resources based on matching tags. By default, EKS Pod Identity attaches six tags, including tags for cluster name, namespace, and service account name. For the list of tags added by EKS Pod Identity, see `List of session tags added by EKS Pod Identity <https://docs.aws.amazon.com/eks/latest/userguide/pod-id-abac.html#pod-id-abac-tags>`_ in the *Amazon EKS User Guide* .
  :param tags: Metadata that assists with categorization and organization. Each tag consists of a key and an optional value. You define both. Tags don't propagate to any other cluster or AWS resources. The following basic restrictions apply to tags: - Maximum number of tags per resource – 50 - For each resource, each tag key must be unique, and each tag key can have only one value. - Maximum key length – 128 Unicode characters in UTF-8 - Maximum value length – 256 Unicode characters in UTF-8 - If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : /
- :param target_role_arn: The Target Role Arn of the pod identity association.
+ :param target_role_arn: The Amazon Resource Name (ARN) of the target IAM role to associate with the service account. This role is assumed by using the EKS Pod Identity association role, then the credentials for this role are injected into the Pod.

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-eks-podidentityassociation.html
  :exampleMetadata: fixture=_generated
@@ -11524,7 +11531,7 @@ class CfnPodIdentityAssociationProps:
  def namespace(self) -> builtins.str:
  '''The name of the Kubernetes namespace inside the cluster to create the association in.

- The service account and the pods that use the service account must be in this namespace.
+ The service account and the Pods that use the service account must be in this namespace.

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-eks-podidentityassociation.html#cfn-eks-podidentityassociation-namespace
  '''
@@ -11536,7 +11543,7 @@ class CfnPodIdentityAssociationProps:
  def role_arn(self) -> builtins.str:
  '''The Amazon Resource Name (ARN) of the IAM role to associate with the service account.

- The EKS Pod Identity agent manages credentials to assume this role for applications in the containers in the pods that use this service account.
+ The EKS Pod Identity agent manages credentials to assume this role for applications in the containers in the Pods that use this service account.

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-eks-podidentityassociation.html#cfn-eks-podidentityassociation-rolearn
  '''
@@ -11558,7 +11565,9 @@ class CfnPodIdentityAssociationProps:
  def disable_session_tags(
  self,
  ) -> typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]]:
- '''The Disable Session Tags of the pod identity association.
+ '''The state of the automatic sessions tags. The value of *true* disables these tags.
+
+ EKS Pod Identity adds a pre-defined set of session tags when it assumes the role. You can use these tags to author a single role that can work across resources by allowing access to AWS resources based on matching tags. By default, EKS Pod Identity attaches six tags, including tags for cluster name, namespace, and service account name. For the list of tags added by EKS Pod Identity, see `List of session tags added by EKS Pod Identity <https://docs.aws.amazon.com/eks/latest/userguide/pod-id-abac.html#pod-id-abac-tags>`_ in the *Amazon EKS User Guide* .

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-eks-podidentityassociation.html#cfn-eks-podidentityassociation-disablesessiontags
  '''
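Illustrative sketch (not part of the package diff): an ABAC-style policy statement keyed on one of the session tags described above. The tag key ``kubernetes-namespace`` and the resource-tag key ``eks-namespace`` are assumptions; consult the linked guide for the authoritative list of session tags, and treat the action/resource pairing as a placeholder::

    from aws_cdk import aws_iam as iam

    # Allow access only where the resource's tag matches the namespace
    # session tag that EKS Pod Identity adds when assuming the role.
    statement = iam.PolicyStatement(
        actions=["secretsmanager:GetSecretValue"],
        resources=["*"],
        conditions={
            "StringEquals": {
                "aws:ResourceTag/eks-namespace": "${aws:PrincipalTag/kubernetes-namespace}",
            },
        },
    )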
@@ -11592,7 +11601,9 @@ class CfnPodIdentityAssociationProps:

  @builtins.property
  def target_role_arn(self) -> typing.Optional[builtins.str]:
- '''The Target Role Arn of the pod identity association.
+ '''The Amazon Resource Name (ARN) of the target IAM role to associate with the service account.
+
+ This role is assumed by using the EKS Pod Identity association role, then the credentials for this role are injected into the Pod.

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-eks-podidentityassociation.html#cfn-eks-podidentityassociation-targetrolearn
  '''
@@ -12041,15 +12052,15 @@ class ClusterLoggingTypes(enum.Enum):

  Example::

- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


  cluster = eks.Cluster(self, "Cluster",
  # ...
- version=eks.KubernetesVersion.V1_32,
+ version=eks.KubernetesVersion.V1_33,
  cluster_logging=[eks.ClusterLoggingTypes.API, eks.ClusterLoggingTypes.AUTHENTICATOR, eks.ClusterLoggingTypes.SCHEDULER
  ],
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  '''

@@ -12289,13 +12300,13 @@ class DefaultCapacityType(enum.Enum):

  Example::

- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


  cluster = eks.Cluster(self, "HelloEKS",
- version=eks.KubernetesVersion.V1_32,
+ version=eks.KubernetesVersion.V1_33,
  default_capacity_type=eks.DefaultCapacityType.EC2,
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  '''
@@ -12462,13 +12473,13 @@ class EndpointAccess(

  Example::

- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


  cluster = eks.Cluster(self, "hello-eks",
- version=eks.KubernetesVersion.V1_32,
+ version=eks.KubernetesVersion.V1_33,
  endpoint_access=eks.EndpointAccess.PRIVATE, # No access outside of your VPC.
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  '''

@@ -14642,7 +14653,7 @@ class IpFamily(enum.Enum):

  Example::

- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer
  # vpc: ec2.Vpc

@@ -14667,11 +14678,11 @@ class IpFamily(enum.Enum):
  subnetcount = subnetcount + 1

  cluster = eks.Cluster(self, "hello-eks",
- version=eks.KubernetesVersion.V1_32,
+ version=eks.KubernetesVersion.V1_33,
  vpc=vpc,
  ip_family=eks.IpFamily.IP_V6,
  vpc_subnets=[ec2.SubnetSelection(subnets=vpc.public_subnets)],
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  '''

@@ -15798,7 +15809,7 @@ class KubernetesVersion(

  Example::

- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer

  # or
  # vpc: ec2.Vpc
@@ -15806,8 +15817,8 @@ class KubernetesVersion(

  eks.Cluster(self, "MyCluster",
  kubectl_memory=Size.gibibytes(4),
- version=eks.KubernetesVersion.V1_32,
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ version=eks.KubernetesVersion.V1_33,
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  eks.Cluster.from_cluster_attributes(self, "MyCluster",
  kubectl_memory=Size.gibibytes(4),
@@ -16037,6 +16048,17 @@ class KubernetesVersion(
  '''
  return typing.cast("KubernetesVersion", jsii.sget(cls, "V1_32"))

+ @jsii.python.classproperty
+ @jsii.member(jsii_name="V1_33")
+ def V1_33(cls) -> "KubernetesVersion":
+ '''Kubernetes version 1.33.
+
+ When creating a ``Cluster`` with this version, you need to also specify the
+ ``kubectlLayer`` property with a ``KubectlV33Layer`` from
+ ``@aws-cdk/lambda-layer-kubectl-v33``.
+ '''
+ return typing.cast("KubernetesVersion", jsii.sget(cls, "V1_33"))
+
  @builtins.property
  @jsii.member(jsii_name="version")
  def version(self) -> builtins.str:
@@ -18661,7 +18683,7 @@ class Cluster(

  Example::

- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer

  # or
  # vpc: ec2.Vpc
@@ -18669,8 +18691,8 @@ class Cluster(

  eks.Cluster(self, "MyCluster",
  kubectl_memory=Size.gibibytes(4),
- version=eks.KubernetesVersion.V1_32,
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ version=eks.KubernetesVersion.V1_33,
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  eks.Cluster.from_cluster_attributes(self, "MyCluster",
  kubectl_memory=Size.gibibytes(4),
@@ -20455,7 +20477,7 @@ class ClusterProps(ClusterOptions):

  Example::

- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer

  # or
  # vpc: ec2.Vpc
@@ -20463,8 +20485,8 @@ class ClusterProps(ClusterOptions):

  eks.Cluster(self, "MyCluster",
  kubectl_memory=Size.gibibytes(4),
- version=eks.KubernetesVersion.V1_32,
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ version=eks.KubernetesVersion.V1_33,
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  eks.Cluster.from_cluster_attributes(self, "MyCluster",
  kubectl_memory=Size.gibibytes(4),
@@ -21023,12 +21045,12 @@ class FargateCluster(

  Example::

- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


  cluster = eks.FargateCluster(self, "MyCluster",
- version=eks.KubernetesVersion.V1_32,
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ version=eks.KubernetesVersion.V1_33,
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  '''

@@ -21262,12 +21284,12 @@ class FargateClusterProps(ClusterOptions):

  Example::

- from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+ from aws_cdk.lambda_layer_kubectl_v33 import KubectlV33Layer


  cluster = eks.FargateCluster(self, "MyCluster",
- version=eks.KubernetesVersion.V1_32,
- kubectl_layer=KubectlV32Layer(self, "kubectl")
+ version=eks.KubernetesVersion.V1_33,
+ kubectl_layer=KubectlV33Layer(self, "kubectl")
  )
  '''
  if isinstance(alb_controller, dict):