aws-cdk-lib 2.178.1__py3-none-any.whl → 2.179.0__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.


This version of aws-cdk-lib might be problematic.

Files changed (34)
  1. aws_cdk/__init__.py +69 -35
  2. aws_cdk/_jsii/__init__.py +1 -2
  3. aws_cdk/_jsii/{aws-cdk-lib@2.178.1.jsii.tgz → aws-cdk-lib@2.179.0.jsii.tgz} +0 -0
  4. aws_cdk/aws_apigateway/__init__.py +170 -29
  5. aws_cdk/aws_apigatewayv2/__init__.py +151 -32
  6. aws_cdk/aws_apigatewayv2_integrations/__init__.py +348 -0
  7. aws_cdk/aws_applicationautoscaling/__init__.py +8 -8
  8. aws_cdk/aws_appsync/__init__.py +6 -4
  9. aws_cdk/aws_cloudfront/__init__.py +5 -5
  10. aws_cdk/aws_codebuild/__init__.py +216 -0
  11. aws_cdk/aws_codepipeline/__init__.py +89 -28
  12. aws_cdk/aws_codepipeline_actions/__init__.py +526 -62
  13. aws_cdk/aws_cognito/__init__.py +676 -20
  14. aws_cdk/aws_ec2/__init__.py +25 -9
  15. aws_cdk/aws_ecs/__init__.py +8 -8
  16. aws_cdk/aws_eks/__init__.py +555 -179
  17. aws_cdk/aws_elasticloadbalancingv2/__init__.py +99 -0
  18. aws_cdk/aws_events/__init__.py +9 -15
  19. aws_cdk/aws_events_targets/__init__.py +303 -16
  20. aws_cdk/aws_iam/__init__.py +3 -3
  21. aws_cdk/aws_ivs/__init__.py +241 -73
  22. aws_cdk/aws_logs/__init__.py +62 -13
  23. aws_cdk/aws_pinpoint/__init__.py +14 -9
  24. aws_cdk/aws_rds/__init__.py +168 -24
  25. aws_cdk/aws_s3/__init__.py +9 -9
  26. aws_cdk/aws_stepfunctions_tasks/__init__.py +127 -21
  27. aws_cdk/pipelines/__init__.py +2 -2
  28. {aws_cdk_lib-2.178.1.dist-info → aws_cdk_lib-2.179.0.dist-info}/METADATA +1 -2
  29. {aws_cdk_lib-2.178.1.dist-info → aws_cdk_lib-2.179.0.dist-info}/RECORD +33 -34
  30. aws_cdk/lambda_layer_kubectl/__init__.py +0 -107
  31. {aws_cdk_lib-2.178.1.dist-info → aws_cdk_lib-2.179.0.dist-info}/LICENSE +0 -0
  32. {aws_cdk_lib-2.178.1.dist-info → aws_cdk_lib-2.179.0.dist-info}/NOTICE +0 -0
  33. {aws_cdk_lib-2.178.1.dist-info → aws_cdk_lib-2.179.0.dist-info}/WHEEL +0 -0
  34. {aws_cdk_lib-2.178.1.dist-info → aws_cdk_lib-2.179.0.dist-info}/top_level.txt +0 -0
@@ -39,6 +39,7 @@ In addition, the library also supports defining Kubernetes resource manifests wi
  * [ARM64 Support](#arm64-support)
  * [Masters Role](#masters-role)
  * [Encryption](#encryption)
+ * [Hybrid nodes](#hybrid-nodes)
  * [Permissions and Security](#permissions-and-security)

  * [AWS IAM Mapping](#aws-iam-mapping)
@@ -78,13 +79,13 @@ This example defines an Amazon EKS cluster with the following configuration:
  * A Kubernetes pod with a container based on the [paulbouwer/hello-kubernetes](https://github.com/paulbouwer/hello-kubernetes) image.

  ```python
- from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer


  # provisioning a cluster
  cluster = eks.Cluster(self, "hello-eks",
- version=eks.KubernetesVersion.V1_31,
- kubectl_layer=KubectlV31Layer(self, "kubectl")
+ version=eks.KubernetesVersion.V1_32,
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
  )

  # apply a kubernetes manifest to the cluster
@@ -145,19 +146,27 @@ A more detailed breakdown of each is provided further down this README.

  ## Provisioning clusters

- Creating a new cluster is done using the `Cluster` or `FargateCluster` constructs. The only required property is the kubernetes `version`.
+ Creating a new cluster is done using the `Cluster` or `FargateCluster` constructs. The only required properties are the kubernetes `version` and `kubectlLayer`.

  ```python
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+
+
  eks.Cluster(self, "HelloEKS",
- version=eks.KubernetesVersion.V1_31
+ version=eks.KubernetesVersion.V1_32,
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
  )
  ```

  You can also use `FargateCluster` to provision a cluster that uses only fargate workers.

  ```python
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+
+
  eks.FargateCluster(self, "HelloEKS",
- version=eks.KubernetesVersion.V1_31
+ version=eks.KubernetesVersion.V1_32,
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
  )
  ```

@@ -169,7 +178,7 @@ Capacity is the amount and the type of worker nodes that are available to the cl
  ### Managed node groups

  Amazon EKS managed node groups automate the provisioning and lifecycle management of nodes (Amazon EC2 instances) for Amazon EKS Kubernetes clusters.
- With Amazon EKS managed node groups, you dont need to separately provision or register the Amazon EC2 instances that provide compute capacity to run your Kubernetes applications. You can create, update, or terminate nodes for your cluster with a single operation. Nodes run using the latest Amazon EKS optimized AMIs in your AWS account while node updates and terminations gracefully drain nodes to ensure that your applications stay available.
+ With Amazon EKS managed node groups, you don't need to separately provision or register the Amazon EC2 instances that provide compute capacity to run your Kubernetes applications. You can create, update, or terminate nodes for your cluster with a single operation. Nodes run using the latest Amazon EKS optimized AMIs in your AWS account while node updates and terminations gracefully drain nodes to ensure that your applications stay available.

  > For more details visit [Amazon EKS Managed Node Groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html).

@@ -180,10 +189,14 @@ By default, this library will allocate a managed node group with 2 *m5.large* in
  At cluster instantiation time, you can customize the number of instances and their type:

  ```python
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+
+
  eks.Cluster(self, "HelloEKS",
- version=eks.KubernetesVersion.V1_31,
+ version=eks.KubernetesVersion.V1_32,
  default_capacity=5,
- default_capacity_instance=ec2.InstanceType.of(ec2.InstanceClass.M5, ec2.InstanceSize.SMALL)
+ default_capacity_instance=ec2.InstanceType.of(ec2.InstanceClass.M5, ec2.InstanceSize.SMALL),
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
  )
  ```

@@ -192,9 +205,13 @@ To access the node group that was created on your behalf, you can use `cluster.d
  Additional customizations are available post instantiation. To apply them, set the default capacity to 0, and use the `cluster.addNodegroupCapacity` method:

  ```python
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+
+
  cluster = eks.Cluster(self, "HelloEKS",
- version=eks.KubernetesVersion.V1_31,
- default_capacity=0
+ version=eks.KubernetesVersion.V1_32,
+ default_capacity=0,
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
  )

  cluster.add_nodegroup_capacity("custom-node-group",
@@ -273,6 +290,9 @@ Node groups are available with IPv6 configured networks. For custom roles assig
  > For more details visit [Configuring the Amazon VPC CNI plugin for Kubernetes to use IAM roles for service accounts](https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html#cni-iam-role-create-role)

  ```python
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+
+
  ipv6_management = iam.PolicyDocument(
  statements=[iam.PolicyStatement(
  resources=["arn:aws:ec2:*:*:network-interface/*"],
@@ -295,8 +315,9 @@ eks_cluster_node_group_role = iam.Role(self, "eksClusterNodeGroupRole",
  )

  cluster = eks.Cluster(self, "HelloEKS",
- version=eks.KubernetesVersion.V1_31,
- default_capacity=0
+ version=eks.KubernetesVersion.V1_32,
+ default_capacity=0,
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
  )

  cluster.add_nodegroup_capacity("custom-node-group",
@@ -405,9 +426,13 @@ has been changed. As a workaround, you need to add a temporary policy to the clu
  successful replacement. Consider this example if you are renaming the cluster from `foo` to `bar`:

  ```python
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+
+
  cluster = eks.Cluster(self, "cluster-to-rename",
  cluster_name="foo", # rename this to 'bar'
- version=eks.KubernetesVersion.V1_31
+ kubectl_layer=KubectlV32Layer(self, "kubectl"),
+ version=eks.KubernetesVersion.V1_32
  )

  # allow the cluster admin role to delete the cluster 'foo'
@@ -460,8 +485,12 @@ To create an EKS cluster that **only** uses Fargate capacity, you can use `Farga
  The following code defines an Amazon EKS cluster with a default Fargate Profile that matches all pods from the "kube-system" and "default" namespaces. It is also configured to [run CoreDNS on Fargate](https://docs.aws.amazon.com/eks/latest/userguide/fargate-getting-started.html#fargate-gs-coredns).

  ```python
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+
+
  cluster = eks.FargateCluster(self, "MyCluster",
- version=eks.KubernetesVersion.V1_31
+ version=eks.KubernetesVersion.V1_32,
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
  )
  ```

@@ -541,9 +570,13 @@ To disable bootstrapping altogether (i.e. to fully customize user-data), set `bo
  You can also configure the cluster to use an auto-scaling group as the default capacity:

  ```python
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+
+
  cluster = eks.Cluster(self, "HelloEKS",
- version=eks.KubernetesVersion.V1_31,
- default_capacity_type=eks.DefaultCapacityType.EC2
+ version=eks.KubernetesVersion.V1_32,
+ default_capacity_type=eks.DefaultCapacityType.EC2,
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
  )
  ```

@@ -650,9 +683,13 @@ AWS Identity and Access Management (IAM) and native Kubernetes [Role Based Acces
  You can configure the [cluster endpoint access](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) by using the `endpointAccess` property:

  ```python
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+
+
  cluster = eks.Cluster(self, "hello-eks",
- version=eks.KubernetesVersion.V1_31,
- endpoint_access=eks.EndpointAccess.PRIVATE
+ version=eks.KubernetesVersion.V1_32,
+ endpoint_access=eks.EndpointAccess.PRIVATE, # No access outside of your VPC.
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
  )
  ```

@@ -672,11 +709,15 @@ From the docs:
  To deploy the controller on your EKS cluster, configure the `albController` property:

  ```python
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+
+
  eks.Cluster(self, "HelloEKS",
- version=eks.KubernetesVersion.V1_31,
+ version=eks.KubernetesVersion.V1_32,
  alb_controller=eks.AlbControllerOptions(
  version=eks.AlbControllerVersion.V2_8_2
- )
+ ),
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
  )
  ```

@@ -713,13 +754,16 @@ if cluster.alb_controller:
  You can specify the VPC of the cluster using the `vpc` and `vpcSubnets` properties:

  ```python
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+
  # vpc: ec2.Vpc


  eks.Cluster(self, "HelloEKS",
- version=eks.KubernetesVersion.V1_31,
+ version=eks.KubernetesVersion.V1_32,
  vpc=vpc,
- vpc_subnets=[ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS)]
+ vpc_subnets=[ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS)],
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
  )
  ```

@@ -763,10 +807,12 @@ The `ClusterHandler` is a set of Lambda functions (`onEventHandler`, `isComplete
  You can configure the environment of the Cluster Handler functions by specifying it at cluster instantiation. For example, this can be useful in order to configure an http proxy:

  ```python
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+
  # proxy_instance_security_group: ec2.SecurityGroup

  cluster = eks.Cluster(self, "hello-eks",
- version=eks.KubernetesVersion.V1_31,
+ version=eks.KubernetesVersion.V1_32,
  cluster_handler_environment={
  "https_proxy": "http://proxy.myproxy.com"
  },
@@ -774,7 +820,8 @@ cluster = eks.Cluster(self, "hello-eks",
  # If the proxy is not open publicly, you can pass a security group to the
  # Cluster Handler Lambdas so that it can reach the proxy.
  #
- cluster_handler_security_group=proxy_instance_security_group
+ cluster_handler_security_group=proxy_instance_security_group,
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
  )
  ```

@@ -783,6 +830,7 @@ cluster = eks.Cluster(self, "hello-eks",
  You can optionally choose to configure your cluster to use IPv6 using the [`ipFamily`](https://docs.aws.amazon.com/eks/latest/APIReference/API_KubernetesNetworkConfigRequest.html#AmazonEKS-Type-KubernetesNetworkConfigRequest-ipFamily) definition for your cluster. Note that this will require the underlying subnets to have an associated IPv6 CIDR.

  ```python
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
  # vpc: ec2.Vpc


@@ -807,10 +855,11 @@ for subnet in subnets:
  subnetcount = subnetcount + 1

  cluster = eks.Cluster(self, "hello-eks",
- version=eks.KubernetesVersion.V1_31,
+ version=eks.KubernetesVersion.V1_32,
  vpc=vpc,
  ip_family=eks.IpFamily.IP_V6,
- vpc_subnets=[ec2.SubnetSelection(subnets=vpc.public_subnets)]
+ vpc_subnets=[ec2.SubnetSelection(subnets=vpc.public_subnets)],
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
  )
  ```

@@ -841,11 +890,15 @@ cluster = eks.Cluster.from_cluster_attributes(self, "Cluster",
  You can configure the environment of this function by specifying it at cluster instantiation. For example, this can be useful in order to configure an http proxy:

  ```python
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+
+
  cluster = eks.Cluster(self, "hello-eks",
- version=eks.KubernetesVersion.V1_31,
+ version=eks.KubernetesVersion.V1_32,
  kubectl_environment={
  "http_proxy": "http://proxy.myproxy.com"
- }
+ },
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
  )
  ```

@@ -862,12 +915,12 @@ Depending on which version of kubernetes you're targeting, you will need to use
  the `@aws-cdk/lambda-layer-kubectl-vXY` packages.

  ```python
- from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer


  cluster = eks.Cluster(self, "hello-eks",
- version=eks.KubernetesVersion.V1_31,
- kubectl_layer=KubectlV31Layer(self, "kubectl")
+ version=eks.KubernetesVersion.V1_32,
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
  )
  ```

@@ -903,7 +956,7 @@ cluster1 = eks.Cluster(self, "MyCluster",
  kubectl_layer=layer,
  vpc=vpc,
  cluster_name="cluster-name",
- version=eks.KubernetesVersion.V1_31
+ version=eks.KubernetesVersion.V1_32
  )

  # or
@@ -919,11 +972,16 @@ cluster2 = eks.Cluster.from_cluster_attributes(self, "MyCluster",
  By default, the kubectl provider is configured with 1024MiB of memory. You can use the `kubectlMemory` option to specify the memory size for the AWS Lambda function:

  ```python
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+
  # or
  # vpc: ec2.Vpc
+
+
  eks.Cluster(self, "MyCluster",
  kubectl_memory=Size.gibibytes(4),
- version=eks.KubernetesVersion.V1_31
+ version=eks.KubernetesVersion.V1_32,
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
  )
  eks.Cluster.from_cluster_attributes(self, "MyCluster",
  kubectl_memory=Size.gibibytes(4),
@@ -958,11 +1016,14 @@ cluster.add_auto_scaling_group_capacity("self-ng-arm",
  When you create a cluster, you can specify a `mastersRole`. The `Cluster` construct will associate this role with the `system:masters` [RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) group, giving it super-user access to the cluster.

  ```python
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+
  # role: iam.Role

  eks.Cluster(self, "HelloEKS",
- version=eks.KubernetesVersion.V1_31,
- masters_role=role
+ version=eks.KubernetesVersion.V1_32,
+ masters_role=role,
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
  )
  ```

@@ -1008,20 +1069,28 @@ You can use the `secretsEncryptionKey` to configure which key the cluster will u
  > This setting can only be specified when the cluster is created and cannot be updated.

  ```python
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+
+
  secrets_key = kms.Key(self, "SecretsKey")
  cluster = eks.Cluster(self, "MyCluster",
  secrets_encryption_key=secrets_key,
- version=eks.KubernetesVersion.V1_31
+ version=eks.KubernetesVersion.V1_32,
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
  )
  ```

  You can also use a similar configuration for running a cluster built using the FargateCluster construct.

  ```python
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+
+
  secrets_key = kms.Key(self, "SecretsKey")
  cluster = eks.FargateCluster(self, "MyFargateCluster",
  secrets_encryption_key=secrets_key,
- version=eks.KubernetesVersion.V1_31
+ version=eks.KubernetesVersion.V1_32,
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
  )
  ```

@@ -1033,6 +1102,30 @@ The Amazon Resource Name (ARN) for that CMK can be retrieved.
  cluster_encryption_config_key_arn = cluster.cluster_encryption_config_key_arn
  ```

+ ### Hybrid Nodes
+
+ When you create an Amazon EKS cluster, you can configure it to leverage the [EKS Hybrid Nodes](https://aws.amazon.com/eks/hybrid-nodes/) feature, allowing you to use your on-premises and edge infrastructure as nodes in your EKS cluster. Refer to the Hyrid Nodes [networking documentation](https://docs.aws.amazon.com/eks/latest/userguide/hybrid-nodes-networking.html) to configure your on-premises network, node and pod CIDRs, access control, etc before creating your EKS Cluster.
+
+ Once you have identified the on-premises node and pod (optional) CIDRs you will use for your hybrid nodes and the workloads running on them, you can specify them during cluster creation using the `remoteNodeNetworks` and `remotePodNetworks` (optional) properties:
+
+ ```python
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+
+
+ eks.Cluster(self, "Cluster",
+ version=eks.KubernetesVersion.V1_32,
+ kubectl_layer=KubectlV32Layer(self, "KubectlLayer"),
+ remote_node_networks=[eks.RemoteNodeNetwork(
+ cidrs=["10.0.0.0/16"]
+ )
+ ],
+ remote_pod_networks=[eks.RemotePodNetwork(
+ cidrs=["192.168.0.0/16"]
+ )
+ ]
+ )
+ ```
+
  ## Permissions and Security

  Amazon EKS provides several mechanism of securing the cluster and granting permissions to specific IAM users and roles.
@@ -1068,7 +1161,7 @@ To access the Kubernetes resources from the console, make sure your viewing prin
  in the `aws-auth` ConfigMap. Some options to consider:

  ```python
- from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
  # cluster: eks.Cluster
  # your_current_role: iam.Role
  # vpc: ec2.Vpc
@@ -1086,7 +1179,7 @@ your_current_role.add_to_policy(iam.PolicyStatement(

  ```python
  # Option 2: create your custom mastersRole with scoped assumeBy arn as the Cluster prop. Switch to this role from the AWS console.
- from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
  # vpc: ec2.Vpc


@@ -1096,8 +1189,8 @@ masters_role = iam.Role(self, "MastersRole",

  cluster = eks.Cluster(self, "EksCluster",
  vpc=vpc,
- version=eks.KubernetesVersion.V1_31,
- kubectl_layer=KubectlV31Layer(self, "KubectlLayer"),
+ version=eks.KubernetesVersion.V1_32,
+ kubectl_layer=KubectlV32Layer(self, "KubectlLayer"),
  masters_role=masters_role
  )

@@ -1136,14 +1229,14 @@ AWS IAM principals from both Amazon EKS access entry APIs and the aws-auth confi
  To specify the `authenticationMode`:

  ```python
- from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
  # vpc: ec2.Vpc


  eks.Cluster(self, "Cluster",
  vpc=vpc,
- version=eks.KubernetesVersion.V1_31,
- kubectl_layer=KubectlV31Layer(self, "KubectlLayer"),
+ version=eks.KubernetesVersion.V1_32,
+ kubectl_layer=KubectlV32Layer(self, "KubectlLayer"),
  authentication_mode=eks.AuthenticationMode.API_AND_CONFIG_MAP
  )
  ```
@@ -1188,7 +1281,7 @@ eks.AccessPolicy.from_access_policy_name("AmazonEKSAdminPolicy",
  Use `grantAccess()` to grant the AccessPolicy to an IAM principal:

  ```python
- from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
  # vpc: ec2.Vpc


@@ -1207,8 +1300,8 @@ eks_admin_view_role = iam.Role(self, "EKSAdminViewRole",
  cluster = eks.Cluster(self, "Cluster",
  vpc=vpc,
  masters_role=cluster_admin_role,
- version=eks.KubernetesVersion.V1_31,
- kubectl_layer=KubectlV31Layer(self, "KubectlLayer"),
+ version=eks.KubernetesVersion.V1_32,
+ kubectl_layer=KubectlV32Layer(self, "KubectlLayer"),
  authentication_mode=eks.AuthenticationMode.API_AND_CONFIG_MAP
  )

@@ -1539,9 +1632,13 @@ Pruning is enabled by default but can be disabled through the `prune` option
  when a cluster is defined:

  ```python
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+
+
  eks.Cluster(self, "MyCluster",
- version=eks.KubernetesVersion.V1_31,
- prune=False
+ version=eks.KubernetesVersion.V1_32,
+ prune=False,
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
  )
  ```

@@ -1937,11 +2034,15 @@ You can enable logging for each one separately using the `clusterLogging`
  property. For example:

  ```python
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+
+
  cluster = eks.Cluster(self, "Cluster",
  # ...
- version=eks.KubernetesVersion.V1_31,
+ version=eks.KubernetesVersion.V1_32,
  cluster_logging=[eks.ClusterLoggingTypes.API, eks.ClusterLoggingTypes.AUTHENTICATOR, eks.ClusterLoggingTypes.SCHEDULER
- ]
+ ],
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
  )
  ```

@@ -2892,11 +2993,15 @@ class AlbControllerOptions:

  Example::

+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+
+
  eks.Cluster(self, "HelloEKS",
- version=eks.KubernetesVersion.V1_31,
+ version=eks.KubernetesVersion.V1_32,
  alb_controller=eks.AlbControllerOptions(
  version=eks.AlbControllerVersion.V2_8_2
- )
+ ),
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
  )
  '''
  if __debug__:
@@ -3085,11 +3190,15 @@ class AlbControllerVersion(

  Example::

+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+
+
  eks.Cluster(self, "HelloEKS",
- version=eks.KubernetesVersion.V1_31,
+ version=eks.KubernetesVersion.V1_32,
  alb_controller=eks.AlbControllerOptions(
  version=eks.AlbControllerVersion.V2_8_2
- )
+ ),
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
  )
  '''

@@ -3365,14 +3474,14 @@ class AuthenticationMode(enum.Enum):

  Example::

- from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
  # vpc: ec2.Vpc


  eks.Cluster(self, "Cluster",
  vpc=vpc,
- version=eks.KubernetesVersion.V1_31,
- kubectl_layer=KubectlV31Layer(self, "KubectlLayer"),
+ version=eks.KubernetesVersion.V1_32,
+ kubectl_layer=KubectlV32Layer(self, "KubectlLayer"),
  authentication_mode=eks.AuthenticationMode.API_AND_CONFIG_MAP
  )
  '''
@@ -11215,7 +11324,7 @@ class ClusterAttributes:
  :param ip_family: Specify which IP family is used to assign Kubernetes pod and service IP addresses. Default: - IpFamily.IP_V4
  :param kubectl_environment: Environment variables to use when running ``kubectl`` against this cluster. Default: - no additional variables
  :param kubectl_lambda_role: An IAM role that can perform kubectl operations against this cluster. The role should be mapped to the ``system:masters`` Kubernetes RBAC role. This role is directly passed to the lambda handler that sends Kube Ctl commands to the cluster. Default: - if not specified, the default role created by a lambda function will be used.
- :param kubectl_layer: An AWS Lambda Layer which includes ``kubectl`` and Helm. This layer is used by the kubectl handler to apply manifests and install helm charts. You must pick an appropriate releases of one of the ``@aws-cdk/layer-kubectl-vXX`` packages, that works with the version of Kubernetes you have chosen. If you don't supply this value ``kubectl`` 1.20 will be used, but that version is most likely too old. The handler expects the layer to include the following executables:: /opt/helm/helm /opt/kubectl/kubectl Default: - a default layer with Kubectl 1.20 and helm 3.8.
+ :param kubectl_layer: An AWS Lambda Layer which includes ``kubectl`` and Helm. This layer is used by the kubectl handler to apply manifests and install helm charts. You must pick an appropriate releases of one of the ``@aws-cdk/layer-kubectl-vXX`` packages, that works with the version of Kubernetes you have chosen. The handler expects the layer to include the following executables:: /opt/helm/helm /opt/kubectl/kubectl Default: - No default layer will be provided
  :param kubectl_memory: Amount of memory to allocate to the provider's lambda function. Default: Size.gibibytes(1)
  :param kubectl_private_subnet_ids: Subnets to host the ``kubectl`` compute resources. If not specified, the k8s endpoint is expected to be accessible publicly. Default: - k8s endpoint is expected to be accessible publicly
  :param kubectl_provider: KubectlProvider for issuing kubectl commands. Default: - Default CDK provider
@@ -11430,15 +11539,14 @@ class ClusterAttributes:
  This layer is used by the kubectl handler to apply manifests and install
  helm charts. You must pick an appropriate releases of one of the
  ``@aws-cdk/layer-kubectl-vXX`` packages, that works with the version of
- Kubernetes you have chosen. If you don't supply this value ``kubectl``
- 1.20 will be used, but that version is most likely too old.
+ Kubernetes you have chosen.

  The handler expects the layer to include the following executables::

  /opt/helm/helm
  /opt/kubectl/kubectl

- :default: - a default layer with Kubectl 1.20 and helm 3.8.
+ :default: - No default layer will be provided
  '''
  result = self._values.get("kubectl_layer")
  return typing.cast(typing.Optional[_ILayerVersion_5ac127c8], result)
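With no default layer any more, a cluster has to be given an explicit `kubectl_layer`. Besides the published `@aws-cdk/lambda-layer-kubectl-vXX` packages, the layer can be built from a local asset — a minimal sketch, assuming a hypothetical `layers/kubectl` asset that unpacks the binaries to the paths the handler expects (`/opt/kubectl/kubectl` and `/opt/helm/helm`):

```python
from aws_cdk import aws_eks as eks
from aws_cdk import aws_lambda as lambda_

# Hypothetical asset directory/zip; it must lay out the binaries so they land at
# /opt/kubectl/kubectl and /opt/helm/helm once the layer is mounted.
kubectl_helm_layer = lambda_.LayerVersion(self, "KubectlHelmLayer",
    code=lambda_.Code.from_asset("layers/kubectl"),
    description="kubectl and helm binaries for the EKS kubectl handler",
)

cluster = eks.Cluster(self, "HelloEKS",
    version=eks.KubernetesVersion.V1_32,
    kubectl_layer=kubectl_helm_layer,
)
```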
@@ -11581,11 +11689,15 @@ class ClusterLoggingTypes(enum.Enum):

  Example::

+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+
+
  cluster = eks.Cluster(self, "Cluster",
  # ...
- version=eks.KubernetesVersion.V1_31,
+ version=eks.KubernetesVersion.V1_32,
  cluster_logging=[eks.ClusterLoggingTypes.API, eks.ClusterLoggingTypes.AUTHENTICATOR, eks.ClusterLoggingTypes.SCHEDULER
- ]
+ ],
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
  )
  '''

@@ -11821,9 +11933,13 @@ class DefaultCapacityType(enum.Enum):

  Example::

+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+
+
  cluster = eks.Cluster(self, "HelloEKS",
- version=eks.KubernetesVersion.V1_31,
- default_capacity_type=eks.DefaultCapacityType.EC2
+ version=eks.KubernetesVersion.V1_32,
+ default_capacity_type=eks.DefaultCapacityType.EC2,
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
  )
  '''

@@ -11990,9 +12106,13 @@ class EndpointAccess(

  Example::

+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+
+
  cluster = eks.Cluster(self, "hello-eks",
- version=eks.KubernetesVersion.V1_31,
- endpoint_access=eks.EndpointAccess.PRIVATE
+ version=eks.KubernetesVersion.V1_32,
+ endpoint_access=eks.EndpointAccess.PRIVATE, # No access outside of your VPC.
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
  )
  '''

@@ -13399,10 +13519,7 @@ class ICluster(_IResource_c80c4260, _IConnectable_10015a05, typing_extensions.Pr
  @builtins.property
  @jsii.member(jsii_name="kubectlLayer")
  def kubectl_layer(self) -> typing.Optional[_ILayerVersion_5ac127c8]:
- '''An AWS Lambda layer that includes ``kubectl`` and ``helm``.
-
- If not defined, a default layer will be used containing Kubectl 1.20 and Helm 3.8
- '''
+ '''An AWS Lambda layer that includes ``kubectl`` and ``helm``.'''
  ...

  @builtins.property
@@ -13777,10 +13894,7 @@ class _IClusterProxy(
  @builtins.property
  @jsii.member(jsii_name="kubectlLayer")
  def kubectl_layer(self) -> typing.Optional[_ILayerVersion_5ac127c8]:
- '''An AWS Lambda layer that includes ``kubectl`` and ``helm``.
-
- If not defined, a default layer will be used containing Kubectl 1.20 and Helm 3.8
- '''
+ '''An AWS Lambda layer that includes ``kubectl`` and ``helm``.'''
  return typing.cast(typing.Optional[_ILayerVersion_5ac127c8], jsii.get(self, "kubectlLayer"))

  @builtins.property
@@ -14172,6 +14286,7 @@ class IpFamily(enum.Enum):

  Example::

+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
  # vpc: ec2.Vpc


@@ -14196,10 +14311,11 @@ class IpFamily(enum.Enum):
  subnetcount = subnetcount + 1

  cluster = eks.Cluster(self, "hello-eks",
- version=eks.KubernetesVersion.V1_31,
+ version=eks.KubernetesVersion.V1_32,
  vpc=vpc,
  ip_family=eks.IpFamily.IP_V6,
- vpc_subnets=[ec2.SubnetSelection(subnets=vpc.public_subnets)]
+ vpc_subnets=[ec2.SubnetSelection(subnets=vpc.public_subnets)],
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
  )
  '''

@@ -15320,11 +15436,16 @@ class KubernetesVersion(

  Example::

+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+
  # or
  # vpc: ec2.Vpc
+
+
  eks.Cluster(self, "MyCluster",
  kubectl_memory=Size.gibibytes(4),
- version=eks.KubernetesVersion.V1_31
+ version=eks.KubernetesVersion.V1_32,
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
  )
  eks.Cluster.from_cluster_attributes(self, "MyCluster",
  kubectl_memory=Size.gibibytes(4),
@@ -15543,6 +15664,17 @@ class KubernetesVersion(
  '''
  return typing.cast("KubernetesVersion", jsii.sget(cls, "V1_31"))

+ @jsii.python.classproperty
+ @jsii.member(jsii_name="V1_32")
+ def V1_32(cls) -> "KubernetesVersion":
+ '''Kubernetes version 1.32.
+
+ When creating a ``Cluster`` with this version, you need to also specify the
+ ``kubectlLayer`` property with a ``KubectlV32Layer`` from
+ ``@aws-cdk/lambda-layer-kubectl-v32``.
+ '''
+ return typing.cast("KubernetesVersion", jsii.sget(cls, "V1_32"))
+
  @builtins.property
  @jsii.member(jsii_name="version")
  def version(self) -> builtins.str:
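Since each `KubernetesVersion` now pairs with its own kubectl layer package, an app that parameterizes the version can keep the two in sync with a small lookup keyed on the `version` string property shown above. This is an illustrative sketch only; the `KUBECTL_LAYERS` table and `make_cluster` helper are not part of the library:

```python
from aws_cdk import aws_eks as eks
from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer
from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer

# Hypothetical mapping from the cluster version string to the matching layer class.
KUBECTL_LAYERS = {
    "1.31": KubectlV31Layer,
    "1.32": KubectlV32Layer,
}

def make_cluster(scope, cluster_version):
    # cluster_version.version is the plain version string, e.g. "1.32"
    layer_cls = KUBECTL_LAYERS[cluster_version.version]
    return eks.Cluster(scope, "Cluster",
        version=cluster_version,
        kubectl_layer=layer_cls(scope, "kubectl"),
    )
```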
@@ -16999,6 +17131,110 @@ class PatchType(enum.Enum):
  '''Strategic merge patch.'''


+ @jsii.data_type(
+ jsii_type="aws-cdk-lib.aws_eks.RemoteNodeNetwork",
+ jsii_struct_bases=[],
+ name_mapping={"cidrs": "cidrs"},
+ )
+ class RemoteNodeNetwork:
+ def __init__(self, *, cidrs: typing.Sequence[builtins.str]) -> None:
+ '''Network configuration of nodes run on-premises with EKS Hybrid Nodes.
+
+ :param cidrs: Specifies the list of remote node CIDRs.
+
+ :exampleMetadata: fixture=_generated
+
+ Example::
+
+ # The code below shows an example of how to instantiate this type.
+ # The values are placeholders you should change.
+ from aws_cdk import aws_eks as eks
+
+ remote_node_network = eks.RemoteNodeNetwork(
+ cidrs=["cidrs"]
+ )
+ '''
+ if __debug__:
+ type_hints = typing.get_type_hints(_typecheckingstub__600789f5d1adc105e950fc1e01201ea975b89bb797b63227b757a633425a0f09)
+ check_type(argname="argument cidrs", value=cidrs, expected_type=type_hints["cidrs"])
+ self._values: typing.Dict[builtins.str, typing.Any] = {
+ "cidrs": cidrs,
+ }
+
+ @builtins.property
+ def cidrs(self) -> typing.List[builtins.str]:
+ '''Specifies the list of remote node CIDRs.
+
+ :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-eks-cluster-remotenodenetwork.html#cfn-eks-cluster-remotenodenetwork-cidrs
+ '''
+ result = self._values.get("cidrs")
+ assert result is not None, "Required property 'cidrs' is missing"
+ return typing.cast(typing.List[builtins.str], result)
+
+ def __eq__(self, rhs: typing.Any) -> builtins.bool:
+ return isinstance(rhs, self.__class__) and rhs._values == self._values
+
+ def __ne__(self, rhs: typing.Any) -> builtins.bool:
+ return not (rhs == self)
+
+ def __repr__(self) -> str:
+ return "RemoteNodeNetwork(%s)" % ", ".join(
+ k + "=" + repr(v) for k, v in self._values.items()
+ )
+
+
+ @jsii.data_type(
+ jsii_type="aws-cdk-lib.aws_eks.RemotePodNetwork",
+ jsii_struct_bases=[],
+ name_mapping={"cidrs": "cidrs"},
+ )
+ class RemotePodNetwork:
+ def __init__(self, *, cidrs: typing.Sequence[builtins.str]) -> None:
+ '''Network configuration of pods run on-premises with EKS Hybrid Nodes.
+
+ :param cidrs: Specifies the list of remote pod CIDRs.
+
+ :exampleMetadata: fixture=_generated
+
+ Example::
+
+ # The code below shows an example of how to instantiate this type.
+ # The values are placeholders you should change.
+ from aws_cdk import aws_eks as eks
+
+ remote_pod_network = eks.RemotePodNetwork(
+ cidrs=["cidrs"]
+ )
+ '''
+ if __debug__:
+ type_hints = typing.get_type_hints(_typecheckingstub__f9878a6e6680b6c2c6cb0db908c65c1de65fe68965909386c87176ba98e30705)
+ check_type(argname="argument cidrs", value=cidrs, expected_type=type_hints["cidrs"])
+ self._values: typing.Dict[builtins.str, typing.Any] = {
+ "cidrs": cidrs,
+ }
+
+ @builtins.property
+ def cidrs(self) -> typing.List[builtins.str]:
+ '''Specifies the list of remote pod CIDRs.
+
+ :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-eks-cluster-remotepodnetwork.html#cfn-eks-cluster-remotepodnetwork-cidrs
+ '''
+ result = self._values.get("cidrs")
+ assert result is not None, "Required property 'cidrs' is missing"
+ return typing.cast(typing.List[builtins.str], result)
+
+ def __eq__(self, rhs: typing.Any) -> builtins.bool:
+ return isinstance(rhs, self.__class__) and rhs._values == self._values
+
+ def __ne__(self, rhs: typing.Any) -> builtins.bool:
+ return not (rhs == self)
+
+ def __repr__(self) -> str:
+ return "RemotePodNetwork(%s)" % ", ".join(
+ k + "=" + repr(v) for k, v in self._values.items()
+ )
+
+
  @jsii.data_type(
  jsii_type="aws-cdk-lib.aws_eks.Selector",
  jsii_struct_bases=[],
@@ -17986,11 +18222,16 @@ class Cluster(

  Example::

+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
+
  # or
  # vpc: ec2.Vpc
+
+
  eks.Cluster(self, "MyCluster",
  kubectl_memory=Size.gibibytes(4),
- version=eks.KubernetesVersion.V1_31
+ version=eks.KubernetesVersion.V1_32,
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
  )
  eks.Cluster.from_cluster_attributes(self, "MyCluster",
  kubectl_memory=Size.gibibytes(4),
@@ -18010,6 +18251,7 @@ class Cluster(
  default_capacity_type: typing.Optional[DefaultCapacityType] = None,
  kubectl_lambda_role: typing.Optional[_IRole_235f5d8e] = None,
  tags: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
+ kubectl_layer: _ILayerVersion_5ac127c8,
  alb_controller: typing.Optional[typing.Union[AlbControllerOptions, typing.Dict[builtins.str, typing.Any]]] = None,
  authentication_mode: typing.Optional[AuthenticationMode] = None,
  awscli_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
@@ -18020,13 +18262,14 @@ class Cluster(
  endpoint_access: typing.Optional[EndpointAccess] = None,
  ip_family: typing.Optional[IpFamily] = None,
  kubectl_environment: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
- kubectl_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
  kubectl_memory: typing.Optional[_Size_7b441c34] = None,
  masters_role: typing.Optional[_IRole_235f5d8e] = None,
  on_event_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
  output_masters_role_arn: typing.Optional[builtins.bool] = None,
  place_cluster_handler_in_vpc: typing.Optional[builtins.bool] = None,
  prune: typing.Optional[builtins.bool] = None,
+ remote_node_networks: typing.Optional[typing.Sequence[typing.Union[RemoteNodeNetwork, typing.Dict[builtins.str, typing.Any]]]] = None,
+ remote_pod_networks: typing.Optional[typing.Sequence[typing.Union[RemotePodNetwork, typing.Dict[builtins.str, typing.Any]]]] = None,
  secrets_encryption_key: typing.Optional[_IKey_5f11635f] = None,
  service_ipv4_cidr: typing.Optional[builtins.str] = None,
  version: KubernetesVersion,
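Because `kubectl_layer` moves from an optional keyword to a required argument in the `Cluster` signature shown above, existing stacks that previously relied on the bundled default must now pass a layer explicitly even if they stay on an older Kubernetes version — a minimal migration sketch, assuming a stack still pinned to 1.31:

```python
from aws_cdk import aws_eks as eks
from aws_cdk.lambda_layer_kubectl_v31 import KubectlV31Layer

# Before 2.179.0 the layer could be omitted; now it must always be supplied.
cluster = eks.Cluster(self, "ExistingCluster",
    version=eks.KubernetesVersion.V1_31,
    kubectl_layer=KubectlV31Layer(self, "kubectl"),
)
```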
@@ -18048,6 +18291,7 @@ class Cluster(
  :param default_capacity_type: The default capacity type for the cluster. Default: NODEGROUP
  :param kubectl_lambda_role: The IAM role to pass to the Kubectl Lambda Handler. Default: - Default Lambda IAM Execution Role
  :param tags: The tags assigned to the EKS cluster. Default: - none
+ :param kubectl_layer: An AWS Lambda Layer which includes ``kubectl`` and Helm. This layer is used by the kubectl handler to apply manifests and install helm charts. You must pick an appropriate releases of one of the ``@aws-cdk/layer-kubectl-vXX`` packages, that works with the version of Kubernetes you have chosen. The handler expects the layer to include the following executables:: /opt/helm/helm /opt/kubectl/kubectl
  :param alb_controller: Install the AWS Load Balancer Controller onto the cluster. Default: - The controller is not installed.
  :param authentication_mode: The desired authentication mode for the cluster. Default: AuthenticationMode.CONFIG_MAP
  :param awscli_layer: An AWS Lambda layer that contains the ``aws`` CLI. The handler expects the layer to include the following executables:: /opt/awscli/aws Default: - a default layer with the AWS CLI 1.x
@@ -18058,13 +18302,14 @@ class Cluster(
  :param endpoint_access: Configure access to the Kubernetes API server endpoint.. Default: EndpointAccess.PUBLIC_AND_PRIVATE
  :param ip_family: Specify which IP family is used to assign Kubernetes pod and service IP addresses. Default: - IpFamily.IP_V4
  :param kubectl_environment: Environment variables for the kubectl execution. Only relevant for kubectl enabled clusters. Default: - No environment variables.
- :param kubectl_layer: An AWS Lambda Layer which includes ``kubectl`` and Helm. This layer is used by the kubectl handler to apply manifests and install helm charts. You must pick an appropriate releases of one of the ``@aws-cdk/layer-kubectl-vXX`` packages, that works with the version of Kubernetes you have chosen. If you don't supply this value ``kubectl`` 1.20 will be used, but that version is most likely too old. The handler expects the layer to include the following executables:: /opt/helm/helm /opt/kubectl/kubectl Default: - a default layer with Kubectl 1.20.
  :param kubectl_memory: Amount of memory to allocate to the provider's lambda function. Default: Size.gibibytes(1)
  :param masters_role: An IAM role that will be added to the ``system:masters`` Kubernetes RBAC group. Default: - no masters role.
  :param on_event_layer: An AWS Lambda Layer which includes the NPM dependency ``proxy-agent``. This layer is used by the onEvent handler to route AWS SDK requests through a proxy. By default, the provider will use the layer included in the "aws-lambda-layer-node-proxy-agent" SAR application which is available in all commercial regions. To deploy the layer locally define it in your app as follows:: const layer = new lambda.LayerVersion(this, 'proxy-agent-layer', { code: lambda.Code.fromAsset(`${__dirname}/layer.zip`), compatibleRuntimes: [lambda.Runtime.NODEJS_LATEST], }); Default: - a layer bundled with this module.
  :param output_masters_role_arn: Determines whether a CloudFormation output with the ARN of the "masters" IAM role will be synthesized (if ``mastersRole`` is specified). Default: false
  :param place_cluster_handler_in_vpc: If set to true, the cluster handler functions will be placed in the private subnets of the cluster vpc, subject to the ``vpcSubnets`` selection strategy. Default: false
  :param prune: Indicates whether Kubernetes resources added through ``addManifest()`` can be automatically pruned. When this is enabled (default), prune labels will be allocated and injected to each resource. These labels will then be used when issuing the ``kubectl apply`` operation with the ``--prune`` switch. Default: true
+ :param remote_node_networks: IPv4 CIDR blocks defining the expected address range of hybrid nodes that will join the cluster. Default: - none
+ :param remote_pod_networks: IPv4 CIDR blocks for Pods running Kubernetes webhooks on hybrid nodes. Default: - none
  :param secrets_encryption_key: KMS secret for envelope encryption for Kubernetes secrets. Default: - By default, Kubernetes stores all secret object data within etcd and all etcd volumes used by Amazon EKS are encrypted at the disk-level using AWS-Managed encryption keys.
  :param service_ipv4_cidr: The CIDR block to assign Kubernetes service IP addresses from. Default: - Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks
  :param version: The Kubernetes version to run in the cluster.
@@ -18087,6 +18332,7 @@ class Cluster(
  default_capacity_type=default_capacity_type,
  kubectl_lambda_role=kubectl_lambda_role,
  tags=tags,
+ kubectl_layer=kubectl_layer,
  alb_controller=alb_controller,
  authentication_mode=authentication_mode,
  awscli_layer=awscli_layer,
@@ -18097,13 +18343,14 @@ class Cluster(
  endpoint_access=endpoint_access,
  ip_family=ip_family,
  kubectl_environment=kubectl_environment,
- kubectl_layer=kubectl_layer,
  kubectl_memory=kubectl_memory,
  masters_role=masters_role,
  on_event_layer=on_event_layer,
  output_masters_role_arn=output_masters_role_arn,
  place_cluster_handler_in_vpc=place_cluster_handler_in_vpc,
  prune=prune,
+ remote_node_networks=remote_node_networks,
+ remote_pod_networks=remote_pod_networks,
  secrets_encryption_key=secrets_encryption_key,
  service_ipv4_cidr=service_ipv4_cidr,
  version=version,
@@ -18161,7 +18408,7 @@ class Cluster(
  :param ip_family: Specify which IP family is used to assign Kubernetes pod and service IP addresses. Default: - IpFamily.IP_V4
  :param kubectl_environment: Environment variables to use when running ``kubectl`` against this cluster. Default: - no additional variables
  :param kubectl_lambda_role: An IAM role that can perform kubectl operations against this cluster. The role should be mapped to the ``system:masters`` Kubernetes RBAC role. This role is directly passed to the lambda handler that sends Kube Ctl commands to the cluster. Default: - if not specified, the default role created by a lambda function will be used.
- :param kubectl_layer: An AWS Lambda Layer which includes ``kubectl`` and Helm. This layer is used by the kubectl handler to apply manifests and install helm charts. You must pick an appropriate releases of one of the ``@aws-cdk/layer-kubectl-vXX`` packages, that works with the version of Kubernetes you have chosen. If you don't supply this value ``kubectl`` 1.20 will be used, but that version is most likely too old. The handler expects the layer to include the following executables:: /opt/helm/helm /opt/kubectl/kubectl Default: - a default layer with Kubectl 1.20 and helm 3.8.
+ :param kubectl_layer: An AWS Lambda Layer which includes ``kubectl`` and Helm. This layer is used by the kubectl handler to apply manifests and install helm charts. You must pick an appropriate releases of one of the ``@aws-cdk/layer-kubectl-vXX`` packages, that works with the version of Kubernetes you have chosen. The handler expects the layer to include the following executables:: /opt/helm/helm /opt/kubectl/kubectl Default: - No default layer will be provided
  :param kubectl_memory: Amount of memory to allocate to the provider's lambda function. Default: Size.gibibytes(1)
  :param kubectl_private_subnet_ids: Subnets to host the ``kubectl`` compute resources. If not specified, the k8s endpoint is expected to be accessible publicly. Default: - k8s endpoint is expected to be accessible publicly
  :param kubectl_provider: KubectlProvider for issuing kubectl commands. Default: - Default CDK provider
@@ -18953,10 +19200,7 @@ class Cluster(
  @builtins.property
  @jsii.member(jsii_name="kubectlLayer")
  def kubectl_layer(self) -> typing.Optional[_ILayerVersion_5ac127c8]:
- '''An AWS Lambda layer that includes ``kubectl`` and ``helm``.
-
- If not defined, a default layer will be used containing Kubectl 1.20 and Helm 3.8
- '''
+ '''An AWS Lambda layer that includes ``kubectl`` and ``helm``.'''
  return typing.cast(typing.Optional[_ILayerVersion_5ac127c8], jsii.get(self, "kubectlLayer"))

  @builtins.property
@@ -19023,6 +19267,7 @@ class Cluster(
  "security_group": "securityGroup",
  "vpc": "vpc",
  "vpc_subnets": "vpcSubnets",
+ "kubectl_layer": "kubectlLayer",
  "alb_controller": "albController",
  "authentication_mode": "authenticationMode",
  "awscli_layer": "awscliLayer",
@@ -19033,13 +19278,14 @@ class ClusterOptions(CommonClusterOptions):
  "endpoint_access": "endpointAccess",
  "ip_family": "ipFamily",
  "kubectl_environment": "kubectlEnvironment",
- "kubectl_layer": "kubectlLayer",
  "kubectl_memory": "kubectlMemory",
  "masters_role": "mastersRole",
  "on_event_layer": "onEventLayer",
  "output_masters_role_arn": "outputMastersRoleArn",
  "place_cluster_handler_in_vpc": "placeClusterHandlerInVpc",
  "prune": "prune",
+ "remote_node_networks": "remoteNodeNetworks",
+ "remote_pod_networks": "remotePodNetworks",
  "secrets_encryption_key": "secretsEncryptionKey",
  "service_ipv4_cidr": "serviceIpv4Cidr",
  },
@@ -19056,6 +19302,7 @@ class ClusterOptions(CommonClusterOptions):
  security_group: typing.Optional[_ISecurityGroup_acf8a799] = None,
  vpc: typing.Optional[_IVpc_f30d5663] = None,
  vpc_subnets: typing.Optional[typing.Sequence[typing.Union[_SubnetSelection_e57d76df, typing.Dict[builtins.str, typing.Any]]]] = None,
+ kubectl_layer: _ILayerVersion_5ac127c8,
  alb_controller: typing.Optional[typing.Union[AlbControllerOptions, typing.Dict[builtins.str, typing.Any]]] = None,
  authentication_mode: typing.Optional[AuthenticationMode] = None,
  awscli_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
@@ -19066,13 +19313,14 @@ class ClusterOptions(CommonClusterOptions):
  endpoint_access: typing.Optional[EndpointAccess] = None,
  ip_family: typing.Optional[IpFamily] = None,
  kubectl_environment: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
- kubectl_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
  kubectl_memory: typing.Optional[_Size_7b441c34] = None,
  masters_role: typing.Optional[_IRole_235f5d8e] = None,
  on_event_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
  output_masters_role_arn: typing.Optional[builtins.bool] = None,
  place_cluster_handler_in_vpc: typing.Optional[builtins.bool] = None,
  prune: typing.Optional[builtins.bool] = None,
+ remote_node_networks: typing.Optional[typing.Sequence[typing.Union[RemoteNodeNetwork, typing.Dict[builtins.str, typing.Any]]]] = None,
+ remote_pod_networks: typing.Optional[typing.Sequence[typing.Union[RemotePodNetwork, typing.Dict[builtins.str, typing.Any]]]] = None,
  secrets_encryption_key: typing.Optional[_IKey_5f11635f] = None,
  service_ipv4_cidr: typing.Optional[builtins.str] = None,
  ) -> None:
@@ -19086,6 +19334,7 @@ class ClusterOptions(CommonClusterOptions):
  :param security_group: Security Group to use for Control Plane ENIs. Default: - A security group is automatically created
  :param vpc: The VPC in which to create the Cluster. Default: - a VPC with default configuration will be created and can be accessed through ``cluster.vpc``.
  :param vpc_subnets: Where to place EKS Control Plane ENIs. For example, to only select private subnets, supply the following: ``vpcSubnets: [{ subnetType: ec2.SubnetType.PRIVATE_WITH_EGRESS }]`` Default: - All public and private subnets
+ :param kubectl_layer: An AWS Lambda Layer which includes ``kubectl`` and Helm. This layer is used by the kubectl handler to apply manifests and install helm charts. You must pick an appropriate releases of one of the ``@aws-cdk/layer-kubectl-vXX`` packages, that works with the version of Kubernetes you have chosen. The handler expects the layer to include the following executables:: /opt/helm/helm /opt/kubectl/kubectl
  :param alb_controller: Install the AWS Load Balancer Controller onto the cluster. Default: - The controller is not installed.
  :param authentication_mode: The desired authentication mode for the cluster. Default: AuthenticationMode.CONFIG_MAP
  :param awscli_layer: An AWS Lambda layer that contains the ``aws`` CLI. The handler expects the layer to include the following executables:: /opt/awscli/aws Default: - a default layer with the AWS CLI 1.x
@@ -19096,13 +19345,14 @@ class ClusterOptions(CommonClusterOptions):
  :param endpoint_access: Configure access to the Kubernetes API server endpoint.. Default: EndpointAccess.PUBLIC_AND_PRIVATE
  :param ip_family: Specify which IP family is used to assign Kubernetes pod and service IP addresses. Default: - IpFamily.IP_V4
  :param kubectl_environment: Environment variables for the kubectl execution. Only relevant for kubectl enabled clusters. Default: - No environment variables.
- :param kubectl_layer: An AWS Lambda Layer which includes ``kubectl`` and Helm. This layer is used by the kubectl handler to apply manifests and install helm charts. You must pick an appropriate releases of one of the ``@aws-cdk/layer-kubectl-vXX`` packages, that works with the version of Kubernetes you have chosen. If you don't supply this value ``kubectl`` 1.20 will be used, but that version is most likely too old. The handler expects the layer to include the following executables:: /opt/helm/helm /opt/kubectl/kubectl Default: - a default layer with Kubectl 1.20.
  :param kubectl_memory: Amount of memory to allocate to the provider's lambda function. Default: Size.gibibytes(1)
  :param masters_role: An IAM role that will be added to the ``system:masters`` Kubernetes RBAC group. Default: - no masters role.
  :param on_event_layer: An AWS Lambda Layer which includes the NPM dependency ``proxy-agent``. This layer is used by the onEvent handler to route AWS SDK requests through a proxy. By default, the provider will use the layer included in the "aws-lambda-layer-node-proxy-agent" SAR application which is available in all commercial regions. To deploy the layer locally define it in your app as follows:: const layer = new lambda.LayerVersion(this, 'proxy-agent-layer', { code: lambda.Code.fromAsset(`${__dirname}/layer.zip`), compatibleRuntimes: [lambda.Runtime.NODEJS_LATEST], }); Default: - a layer bundled with this module.
  :param output_masters_role_arn: Determines whether a CloudFormation output with the ARN of the "masters" IAM role will be synthesized (if ``mastersRole`` is specified). Default: false
  :param place_cluster_handler_in_vpc: If set to true, the cluster handler functions will be placed in the private subnets of the cluster vpc, subject to the ``vpcSubnets`` selection strategy. Default: false
  :param prune: Indicates whether Kubernetes resources added through ``addManifest()`` can be automatically pruned. When this is enabled (default), prune labels will be allocated and injected to each resource. These labels will then be used when issuing the ``kubectl apply`` operation with the ``--prune`` switch. Default: true
+ :param remote_node_networks: IPv4 CIDR blocks defining the expected address range of hybrid nodes that will join the cluster. Default: - none
+ :param remote_pod_networks: IPv4 CIDR blocks for Pods running Kubernetes webhooks on hybrid nodes. Default: - none
  :param secrets_encryption_key: KMS secret for envelope encryption for Kubernetes secrets. Default: - By default, Kubernetes stores all secret object data within etcd and all etcd volumes used by Amazon EKS are encrypted at the disk-level using AWS-Managed encryption keys.
  :param service_ipv4_cidr: The CIDR block to assign Kubernetes service IP addresses from. Default: - Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks

@@ -19133,6 +19383,7 @@ class ClusterOptions(CommonClusterOptions):
  # vpc: ec2.Vpc

  cluster_options = eks.ClusterOptions(
+ kubectl_layer=layer_version,
  version=kubernetes_version,

  # the properties below are optional
@@ -19157,7 +19408,6 @@ class ClusterOptions(CommonClusterOptions):
  kubectl_environment={
  "kubectl_environment_key": "kubectlEnvironment"
  },
- kubectl_layer=layer_version,
  kubectl_memory=size,
  masters_role=role,
  on_event_layer=layer_version,
@@ -19166,6 +19416,12 @@ class ClusterOptions(CommonClusterOptions):
  output_masters_role_arn=False,
  place_cluster_handler_in_vpc=False,
  prune=False,
+ remote_node_networks=[eks.RemoteNodeNetwork(
+ cidrs=["cidrs"]
+ )],
+ remote_pod_networks=[eks.RemotePodNetwork(
+ cidrs=["cidrs"]
+ )],
  role=role,
  secrets_encryption_key=key,
  security_group=security_group,
@@ -19193,6 +19449,7 @@ class ClusterOptions(CommonClusterOptions):
  check_type(argname="argument security_group", value=security_group, expected_type=type_hints["security_group"])
  check_type(argname="argument vpc", value=vpc, expected_type=type_hints["vpc"])
  check_type(argname="argument vpc_subnets", value=vpc_subnets, expected_type=type_hints["vpc_subnets"])
+ check_type(argname="argument kubectl_layer", value=kubectl_layer, expected_type=type_hints["kubectl_layer"])
  check_type(argname="argument alb_controller", value=alb_controller, expected_type=type_hints["alb_controller"])
  check_type(argname="argument authentication_mode", value=authentication_mode, expected_type=type_hints["authentication_mode"])
  check_type(argname="argument awscli_layer", value=awscli_layer, expected_type=type_hints["awscli_layer"])
@@ -19203,17 +19460,19 @@ class ClusterOptions(CommonClusterOptions):
19203
19460
  check_type(argname="argument endpoint_access", value=endpoint_access, expected_type=type_hints["endpoint_access"])
19204
19461
  check_type(argname="argument ip_family", value=ip_family, expected_type=type_hints["ip_family"])
19205
19462
  check_type(argname="argument kubectl_environment", value=kubectl_environment, expected_type=type_hints["kubectl_environment"])
19206
- check_type(argname="argument kubectl_layer", value=kubectl_layer, expected_type=type_hints["kubectl_layer"])
19207
19463
  check_type(argname="argument kubectl_memory", value=kubectl_memory, expected_type=type_hints["kubectl_memory"])
19208
19464
  check_type(argname="argument masters_role", value=masters_role, expected_type=type_hints["masters_role"])
19209
19465
  check_type(argname="argument on_event_layer", value=on_event_layer, expected_type=type_hints["on_event_layer"])
19210
19466
  check_type(argname="argument output_masters_role_arn", value=output_masters_role_arn, expected_type=type_hints["output_masters_role_arn"])
19211
19467
  check_type(argname="argument place_cluster_handler_in_vpc", value=place_cluster_handler_in_vpc, expected_type=type_hints["place_cluster_handler_in_vpc"])
19212
19468
  check_type(argname="argument prune", value=prune, expected_type=type_hints["prune"])
19469
+ check_type(argname="argument remote_node_networks", value=remote_node_networks, expected_type=type_hints["remote_node_networks"])
19470
+ check_type(argname="argument remote_pod_networks", value=remote_pod_networks, expected_type=type_hints["remote_pod_networks"])
19213
19471
  check_type(argname="argument secrets_encryption_key", value=secrets_encryption_key, expected_type=type_hints["secrets_encryption_key"])
19214
19472
  check_type(argname="argument service_ipv4_cidr", value=service_ipv4_cidr, expected_type=type_hints["service_ipv4_cidr"])
19215
19473
  self._values: typing.Dict[builtins.str, typing.Any] = {
19216
19474
  "version": version,
19475
+ "kubectl_layer": kubectl_layer,
19217
19476
  }
19218
19477
  if cluster_name is not None:
19219
19478
  self._values["cluster_name"] = cluster_name
@@ -19249,8 +19508,6 @@ class ClusterOptions(CommonClusterOptions):
19249
19508
  self._values["ip_family"] = ip_family
19250
19509
  if kubectl_environment is not None:
19251
19510
  self._values["kubectl_environment"] = kubectl_environment
19252
- if kubectl_layer is not None:
19253
- self._values["kubectl_layer"] = kubectl_layer
19254
19511
  if kubectl_memory is not None:
19255
19512
  self._values["kubectl_memory"] = kubectl_memory
19256
19513
  if masters_role is not None:
@@ -19263,6 +19520,10 @@ class ClusterOptions(CommonClusterOptions):
19263
19520
  self._values["place_cluster_handler_in_vpc"] = place_cluster_handler_in_vpc
19264
19521
  if prune is not None:
19265
19522
  self._values["prune"] = prune
19523
+ if remote_node_networks is not None:
19524
+ self._values["remote_node_networks"] = remote_node_networks
19525
+ if remote_pod_networks is not None:
19526
+ self._values["remote_pod_networks"] = remote_pod_networks
19266
19527
  if secrets_encryption_key is not None:
19267
19528
  self._values["secrets_encryption_key"] = secrets_encryption_key
19268
19529
  if service_ipv4_cidr is not None:
@@ -19345,6 +19606,24 @@ class ClusterOptions(CommonClusterOptions):
19345
19606
  result = self._values.get("vpc_subnets")
19346
19607
  return typing.cast(typing.Optional[typing.List[_SubnetSelection_e57d76df]], result)
19347
19608
 
19609
+ @builtins.property
19610
+ def kubectl_layer(self) -> _ILayerVersion_5ac127c8:
19611
+ '''An AWS Lambda Layer which includes ``kubectl`` and Helm.
19612
+
19613
+ This layer is used by the kubectl handler to apply manifests and install
19614
+ helm charts. You must pick an appropriate release of one of the
19615
+ ``@aws-cdk/layer-kubectl-vXX`` packages that works with the version of
19616
+ Kubernetes you have chosen.
19617
+
19618
+ The handler expects the layer to include the following executables::
19619
+
19620
+ /opt/helm/helm
19621
+ /opt/kubectl/kubectl
19622
+ '''
19623
+ result = self._values.get("kubectl_layer")
19624
+ assert result is not None, "Required property 'kubectl_layer' is missing"
19625
+ return typing.cast(_ILayerVersion_5ac127c8, result)
19626
+
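
As the added lines above show, `kubectlLayer` is now a required property of `ClusterOptions`: its getter asserts the value is present instead of returning an optional, and the `kubectl` 1.20 fallback described in the removed docstring further down is gone. A minimal sketch of the new call pattern, assuming the separately published `aws_cdk.lambda_layer_kubectl_v32` layer package is installed and using hypothetical stack and construct names:

```python
from constructs import Construct
from aws_cdk import Stack, aws_eks as eks
from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer


class EksClusterStack(Stack):
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # kubectl_layer must now be supplied explicitly; pick the layer
        # release that matches the cluster's Kubernetes version.
        eks.Cluster(
            self, "Cluster",
            version=eks.KubernetesVersion.V1_32,
            kubectl_layer=KubectlV32Layer(self, "KubectlLayer"),
        )
```

Pinning a versioned layer (here `KubectlV32Layer` for a `V1_32` control plane) keeps the kubectl handler's `kubectl` and `helm` binaries in step with the API server they manage.
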
19348
19627
  @builtins.property
19349
19628
  def alb_controller(self) -> typing.Optional[AlbControllerOptions]:
19350
19629
  '''Install the AWS Load Balancer Controller onto the cluster.
@@ -19457,26 +19736,6 @@ class ClusterOptions(CommonClusterOptions):
19457
19736
  result = self._values.get("kubectl_environment")
19458
19737
  return typing.cast(typing.Optional[typing.Mapping[builtins.str, builtins.str]], result)
19459
19738
 
19460
- @builtins.property
19461
- def kubectl_layer(self) -> typing.Optional[_ILayerVersion_5ac127c8]:
19462
- '''An AWS Lambda Layer which includes ``kubectl`` and Helm.
19463
-
19464
- This layer is used by the kubectl handler to apply manifests and install
19465
- helm charts. You must pick an appropriate releases of one of the
19466
- ``@aws-cdk/layer-kubectl-vXX`` packages, that works with the version of
19467
- Kubernetes you have chosen. If you don't supply this value ``kubectl``
19468
- 1.20 will be used, but that version is most likely too old.
19469
-
19470
- The handler expects the layer to include the following executables::
19471
-
19472
- /opt/helm/helm
19473
- /opt/kubectl/kubectl
19474
-
19475
- :default: - a default layer with Kubectl 1.20.
19476
- '''
19477
- result = self._values.get("kubectl_layer")
19478
- return typing.cast(typing.Optional[_ILayerVersion_5ac127c8], result)
19479
-
19480
19739
  @builtins.property
19481
19740
  def kubectl_memory(self) -> typing.Optional[_Size_7b441c34]:
19482
19741
  '''Amount of memory to allocate to the provider's lambda function.
@@ -19551,6 +19810,24 @@ class ClusterOptions(CommonClusterOptions):
19551
19810
  result = self._values.get("prune")
19552
19811
  return typing.cast(typing.Optional[builtins.bool], result)
19553
19812
 
19813
+ @builtins.property
19814
+ def remote_node_networks(self) -> typing.Optional[typing.List[RemoteNodeNetwork]]:
19815
+ '''IPv4 CIDR blocks defining the expected address range of hybrid nodes that will join the cluster.
19816
+
19817
+ :default: - none
19818
+ '''
19819
+ result = self._values.get("remote_node_networks")
19820
+ return typing.cast(typing.Optional[typing.List[RemoteNodeNetwork]], result)
19821
+
19822
+ @builtins.property
19823
+ def remote_pod_networks(self) -> typing.Optional[typing.List[RemotePodNetwork]]:
19824
+ '''IPv4 CIDR blocks for Pods running Kubernetes webhooks on hybrid nodes.
19825
+
19826
+ :default: - none
19827
+ '''
19828
+ result = self._values.get("remote_pod_networks")
19829
+ return typing.cast(typing.Optional[typing.List[RemotePodNetwork]], result)
19830
+
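
The two optional properties added above surface hybrid-node networking on `ClusterOptions`: `remoteNodeNetworks` lists the CIDR ranges that hybrid nodes will join the cluster from, and `remotePodNetworks` lists the ranges used by pods that run Kubernetes webhooks on those nodes. A sketch of passing both, using a hypothetical helper and placeholder CIDR ranges:

```python
from aws_cdk import Stack, aws_eks as eks
from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer


def add_hybrid_cluster(stack: Stack) -> eks.Cluster:
    # The CIDRs below are placeholders; substitute the on-premises node and
    # pod networks that your hybrid nodes actually use.
    return eks.Cluster(
        stack, "HybridCluster",
        version=eks.KubernetesVersion.V1_32,
        kubectl_layer=KubectlV32Layer(stack, "KubectlLayer"),
        remote_node_networks=[eks.RemoteNodeNetwork(cidrs=["10.200.0.0/16"])],
        remote_pod_networks=[eks.RemotePodNetwork(cidrs=["10.201.0.0/16"])],
    )
```

The same pair of keyword arguments (with the matching `remoteNodeNetworks`/`remotePodNetworks` JSII names) is threaded through `ClusterProps`, `FargateCluster`, and `FargateClusterProps` in the hunks below.
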
19554
19831
  @builtins.property
19555
19832
  def secrets_encryption_key(self) -> typing.Optional[_IKey_5f11635f]:
19556
19833
  '''KMS secret for envelope encryption for Kubernetes secrets.
@@ -19602,6 +19879,7 @@ class ClusterOptions(CommonClusterOptions):
19602
19879
  "security_group": "securityGroup",
19603
19880
  "vpc": "vpc",
19604
19881
  "vpc_subnets": "vpcSubnets",
19882
+ "kubectl_layer": "kubectlLayer",
19605
19883
  "alb_controller": "albController",
19606
19884
  "authentication_mode": "authenticationMode",
19607
19885
  "awscli_layer": "awscliLayer",
@@ -19612,13 +19890,14 @@ class ClusterOptions(CommonClusterOptions):
19612
19890
  "endpoint_access": "endpointAccess",
19613
19891
  "ip_family": "ipFamily",
19614
19892
  "kubectl_environment": "kubectlEnvironment",
19615
- "kubectl_layer": "kubectlLayer",
19616
19893
  "kubectl_memory": "kubectlMemory",
19617
19894
  "masters_role": "mastersRole",
19618
19895
  "on_event_layer": "onEventLayer",
19619
19896
  "output_masters_role_arn": "outputMastersRoleArn",
19620
19897
  "place_cluster_handler_in_vpc": "placeClusterHandlerInVpc",
19621
19898
  "prune": "prune",
19899
+ "remote_node_networks": "remoteNodeNetworks",
19900
+ "remote_pod_networks": "remotePodNetworks",
19622
19901
  "secrets_encryption_key": "secretsEncryptionKey",
19623
19902
  "service_ipv4_cidr": "serviceIpv4Cidr",
19624
19903
  "bootstrap_cluster_creator_admin_permissions": "bootstrapClusterCreatorAdminPermissions",
@@ -19641,6 +19920,7 @@ class ClusterProps(ClusterOptions):
19641
19920
  security_group: typing.Optional[_ISecurityGroup_acf8a799] = None,
19642
19921
  vpc: typing.Optional[_IVpc_f30d5663] = None,
19643
19922
  vpc_subnets: typing.Optional[typing.Sequence[typing.Union[_SubnetSelection_e57d76df, typing.Dict[builtins.str, typing.Any]]]] = None,
19923
+ kubectl_layer: _ILayerVersion_5ac127c8,
19644
19924
  alb_controller: typing.Optional[typing.Union[AlbControllerOptions, typing.Dict[builtins.str, typing.Any]]] = None,
19645
19925
  authentication_mode: typing.Optional[AuthenticationMode] = None,
19646
19926
  awscli_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
@@ -19651,13 +19931,14 @@ class ClusterProps(ClusterOptions):
19651
19931
  endpoint_access: typing.Optional[EndpointAccess] = None,
19652
19932
  ip_family: typing.Optional[IpFamily] = None,
19653
19933
  kubectl_environment: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
19654
- kubectl_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
19655
19934
  kubectl_memory: typing.Optional[_Size_7b441c34] = None,
19656
19935
  masters_role: typing.Optional[_IRole_235f5d8e] = None,
19657
19936
  on_event_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
19658
19937
  output_masters_role_arn: typing.Optional[builtins.bool] = None,
19659
19938
  place_cluster_handler_in_vpc: typing.Optional[builtins.bool] = None,
19660
19939
  prune: typing.Optional[builtins.bool] = None,
19940
+ remote_node_networks: typing.Optional[typing.Sequence[typing.Union[RemoteNodeNetwork, typing.Dict[builtins.str, typing.Any]]]] = None,
19941
+ remote_pod_networks: typing.Optional[typing.Sequence[typing.Union[RemotePodNetwork, typing.Dict[builtins.str, typing.Any]]]] = None,
19661
19942
  secrets_encryption_key: typing.Optional[_IKey_5f11635f] = None,
19662
19943
  service_ipv4_cidr: typing.Optional[builtins.str] = None,
19663
19944
  bootstrap_cluster_creator_admin_permissions: typing.Optional[builtins.bool] = None,
@@ -19677,6 +19958,7 @@ class ClusterProps(ClusterOptions):
19677
19958
  :param security_group: Security Group to use for Control Plane ENIs. Default: - A security group is automatically created
19678
19959
  :param vpc: The VPC in which to create the Cluster. Default: - a VPC with default configuration will be created and can be accessed through ``cluster.vpc``.
19679
19960
  :param vpc_subnets: Where to place EKS Control Plane ENIs. For example, to only select private subnets, supply the following: ``vpcSubnets: [{ subnetType: ec2.SubnetType.PRIVATE_WITH_EGRESS }]`` Default: - All public and private subnets
19961
+ :param kubectl_layer: An AWS Lambda Layer which includes ``kubectl`` and Helm. This layer is used by the kubectl handler to apply manifests and install helm charts. You must pick an appropriate release of one of the ``@aws-cdk/layer-kubectl-vXX`` packages that works with the version of Kubernetes you have chosen. The handler expects the layer to include the following executables:: /opt/helm/helm /opt/kubectl/kubectl
19680
19962
  :param alb_controller: Install the AWS Load Balancer Controller onto the cluster. Default: - The controller is not installed.
19681
19963
  :param authentication_mode: The desired authentication mode for the cluster. Default: AuthenticationMode.CONFIG_MAP
19682
19964
  :param awscli_layer: An AWS Lambda layer that contains the ``aws`` CLI. The handler expects the layer to include the following executables:: /opt/awscli/aws Default: - a default layer with the AWS CLI 1.x
@@ -19687,13 +19969,14 @@ class ClusterProps(ClusterOptions):
19687
19969
  :param endpoint_access: Configure access to the Kubernetes API server endpoint.. Default: EndpointAccess.PUBLIC_AND_PRIVATE
19688
19970
  :param ip_family: Specify which IP family is used to assign Kubernetes pod and service IP addresses. Default: - IpFamily.IP_V4
19689
19971
  :param kubectl_environment: Environment variables for the kubectl execution. Only relevant for kubectl enabled clusters. Default: - No environment variables.
19690
- :param kubectl_layer: An AWS Lambda Layer which includes ``kubectl`` and Helm. This layer is used by the kubectl handler to apply manifests and install helm charts. You must pick an appropriate releases of one of the ``@aws-cdk/layer-kubectl-vXX`` packages, that works with the version of Kubernetes you have chosen. If you don't supply this value ``kubectl`` 1.20 will be used, but that version is most likely too old. The handler expects the layer to include the following executables:: /opt/helm/helm /opt/kubectl/kubectl Default: - a default layer with Kubectl 1.20.
19691
19972
  :param kubectl_memory: Amount of memory to allocate to the provider's lambda function. Default: Size.gibibytes(1)
19692
19973
  :param masters_role: An IAM role that will be added to the ``system:masters`` Kubernetes RBAC group. Default: - no masters role.
19693
19974
  :param on_event_layer: An AWS Lambda Layer which includes the NPM dependency ``proxy-agent``. This layer is used by the onEvent handler to route AWS SDK requests through a proxy. By default, the provider will use the layer included in the "aws-lambda-layer-node-proxy-agent" SAR application which is available in all commercial regions. To deploy the layer locally define it in your app as follows:: const layer = new lambda.LayerVersion(this, 'proxy-agent-layer', { code: lambda.Code.fromAsset(`${__dirname}/layer.zip`), compatibleRuntimes: [lambda.Runtime.NODEJS_LATEST], }); Default: - a layer bundled with this module.
19694
19975
  :param output_masters_role_arn: Determines whether a CloudFormation output with the ARN of the "masters" IAM role will be synthesized (if ``mastersRole`` is specified). Default: false
19695
19976
  :param place_cluster_handler_in_vpc: If set to true, the cluster handler functions will be placed in the private subnets of the cluster vpc, subject to the ``vpcSubnets`` selection strategy. Default: false
19696
19977
  :param prune: Indicates whether Kubernetes resources added through ``addManifest()`` can be automatically pruned. When this is enabled (default), prune labels will be allocated and injected to each resource. These labels will then be used when issuing the ``kubectl apply`` operation with the ``--prune`` switch. Default: true
19978
+ :param remote_node_networks: IPv4 CIDR blocks defining the expected address range of hybrid nodes that will join the cluster. Default: - none
19979
+ :param remote_pod_networks: IPv4 CIDR blocks for Pods running Kubernetes webhooks on hybrid nodes. Default: - none
19697
19980
  :param secrets_encryption_key: KMS secret for envelope encryption for Kubernetes secrets. Default: - By default, Kubernetes stores all secret object data within etcd and all etcd volumes used by Amazon EKS are encrypted at the disk-level using AWS-Managed encryption keys.
19698
19981
  :param service_ipv4_cidr: The CIDR block to assign Kubernetes service IP addresses from. Default: - Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks
19699
19982
  :param bootstrap_cluster_creator_admin_permissions: Whether or not IAM principal of the cluster creator was set as a cluster admin access entry during cluster creation time. Changing this value after the cluster has been created will result in the cluster being replaced. Default: true
@@ -19707,11 +19990,16 @@ class ClusterProps(ClusterOptions):
19707
19990
 
19708
19991
  Example::
19709
19992
 
19993
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
19994
+
19710
19995
  # or
19711
19996
  # vpc: ec2.Vpc
19997
+
19998
+
19712
19999
  eks.Cluster(self, "MyCluster",
19713
20000
  kubectl_memory=Size.gibibytes(4),
19714
- version=eks.KubernetesVersion.V1_31
20001
+ version=eks.KubernetesVersion.V1_32,
20002
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
19715
20003
  )
19716
20004
  eks.Cluster.from_cluster_attributes(self, "MyCluster",
19717
20005
  kubectl_memory=Size.gibibytes(4),
@@ -19731,6 +20019,7 @@ class ClusterProps(ClusterOptions):
19731
20019
  check_type(argname="argument security_group", value=security_group, expected_type=type_hints["security_group"])
19732
20020
  check_type(argname="argument vpc", value=vpc, expected_type=type_hints["vpc"])
19733
20021
  check_type(argname="argument vpc_subnets", value=vpc_subnets, expected_type=type_hints["vpc_subnets"])
20022
+ check_type(argname="argument kubectl_layer", value=kubectl_layer, expected_type=type_hints["kubectl_layer"])
19734
20023
  check_type(argname="argument alb_controller", value=alb_controller, expected_type=type_hints["alb_controller"])
19735
20024
  check_type(argname="argument authentication_mode", value=authentication_mode, expected_type=type_hints["authentication_mode"])
19736
20025
  check_type(argname="argument awscli_layer", value=awscli_layer, expected_type=type_hints["awscli_layer"])
@@ -19741,13 +20030,14 @@ class ClusterProps(ClusterOptions):
19741
20030
  check_type(argname="argument endpoint_access", value=endpoint_access, expected_type=type_hints["endpoint_access"])
19742
20031
  check_type(argname="argument ip_family", value=ip_family, expected_type=type_hints["ip_family"])
19743
20032
  check_type(argname="argument kubectl_environment", value=kubectl_environment, expected_type=type_hints["kubectl_environment"])
19744
- check_type(argname="argument kubectl_layer", value=kubectl_layer, expected_type=type_hints["kubectl_layer"])
19745
20033
  check_type(argname="argument kubectl_memory", value=kubectl_memory, expected_type=type_hints["kubectl_memory"])
19746
20034
  check_type(argname="argument masters_role", value=masters_role, expected_type=type_hints["masters_role"])
19747
20035
  check_type(argname="argument on_event_layer", value=on_event_layer, expected_type=type_hints["on_event_layer"])
19748
20036
  check_type(argname="argument output_masters_role_arn", value=output_masters_role_arn, expected_type=type_hints["output_masters_role_arn"])
19749
20037
  check_type(argname="argument place_cluster_handler_in_vpc", value=place_cluster_handler_in_vpc, expected_type=type_hints["place_cluster_handler_in_vpc"])
19750
20038
  check_type(argname="argument prune", value=prune, expected_type=type_hints["prune"])
20039
+ check_type(argname="argument remote_node_networks", value=remote_node_networks, expected_type=type_hints["remote_node_networks"])
20040
+ check_type(argname="argument remote_pod_networks", value=remote_pod_networks, expected_type=type_hints["remote_pod_networks"])
19751
20041
  check_type(argname="argument secrets_encryption_key", value=secrets_encryption_key, expected_type=type_hints["secrets_encryption_key"])
19752
20042
  check_type(argname="argument service_ipv4_cidr", value=service_ipv4_cidr, expected_type=type_hints["service_ipv4_cidr"])
19753
20043
  check_type(argname="argument bootstrap_cluster_creator_admin_permissions", value=bootstrap_cluster_creator_admin_permissions, expected_type=type_hints["bootstrap_cluster_creator_admin_permissions"])
@@ -19758,6 +20048,7 @@ class ClusterProps(ClusterOptions):
19758
20048
  check_type(argname="argument tags", value=tags, expected_type=type_hints["tags"])
19759
20049
  self._values: typing.Dict[builtins.str, typing.Any] = {
19760
20050
  "version": version,
20051
+ "kubectl_layer": kubectl_layer,
19761
20052
  }
19762
20053
  if cluster_name is not None:
19763
20054
  self._values["cluster_name"] = cluster_name
@@ -19793,8 +20084,6 @@ class ClusterProps(ClusterOptions):
19793
20084
  self._values["ip_family"] = ip_family
19794
20085
  if kubectl_environment is not None:
19795
20086
  self._values["kubectl_environment"] = kubectl_environment
19796
- if kubectl_layer is not None:
19797
- self._values["kubectl_layer"] = kubectl_layer
19798
20087
  if kubectl_memory is not None:
19799
20088
  self._values["kubectl_memory"] = kubectl_memory
19800
20089
  if masters_role is not None:
@@ -19807,6 +20096,10 @@ class ClusterProps(ClusterOptions):
19807
20096
  self._values["place_cluster_handler_in_vpc"] = place_cluster_handler_in_vpc
19808
20097
  if prune is not None:
19809
20098
  self._values["prune"] = prune
20099
+ if remote_node_networks is not None:
20100
+ self._values["remote_node_networks"] = remote_node_networks
20101
+ if remote_pod_networks is not None:
20102
+ self._values["remote_pod_networks"] = remote_pod_networks
19810
20103
  if secrets_encryption_key is not None:
19811
20104
  self._values["secrets_encryption_key"] = secrets_encryption_key
19812
20105
  if service_ipv4_cidr is not None:
@@ -19901,6 +20194,24 @@ class ClusterProps(ClusterOptions):
19901
20194
  result = self._values.get("vpc_subnets")
19902
20195
  return typing.cast(typing.Optional[typing.List[_SubnetSelection_e57d76df]], result)
19903
20196
 
20197
+ @builtins.property
20198
+ def kubectl_layer(self) -> _ILayerVersion_5ac127c8:
20199
+ '''An AWS Lambda Layer which includes ``kubectl`` and Helm.
20200
+
20201
+ This layer is used by the kubectl handler to apply manifests and install
20202
+ helm charts. You must pick an appropriate release of one of the
20203
+ ``@aws-cdk/layer-kubectl-vXX`` packages that works with the version of
20204
+ Kubernetes you have chosen.
20205
+
20206
+ The handler expects the layer to include the following executables::
20207
+
20208
+ /opt/helm/helm
20209
+ /opt/kubectl/kubectl
20210
+ '''
20211
+ result = self._values.get("kubectl_layer")
20212
+ assert result is not None, "Required property 'kubectl_layer' is missing"
20213
+ return typing.cast(_ILayerVersion_5ac127c8, result)
20214
+
19904
20215
  @builtins.property
19905
20216
  def alb_controller(self) -> typing.Optional[AlbControllerOptions]:
19906
20217
  '''Install the AWS Load Balancer Controller onto the cluster.
@@ -20013,26 +20324,6 @@ class ClusterProps(ClusterOptions):
20013
20324
  result = self._values.get("kubectl_environment")
20014
20325
  return typing.cast(typing.Optional[typing.Mapping[builtins.str, builtins.str]], result)
20015
20326
 
20016
- @builtins.property
20017
- def kubectl_layer(self) -> typing.Optional[_ILayerVersion_5ac127c8]:
20018
- '''An AWS Lambda Layer which includes ``kubectl`` and Helm.
20019
-
20020
- This layer is used by the kubectl handler to apply manifests and install
20021
- helm charts. You must pick an appropriate releases of one of the
20022
- ``@aws-cdk/layer-kubectl-vXX`` packages, that works with the version of
20023
- Kubernetes you have chosen. If you don't supply this value ``kubectl``
20024
- 1.20 will be used, but that version is most likely too old.
20025
-
20026
- The handler expects the layer to include the following executables::
20027
-
20028
- /opt/helm/helm
20029
- /opt/kubectl/kubectl
20030
-
20031
- :default: - a default layer with Kubectl 1.20.
20032
- '''
20033
- result = self._values.get("kubectl_layer")
20034
- return typing.cast(typing.Optional[_ILayerVersion_5ac127c8], result)
20035
-
20036
20327
  @builtins.property
20037
20328
  def kubectl_memory(self) -> typing.Optional[_Size_7b441c34]:
20038
20329
  '''Amount of memory to allocate to the provider's lambda function.
@@ -20107,6 +20398,24 @@ class ClusterProps(ClusterOptions):
20107
20398
  result = self._values.get("prune")
20108
20399
  return typing.cast(typing.Optional[builtins.bool], result)
20109
20400
 
20401
+ @builtins.property
20402
+ def remote_node_networks(self) -> typing.Optional[typing.List[RemoteNodeNetwork]]:
20403
+ '''IPv4 CIDR blocks defining the expected address range of hybrid nodes that will join the cluster.
20404
+
20405
+ :default: - none
20406
+ '''
20407
+ result = self._values.get("remote_node_networks")
20408
+ return typing.cast(typing.Optional[typing.List[RemoteNodeNetwork]], result)
20409
+
20410
+ @builtins.property
20411
+ def remote_pod_networks(self) -> typing.Optional[typing.List[RemotePodNetwork]]:
20412
+ '''IPv4 CIDR blocks for Pods running Kubernetes webhooks on hybrid nodes.
20413
+
20414
+ :default: - none
20415
+ '''
20416
+ result = self._values.get("remote_pod_networks")
20417
+ return typing.cast(typing.Optional[typing.List[RemotePodNetwork]], result)
20418
+
20110
20419
  @builtins.property
20111
20420
  def secrets_encryption_key(self) -> typing.Optional[_IKey_5f11635f]:
20112
20421
  '''KMS secret for envelope encryption for Kubernetes secrets.
@@ -20228,8 +20537,12 @@ class FargateCluster(
20228
20537
 
20229
20538
  Example::
20230
20539
 
20540
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
20541
+
20542
+
20231
20543
  cluster = eks.FargateCluster(self, "MyCluster",
20232
- version=eks.KubernetesVersion.V1_31
20544
+ version=eks.KubernetesVersion.V1_32,
20545
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
20233
20546
  )
20234
20547
  '''
20235
20548
 
@@ -20239,6 +20552,7 @@ class FargateCluster(
20239
20552
  id: builtins.str,
20240
20553
  *,
20241
20554
  default_profile: typing.Optional[typing.Union[FargateProfileOptions, typing.Dict[builtins.str, typing.Any]]] = None,
20555
+ kubectl_layer: _ILayerVersion_5ac127c8,
20242
20556
  alb_controller: typing.Optional[typing.Union[AlbControllerOptions, typing.Dict[builtins.str, typing.Any]]] = None,
20243
20557
  authentication_mode: typing.Optional[AuthenticationMode] = None,
20244
20558
  awscli_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
@@ -20249,13 +20563,14 @@ class FargateCluster(
20249
20563
  endpoint_access: typing.Optional[EndpointAccess] = None,
20250
20564
  ip_family: typing.Optional[IpFamily] = None,
20251
20565
  kubectl_environment: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
20252
- kubectl_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
20253
20566
  kubectl_memory: typing.Optional[_Size_7b441c34] = None,
20254
20567
  masters_role: typing.Optional[_IRole_235f5d8e] = None,
20255
20568
  on_event_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
20256
20569
  output_masters_role_arn: typing.Optional[builtins.bool] = None,
20257
20570
  place_cluster_handler_in_vpc: typing.Optional[builtins.bool] = None,
20258
20571
  prune: typing.Optional[builtins.bool] = None,
20572
+ remote_node_networks: typing.Optional[typing.Sequence[typing.Union[RemoteNodeNetwork, typing.Dict[builtins.str, typing.Any]]]] = None,
20573
+ remote_pod_networks: typing.Optional[typing.Sequence[typing.Union[RemotePodNetwork, typing.Dict[builtins.str, typing.Any]]]] = None,
20259
20574
  secrets_encryption_key: typing.Optional[_IKey_5f11635f] = None,
20260
20575
  service_ipv4_cidr: typing.Optional[builtins.str] = None,
20261
20576
  version: KubernetesVersion,
@@ -20271,6 +20586,7 @@ class FargateCluster(
20271
20586
  :param scope: -
20272
20587
  :param id: -
20273
20588
  :param default_profile: Fargate Profile to create along with the cluster. Default: - A profile called "default" with 'default' and 'kube-system' selectors will be created if this is left undefined.
20589
+ :param kubectl_layer: An AWS Lambda Layer which includes ``kubectl`` and Helm. This layer is used by the kubectl handler to apply manifests and install helm charts. You must pick an appropriate release of one of the ``@aws-cdk/layer-kubectl-vXX`` packages that works with the version of Kubernetes you have chosen. The handler expects the layer to include the following executables:: /opt/helm/helm /opt/kubectl/kubectl
20274
20590
  :param alb_controller: Install the AWS Load Balancer Controller onto the cluster. Default: - The controller is not installed.
20275
20591
  :param authentication_mode: The desired authentication mode for the cluster. Default: AuthenticationMode.CONFIG_MAP
20276
20592
  :param awscli_layer: An AWS Lambda layer that contains the ``aws`` CLI. The handler expects the layer to include the following executables:: /opt/awscli/aws Default: - a default layer with the AWS CLI 1.x
@@ -20281,13 +20597,14 @@ class FargateCluster(
20281
20597
  :param endpoint_access: Configure access to the Kubernetes API server endpoint.. Default: EndpointAccess.PUBLIC_AND_PRIVATE
20282
20598
  :param ip_family: Specify which IP family is used to assign Kubernetes pod and service IP addresses. Default: - IpFamily.IP_V4
20283
20599
  :param kubectl_environment: Environment variables for the kubectl execution. Only relevant for kubectl enabled clusters. Default: - No environment variables.
20284
- :param kubectl_layer: An AWS Lambda Layer which includes ``kubectl`` and Helm. This layer is used by the kubectl handler to apply manifests and install helm charts. You must pick an appropriate releases of one of the ``@aws-cdk/layer-kubectl-vXX`` packages, that works with the version of Kubernetes you have chosen. If you don't supply this value ``kubectl`` 1.20 will be used, but that version is most likely too old. The handler expects the layer to include the following executables:: /opt/helm/helm /opt/kubectl/kubectl Default: - a default layer with Kubectl 1.20.
20285
20600
  :param kubectl_memory: Amount of memory to allocate to the provider's lambda function. Default: Size.gibibytes(1)
20286
20601
  :param masters_role: An IAM role that will be added to the ``system:masters`` Kubernetes RBAC group. Default: - no masters role.
20287
20602
  :param on_event_layer: An AWS Lambda Layer which includes the NPM dependency ``proxy-agent``. This layer is used by the onEvent handler to route AWS SDK requests through a proxy. By default, the provider will use the layer included in the "aws-lambda-layer-node-proxy-agent" SAR application which is available in all commercial regions. To deploy the layer locally define it in your app as follows:: const layer = new lambda.LayerVersion(this, 'proxy-agent-layer', { code: lambda.Code.fromAsset(`${__dirname}/layer.zip`), compatibleRuntimes: [lambda.Runtime.NODEJS_LATEST], }); Default: - a layer bundled with this module.
20288
20603
  :param output_masters_role_arn: Determines whether a CloudFormation output with the ARN of the "masters" IAM role will be synthesized (if ``mastersRole`` is specified). Default: false
20289
20604
  :param place_cluster_handler_in_vpc: If set to true, the cluster handler functions will be placed in the private subnets of the cluster vpc, subject to the ``vpcSubnets`` selection strategy. Default: false
20290
20605
  :param prune: Indicates whether Kubernetes resources added through ``addManifest()`` can be automatically pruned. When this is enabled (default), prune labels will be allocated and injected to each resource. These labels will then be used when issuing the ``kubectl apply`` operation with the ``--prune`` switch. Default: true
20606
+ :param remote_node_networks: IPv4 CIDR blocks defining the expected address range of hybrid nodes that will join the cluster. Default: - none
20607
+ :param remote_pod_networks: IPv4 CIDR blocks for Pods running Kubernetes webhooks on hybrid nodes. Default: - none
20291
20608
  :param secrets_encryption_key: KMS secret for envelope encryption for Kubernetes secrets. Default: - By default, Kubernetes stores all secret object data within etcd and all etcd volumes used by Amazon EKS are encrypted at the disk-level using AWS-Managed encryption keys.
20292
20609
  :param service_ipv4_cidr: The CIDR block to assign Kubernetes service IP addresses from. Default: - Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks
20293
20610
  :param version: The Kubernetes version to run in the cluster.
@@ -20305,6 +20622,7 @@ class FargateCluster(
20305
20622
  check_type(argname="argument id", value=id, expected_type=type_hints["id"])
20306
20623
  props = FargateClusterProps(
20307
20624
  default_profile=default_profile,
20625
+ kubectl_layer=kubectl_layer,
20308
20626
  alb_controller=alb_controller,
20309
20627
  authentication_mode=authentication_mode,
20310
20628
  awscli_layer=awscli_layer,
@@ -20315,13 +20633,14 @@ class FargateCluster(
20315
20633
  endpoint_access=endpoint_access,
20316
20634
  ip_family=ip_family,
20317
20635
  kubectl_environment=kubectl_environment,
20318
- kubectl_layer=kubectl_layer,
20319
20636
  kubectl_memory=kubectl_memory,
20320
20637
  masters_role=masters_role,
20321
20638
  on_event_layer=on_event_layer,
20322
20639
  output_masters_role_arn=output_masters_role_arn,
20323
20640
  place_cluster_handler_in_vpc=place_cluster_handler_in_vpc,
20324
20641
  prune=prune,
20642
+ remote_node_networks=remote_node_networks,
20643
+ remote_pod_networks=remote_pod_networks,
20325
20644
  secrets_encryption_key=secrets_encryption_key,
20326
20645
  service_ipv4_cidr=service_ipv4_cidr,
20327
20646
  version=version,
@@ -20355,6 +20674,7 @@ class FargateCluster(
20355
20674
  "security_group": "securityGroup",
20356
20675
  "vpc": "vpc",
20357
20676
  "vpc_subnets": "vpcSubnets",
20677
+ "kubectl_layer": "kubectlLayer",
20358
20678
  "alb_controller": "albController",
20359
20679
  "authentication_mode": "authenticationMode",
20360
20680
  "awscli_layer": "awscliLayer",
@@ -20365,13 +20685,14 @@ class FargateCluster(
20365
20685
  "endpoint_access": "endpointAccess",
20366
20686
  "ip_family": "ipFamily",
20367
20687
  "kubectl_environment": "kubectlEnvironment",
20368
- "kubectl_layer": "kubectlLayer",
20369
20688
  "kubectl_memory": "kubectlMemory",
20370
20689
  "masters_role": "mastersRole",
20371
20690
  "on_event_layer": "onEventLayer",
20372
20691
  "output_masters_role_arn": "outputMastersRoleArn",
20373
20692
  "place_cluster_handler_in_vpc": "placeClusterHandlerInVpc",
20374
20693
  "prune": "prune",
20694
+ "remote_node_networks": "remoteNodeNetworks",
20695
+ "remote_pod_networks": "remotePodNetworks",
20375
20696
  "secrets_encryption_key": "secretsEncryptionKey",
20376
20697
  "service_ipv4_cidr": "serviceIpv4Cidr",
20377
20698
  "default_profile": "defaultProfile",
@@ -20389,6 +20710,7 @@ class FargateClusterProps(ClusterOptions):
20389
20710
  security_group: typing.Optional[_ISecurityGroup_acf8a799] = None,
20390
20711
  vpc: typing.Optional[_IVpc_f30d5663] = None,
20391
20712
  vpc_subnets: typing.Optional[typing.Sequence[typing.Union[_SubnetSelection_e57d76df, typing.Dict[builtins.str, typing.Any]]]] = None,
20713
+ kubectl_layer: _ILayerVersion_5ac127c8,
20392
20714
  alb_controller: typing.Optional[typing.Union[AlbControllerOptions, typing.Dict[builtins.str, typing.Any]]] = None,
20393
20715
  authentication_mode: typing.Optional[AuthenticationMode] = None,
20394
20716
  awscli_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
@@ -20399,13 +20721,14 @@ class FargateClusterProps(ClusterOptions):
20399
20721
  endpoint_access: typing.Optional[EndpointAccess] = None,
20400
20722
  ip_family: typing.Optional[IpFamily] = None,
20401
20723
  kubectl_environment: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
20402
- kubectl_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
20403
20724
  kubectl_memory: typing.Optional[_Size_7b441c34] = None,
20404
20725
  masters_role: typing.Optional[_IRole_235f5d8e] = None,
20405
20726
  on_event_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
20406
20727
  output_masters_role_arn: typing.Optional[builtins.bool] = None,
20407
20728
  place_cluster_handler_in_vpc: typing.Optional[builtins.bool] = None,
20408
20729
  prune: typing.Optional[builtins.bool] = None,
20730
+ remote_node_networks: typing.Optional[typing.Sequence[typing.Union[RemoteNodeNetwork, typing.Dict[builtins.str, typing.Any]]]] = None,
20731
+ remote_pod_networks: typing.Optional[typing.Sequence[typing.Union[RemotePodNetwork, typing.Dict[builtins.str, typing.Any]]]] = None,
20409
20732
  secrets_encryption_key: typing.Optional[_IKey_5f11635f] = None,
20410
20733
  service_ipv4_cidr: typing.Optional[builtins.str] = None,
20411
20734
  default_profile: typing.Optional[typing.Union[FargateProfileOptions, typing.Dict[builtins.str, typing.Any]]] = None,
@@ -20420,6 +20743,7 @@ class FargateClusterProps(ClusterOptions):
20420
20743
  :param security_group: Security Group to use for Control Plane ENIs. Default: - A security group is automatically created
20421
20744
  :param vpc: The VPC in which to create the Cluster. Default: - a VPC with default configuration will be created and can be accessed through ``cluster.vpc``.
20422
20745
  :param vpc_subnets: Where to place EKS Control Plane ENIs. For example, to only select private subnets, supply the following: ``vpcSubnets: [{ subnetType: ec2.SubnetType.PRIVATE_WITH_EGRESS }]`` Default: - All public and private subnets
20746
+ :param kubectl_layer: An AWS Lambda Layer which includes ``kubectl`` and Helm. This layer is used by the kubectl handler to apply manifests and install helm charts. You must pick an appropriate release of one of the ``@aws-cdk/layer-kubectl-vXX`` packages that works with the version of Kubernetes you have chosen. The handler expects the layer to include the following executables:: /opt/helm/helm /opt/kubectl/kubectl
20423
20747
  :param alb_controller: Install the AWS Load Balancer Controller onto the cluster. Default: - The controller is not installed.
20424
20748
  :param authentication_mode: The desired authentication mode for the cluster. Default: AuthenticationMode.CONFIG_MAP
20425
20749
  :param awscli_layer: An AWS Lambda layer that contains the ``aws`` CLI. The handler expects the layer to include the following executables:: /opt/awscli/aws Default: - a default layer with the AWS CLI 1.x
@@ -20430,13 +20754,14 @@ class FargateClusterProps(ClusterOptions):
20430
20754
  :param endpoint_access: Configure access to the Kubernetes API server endpoint.. Default: EndpointAccess.PUBLIC_AND_PRIVATE
20431
20755
  :param ip_family: Specify which IP family is used to assign Kubernetes pod and service IP addresses. Default: - IpFamily.IP_V4
20432
20756
  :param kubectl_environment: Environment variables for the kubectl execution. Only relevant for kubectl enabled clusters. Default: - No environment variables.
20433
- :param kubectl_layer: An AWS Lambda Layer which includes ``kubectl`` and Helm. This layer is used by the kubectl handler to apply manifests and install helm charts. You must pick an appropriate releases of one of the ``@aws-cdk/layer-kubectl-vXX`` packages, that works with the version of Kubernetes you have chosen. If you don't supply this value ``kubectl`` 1.20 will be used, but that version is most likely too old. The handler expects the layer to include the following executables:: /opt/helm/helm /opt/kubectl/kubectl Default: - a default layer with Kubectl 1.20.
20434
20757
  :param kubectl_memory: Amount of memory to allocate to the provider's lambda function. Default: Size.gibibytes(1)
20435
20758
  :param masters_role: An IAM role that will be added to the ``system:masters`` Kubernetes RBAC group. Default: - no masters role.
20436
20759
  :param on_event_layer: An AWS Lambda Layer which includes the NPM dependency ``proxy-agent``. This layer is used by the onEvent handler to route AWS SDK requests through a proxy. By default, the provider will use the layer included in the "aws-lambda-layer-node-proxy-agent" SAR application which is available in all commercial regions. To deploy the layer locally define it in your app as follows:: const layer = new lambda.LayerVersion(this, 'proxy-agent-layer', { code: lambda.Code.fromAsset(`${__dirname}/layer.zip`), compatibleRuntimes: [lambda.Runtime.NODEJS_LATEST], }); Default: - a layer bundled with this module.
20437
20760
  :param output_masters_role_arn: Determines whether a CloudFormation output with the ARN of the "masters" IAM role will be synthesized (if ``mastersRole`` is specified). Default: false
20438
20761
  :param place_cluster_handler_in_vpc: If set to true, the cluster handler functions will be placed in the private subnets of the cluster vpc, subject to the ``vpcSubnets`` selection strategy. Default: false
20439
20762
  :param prune: Indicates whether Kubernetes resources added through ``addManifest()`` can be automatically pruned. When this is enabled (default), prune labels will be allocated and injected to each resource. These labels will then be used when issuing the ``kubectl apply`` operation with the ``--prune`` switch. Default: true
20763
+ :param remote_node_networks: IPv4 CIDR blocks defining the expected address range of hybrid nodes that will join the cluster. Default: - none
20764
+ :param remote_pod_networks: IPv4 CIDR blocks for Pods running Kubernetes webhooks on hybrid nodes. Default: - none
20440
20765
  :param secrets_encryption_key: KMS secret for envelope encryption for Kubernetes secrets. Default: - By default, Kubernetes stores all secret object data within etcd and all etcd volumes used by Amazon EKS are encrypted at the disk-level using AWS-Managed encryption keys.
20441
20766
  :param service_ipv4_cidr: The CIDR block to assign Kubernetes service IP addresses from. Default: - Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks
20442
20767
  :param default_profile: Fargate Profile to create along with the cluster. Default: - A profile called "default" with 'default' and 'kube-system' selectors will be created if this is left undefined.
@@ -20445,8 +20770,12 @@ class FargateClusterProps(ClusterOptions):
20445
20770
 
20446
20771
  Example::
20447
20772
 
20773
+ from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer
20774
+
20775
+
20448
20776
  cluster = eks.FargateCluster(self, "MyCluster",
20449
- version=eks.KubernetesVersion.V1_31
20777
+ version=eks.KubernetesVersion.V1_32,
20778
+ kubectl_layer=KubectlV32Layer(self, "kubectl")
20450
20779
  )
20451
20780
  '''
20452
20781
  if isinstance(alb_controller, dict):
@@ -20463,6 +20792,7 @@ class FargateClusterProps(ClusterOptions):
20463
20792
  check_type(argname="argument security_group", value=security_group, expected_type=type_hints["security_group"])
20464
20793
  check_type(argname="argument vpc", value=vpc, expected_type=type_hints["vpc"])
20465
20794
  check_type(argname="argument vpc_subnets", value=vpc_subnets, expected_type=type_hints["vpc_subnets"])
20795
+ check_type(argname="argument kubectl_layer", value=kubectl_layer, expected_type=type_hints["kubectl_layer"])
20466
20796
  check_type(argname="argument alb_controller", value=alb_controller, expected_type=type_hints["alb_controller"])
20467
20797
  check_type(argname="argument authentication_mode", value=authentication_mode, expected_type=type_hints["authentication_mode"])
20468
20798
  check_type(argname="argument awscli_layer", value=awscli_layer, expected_type=type_hints["awscli_layer"])
@@ -20473,18 +20803,20 @@ class FargateClusterProps(ClusterOptions):
20473
20803
  check_type(argname="argument endpoint_access", value=endpoint_access, expected_type=type_hints["endpoint_access"])
20474
20804
  check_type(argname="argument ip_family", value=ip_family, expected_type=type_hints["ip_family"])
20475
20805
  check_type(argname="argument kubectl_environment", value=kubectl_environment, expected_type=type_hints["kubectl_environment"])
20476
- check_type(argname="argument kubectl_layer", value=kubectl_layer, expected_type=type_hints["kubectl_layer"])
20477
20806
  check_type(argname="argument kubectl_memory", value=kubectl_memory, expected_type=type_hints["kubectl_memory"])
20478
20807
  check_type(argname="argument masters_role", value=masters_role, expected_type=type_hints["masters_role"])
20479
20808
  check_type(argname="argument on_event_layer", value=on_event_layer, expected_type=type_hints["on_event_layer"])
20480
20809
  check_type(argname="argument output_masters_role_arn", value=output_masters_role_arn, expected_type=type_hints["output_masters_role_arn"])
20481
20810
  check_type(argname="argument place_cluster_handler_in_vpc", value=place_cluster_handler_in_vpc, expected_type=type_hints["place_cluster_handler_in_vpc"])
20482
20811
  check_type(argname="argument prune", value=prune, expected_type=type_hints["prune"])
20812
+ check_type(argname="argument remote_node_networks", value=remote_node_networks, expected_type=type_hints["remote_node_networks"])
20813
+ check_type(argname="argument remote_pod_networks", value=remote_pod_networks, expected_type=type_hints["remote_pod_networks"])
20483
20814
  check_type(argname="argument secrets_encryption_key", value=secrets_encryption_key, expected_type=type_hints["secrets_encryption_key"])
20484
20815
  check_type(argname="argument service_ipv4_cidr", value=service_ipv4_cidr, expected_type=type_hints["service_ipv4_cidr"])
20485
20816
  check_type(argname="argument default_profile", value=default_profile, expected_type=type_hints["default_profile"])
20486
20817
  self._values: typing.Dict[builtins.str, typing.Any] = {
20487
20818
  "version": version,
20819
+ "kubectl_layer": kubectl_layer,
20488
20820
  }
20489
20821
  if cluster_name is not None:
20490
20822
  self._values["cluster_name"] = cluster_name
@@ -20520,8 +20852,6 @@ class FargateClusterProps(ClusterOptions):
20520
20852
  self._values["ip_family"] = ip_family
20521
20853
  if kubectl_environment is not None:
20522
20854
  self._values["kubectl_environment"] = kubectl_environment
20523
- if kubectl_layer is not None:
20524
- self._values["kubectl_layer"] = kubectl_layer
20525
20855
  if kubectl_memory is not None:
20526
20856
  self._values["kubectl_memory"] = kubectl_memory
20527
20857
  if masters_role is not None:
@@ -20534,6 +20864,10 @@ class FargateClusterProps(ClusterOptions):
20534
20864
  self._values["place_cluster_handler_in_vpc"] = place_cluster_handler_in_vpc
20535
20865
  if prune is not None:
20536
20866
  self._values["prune"] = prune
20867
+ if remote_node_networks is not None:
20868
+ self._values["remote_node_networks"] = remote_node_networks
20869
+ if remote_pod_networks is not None:
20870
+ self._values["remote_pod_networks"] = remote_pod_networks
20537
20871
  if secrets_encryption_key is not None:
20538
20872
  self._values["secrets_encryption_key"] = secrets_encryption_key
20539
20873
  if service_ipv4_cidr is not None:
@@ -20618,6 +20952,24 @@ class FargateClusterProps(ClusterOptions):
20618
20952
  result = self._values.get("vpc_subnets")
20619
20953
  return typing.cast(typing.Optional[typing.List[_SubnetSelection_e57d76df]], result)
20620
20954
 
20955
+ @builtins.property
20956
+ def kubectl_layer(self) -> _ILayerVersion_5ac127c8:
20957
+ '''An AWS Lambda Layer which includes ``kubectl`` and Helm.
20958
+
20959
+ This layer is used by the kubectl handler to apply manifests and install
20960
+ helm charts. You must pick an appropriate release of one of the
20961
+ ``@aws-cdk/layer-kubectl-vXX`` packages that works with the version of
20962
+ Kubernetes you have chosen.
20963
+
20964
+ The handler expects the layer to include the following executables::
20965
+
20966
+ /opt/helm/helm
20967
+ /opt/kubectl/kubectl
20968
+ '''
20969
+ result = self._values.get("kubectl_layer")
20970
+ assert result is not None, "Required property 'kubectl_layer' is missing"
20971
+ return typing.cast(_ILayerVersion_5ac127c8, result)
20972
+
20621
20973
  @builtins.property
20622
20974
  def alb_controller(self) -> typing.Optional[AlbControllerOptions]:
20623
20975
  '''Install the AWS Load Balancer Controller onto the cluster.
@@ -20730,26 +21082,6 @@ class FargateClusterProps(ClusterOptions):
20730
21082
  result = self._values.get("kubectl_environment")
20731
21083
  return typing.cast(typing.Optional[typing.Mapping[builtins.str, builtins.str]], result)
20732
21084
 
20733
- @builtins.property
20734
- def kubectl_layer(self) -> typing.Optional[_ILayerVersion_5ac127c8]:
20735
- '''An AWS Lambda Layer which includes ``kubectl`` and Helm.
20736
-
20737
- This layer is used by the kubectl handler to apply manifests and install
20738
- helm charts. You must pick an appropriate releases of one of the
20739
- ``@aws-cdk/layer-kubectl-vXX`` packages, that works with the version of
20740
- Kubernetes you have chosen. If you don't supply this value ``kubectl``
20741
- 1.20 will be used, but that version is most likely too old.
20742
-
20743
- The handler expects the layer to include the following executables::
20744
-
20745
- /opt/helm/helm
20746
- /opt/kubectl/kubectl
20747
-
20748
- :default: - a default layer with Kubectl 1.20.
20749
- '''
20750
- result = self._values.get("kubectl_layer")
20751
- return typing.cast(typing.Optional[_ILayerVersion_5ac127c8], result)
20752
-
20753
21085
  @builtins.property
20754
21086
  def kubectl_memory(self) -> typing.Optional[_Size_7b441c34]:
20755
21087
  '''Amount of memory to allocate to the provider's lambda function.
@@ -20824,6 +21156,24 @@ class FargateClusterProps(ClusterOptions):
20824
21156
  result = self._values.get("prune")
20825
21157
  return typing.cast(typing.Optional[builtins.bool], result)
20826
21158
 
21159
+ @builtins.property
21160
+ def remote_node_networks(self) -> typing.Optional[typing.List[RemoteNodeNetwork]]:
21161
+ '''IPv4 CIDR blocks defining the expected address range of hybrid nodes that will join the cluster.
21162
+
21163
+ :default: - none
21164
+ '''
21165
+ result = self._values.get("remote_node_networks")
21166
+ return typing.cast(typing.Optional[typing.List[RemoteNodeNetwork]], result)
21167
+
21168
+ @builtins.property
21169
+ def remote_pod_networks(self) -> typing.Optional[typing.List[RemotePodNetwork]]:
21170
+ '''IPv4 CIDR blocks for Pods running Kubernetes webhooks on hybrid nodes.
21171
+
21172
+ :default: - none
21173
+ '''
21174
+ result = self._values.get("remote_pod_networks")
21175
+ return typing.cast(typing.Optional[typing.List[RemotePodNetwork]], result)
21176
+
20827
21177
  @builtins.property
20828
21178
  def secrets_encryption_key(self) -> typing.Optional[_IKey_5f11635f]:
20829
21179
  '''KMS secret for envelope encryption for Kubernetes secrets.
@@ -21038,6 +21388,8 @@ __all__ = [
21038
21388
  "OpenIdConnectProvider",
21039
21389
  "OpenIdConnectProviderProps",
21040
21390
  "PatchType",
21391
+ "RemoteNodeNetwork",
21392
+ "RemotePodNetwork",
21041
21393
  "Selector",
21042
21394
  "ServiceAccount",
21043
21395
  "ServiceAccountOptions",
@@ -22743,6 +23095,20 @@ def _typecheckingstub__c02764139ca6306efb78e2db6695149f8ddc6b3e8adb63a11131864ce
22743
23095
  """Type checking stubs"""
22744
23096
  pass
22745
23097
 
23098
+ def _typecheckingstub__600789f5d1adc105e950fc1e01201ea975b89bb797b63227b757a633425a0f09(
23099
+ *,
23100
+ cidrs: typing.Sequence[builtins.str],
23101
+ ) -> None:
23102
+ """Type checking stubs"""
23103
+ pass
23104
+
23105
+ def _typecheckingstub__f9878a6e6680b6c2c6cb0db908c65c1de65fe68965909386c87176ba98e30705(
23106
+ *,
23107
+ cidrs: typing.Sequence[builtins.str],
23108
+ ) -> None:
23109
+ """Type checking stubs"""
23110
+ pass
23111
+
22746
23112
  def _typecheckingstub__594b3f5a610588bf33bb1a98e98b19b5ddfb0609f59e93022c2cec8d2a17f411(
22747
23113
  *,
22748
23114
  namespace: builtins.str,
@@ -22889,6 +23255,7 @@ def _typecheckingstub__786576ad54eacdb9ab8e92277c0fd07f813bc56d4243937f3b5a85c0c
22889
23255
  default_capacity_type: typing.Optional[DefaultCapacityType] = None,
22890
23256
  kubectl_lambda_role: typing.Optional[_IRole_235f5d8e] = None,
22891
23257
  tags: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
23258
+ kubectl_layer: _ILayerVersion_5ac127c8,
22892
23259
  alb_controller: typing.Optional[typing.Union[AlbControllerOptions, typing.Dict[builtins.str, typing.Any]]] = None,
22893
23260
  authentication_mode: typing.Optional[AuthenticationMode] = None,
22894
23261
  awscli_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
@@ -22899,13 +23266,14 @@ def _typecheckingstub__786576ad54eacdb9ab8e92277c0fd07f813bc56d4243937f3b5a85c0c
22899
23266
  endpoint_access: typing.Optional[EndpointAccess] = None,
22900
23267
  ip_family: typing.Optional[IpFamily] = None,
22901
23268
  kubectl_environment: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
22902
- kubectl_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
22903
23269
  kubectl_memory: typing.Optional[_Size_7b441c34] = None,
22904
23270
  masters_role: typing.Optional[_IRole_235f5d8e] = None,
22905
23271
  on_event_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
22906
23272
  output_masters_role_arn: typing.Optional[builtins.bool] = None,
22907
23273
  place_cluster_handler_in_vpc: typing.Optional[builtins.bool] = None,
22908
23274
  prune: typing.Optional[builtins.bool] = None,
23275
+ remote_node_networks: typing.Optional[typing.Sequence[typing.Union[RemoteNodeNetwork, typing.Dict[builtins.str, typing.Any]]]] = None,
23276
+ remote_pod_networks: typing.Optional[typing.Sequence[typing.Union[RemotePodNetwork, typing.Dict[builtins.str, typing.Any]]]] = None,
22909
23277
  secrets_encryption_key: typing.Optional[_IKey_5f11635f] = None,
22910
23278
  service_ipv4_cidr: typing.Optional[builtins.str] = None,
22911
23279
  version: KubernetesVersion,
@@ -23125,6 +23493,7 @@ def _typecheckingstub__0b45b97fda36b43e872f90f9fe4cde65de855b50b3acfd236c1f400ef
  security_group: typing.Optional[_ISecurityGroup_acf8a799] = None,
  vpc: typing.Optional[_IVpc_f30d5663] = None,
  vpc_subnets: typing.Optional[typing.Sequence[typing.Union[_SubnetSelection_e57d76df, typing.Dict[builtins.str, typing.Any]]]] = None,
+ kubectl_layer: _ILayerVersion_5ac127c8,
  alb_controller: typing.Optional[typing.Union[AlbControllerOptions, typing.Dict[builtins.str, typing.Any]]] = None,
  authentication_mode: typing.Optional[AuthenticationMode] = None,
  awscli_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
@@ -23135,13 +23504,14 @@ def _typecheckingstub__0b45b97fda36b43e872f90f9fe4cde65de855b50b3acfd236c1f400ef
  endpoint_access: typing.Optional[EndpointAccess] = None,
  ip_family: typing.Optional[IpFamily] = None,
  kubectl_environment: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
- kubectl_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
  kubectl_memory: typing.Optional[_Size_7b441c34] = None,
  masters_role: typing.Optional[_IRole_235f5d8e] = None,
  on_event_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
  output_masters_role_arn: typing.Optional[builtins.bool] = None,
  place_cluster_handler_in_vpc: typing.Optional[builtins.bool] = None,
  prune: typing.Optional[builtins.bool] = None,
+ remote_node_networks: typing.Optional[typing.Sequence[typing.Union[RemoteNodeNetwork, typing.Dict[builtins.str, typing.Any]]]] = None,
+ remote_pod_networks: typing.Optional[typing.Sequence[typing.Union[RemotePodNetwork, typing.Dict[builtins.str, typing.Any]]]] = None,
  secrets_encryption_key: typing.Optional[_IKey_5f11635f] = None,
  service_ipv4_cidr: typing.Optional[builtins.str] = None,
  ) -> None:
@@ -23158,6 +23528,7 @@ def _typecheckingstub__ce7a73a63de29ba5e5b5cd5cabde7aca1c4bc7d119de52fc4c0f11d99
  security_group: typing.Optional[_ISecurityGroup_acf8a799] = None,
  vpc: typing.Optional[_IVpc_f30d5663] = None,
  vpc_subnets: typing.Optional[typing.Sequence[typing.Union[_SubnetSelection_e57d76df, typing.Dict[builtins.str, typing.Any]]]] = None,
+ kubectl_layer: _ILayerVersion_5ac127c8,
  alb_controller: typing.Optional[typing.Union[AlbControllerOptions, typing.Dict[builtins.str, typing.Any]]] = None,
  authentication_mode: typing.Optional[AuthenticationMode] = None,
  awscli_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
@@ -23168,13 +23539,14 @@ def _typecheckingstub__ce7a73a63de29ba5e5b5cd5cabde7aca1c4bc7d119de52fc4c0f11d99
  endpoint_access: typing.Optional[EndpointAccess] = None,
  ip_family: typing.Optional[IpFamily] = None,
  kubectl_environment: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
- kubectl_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
  kubectl_memory: typing.Optional[_Size_7b441c34] = None,
  masters_role: typing.Optional[_IRole_235f5d8e] = None,
  on_event_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
  output_masters_role_arn: typing.Optional[builtins.bool] = None,
  place_cluster_handler_in_vpc: typing.Optional[builtins.bool] = None,
  prune: typing.Optional[builtins.bool] = None,
+ remote_node_networks: typing.Optional[typing.Sequence[typing.Union[RemoteNodeNetwork, typing.Dict[builtins.str, typing.Any]]]] = None,
+ remote_pod_networks: typing.Optional[typing.Sequence[typing.Union[RemotePodNetwork, typing.Dict[builtins.str, typing.Any]]]] = None,
  secrets_encryption_key: typing.Optional[_IKey_5f11635f] = None,
  service_ipv4_cidr: typing.Optional[builtins.str] = None,
  bootstrap_cluster_creator_admin_permissions: typing.Optional[builtins.bool] = None,
@@ -23192,6 +23564,7 @@ def _typecheckingstub__ae166d791f5d5176f3386726c22bc44afedf5d336437a3513e3740387
  id: builtins.str,
  *,
  default_profile: typing.Optional[typing.Union[FargateProfileOptions, typing.Dict[builtins.str, typing.Any]]] = None,
+ kubectl_layer: _ILayerVersion_5ac127c8,
  alb_controller: typing.Optional[typing.Union[AlbControllerOptions, typing.Dict[builtins.str, typing.Any]]] = None,
  authentication_mode: typing.Optional[AuthenticationMode] = None,
  awscli_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
@@ -23202,13 +23575,14 @@ def _typecheckingstub__ae166d791f5d5176f3386726c22bc44afedf5d336437a3513e3740387
  endpoint_access: typing.Optional[EndpointAccess] = None,
  ip_family: typing.Optional[IpFamily] = None,
  kubectl_environment: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
- kubectl_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
  kubectl_memory: typing.Optional[_Size_7b441c34] = None,
  masters_role: typing.Optional[_IRole_235f5d8e] = None,
  on_event_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
  output_masters_role_arn: typing.Optional[builtins.bool] = None,
  place_cluster_handler_in_vpc: typing.Optional[builtins.bool] = None,
  prune: typing.Optional[builtins.bool] = None,
+ remote_node_networks: typing.Optional[typing.Sequence[typing.Union[RemoteNodeNetwork, typing.Dict[builtins.str, typing.Any]]]] = None,
+ remote_pod_networks: typing.Optional[typing.Sequence[typing.Union[RemotePodNetwork, typing.Dict[builtins.str, typing.Any]]]] = None,
  secrets_encryption_key: typing.Optional[_IKey_5f11635f] = None,
  service_ipv4_cidr: typing.Optional[builtins.str] = None,
  version: KubernetesVersion,
@@ -23233,6 +23607,7 @@ def _typecheckingstub__f11c7f989209f6213cb855d2846bb0b2b79a6a2b85eb0d65939e981df
  security_group: typing.Optional[_ISecurityGroup_acf8a799] = None,
  vpc: typing.Optional[_IVpc_f30d5663] = None,
  vpc_subnets: typing.Optional[typing.Sequence[typing.Union[_SubnetSelection_e57d76df, typing.Dict[builtins.str, typing.Any]]]] = None,
+ kubectl_layer: _ILayerVersion_5ac127c8,
  alb_controller: typing.Optional[typing.Union[AlbControllerOptions, typing.Dict[builtins.str, typing.Any]]] = None,
  authentication_mode: typing.Optional[AuthenticationMode] = None,
  awscli_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
@@ -23243,13 +23618,14 @@ def _typecheckingstub__f11c7f989209f6213cb855d2846bb0b2b79a6a2b85eb0d65939e981df
  endpoint_access: typing.Optional[EndpointAccess] = None,
  ip_family: typing.Optional[IpFamily] = None,
  kubectl_environment: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
- kubectl_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
  kubectl_memory: typing.Optional[_Size_7b441c34] = None,
  masters_role: typing.Optional[_IRole_235f5d8e] = None,
  on_event_layer: typing.Optional[_ILayerVersion_5ac127c8] = None,
  output_masters_role_arn: typing.Optional[builtins.bool] = None,
  place_cluster_handler_in_vpc: typing.Optional[builtins.bool] = None,
  prune: typing.Optional[builtins.bool] = None,
+ remote_node_networks: typing.Optional[typing.Sequence[typing.Union[RemoteNodeNetwork, typing.Dict[builtins.str, typing.Any]]]] = None,
+ remote_pod_networks: typing.Optional[typing.Sequence[typing.Union[RemotePodNetwork, typing.Dict[builtins.str, typing.Any]]]] = None,
  secrets_encryption_key: typing.Optional[_IKey_5f11635f] = None,
  service_ipv4_cidr: typing.Optional[builtins.str] = None,
  default_profile: typing.Optional[typing.Union[FargateProfileOptions, typing.Dict[builtins.str, typing.Any]]] = None,
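
The last two stubs (identifiable by the `default_profile` option) carry the same pair of changes over to the Fargate variants. A hedged sketch assuming these stubs back `eks.FargateCluster`; the construct id is illustrative, `self` stands for the enclosing construct, and the kubectl layer import is the version-matched helper package used elsewhere in this module's examples:

```python
from aws_cdk import aws_eks as eks
from aws_cdk.lambda_layer_kubectl_v32 import KubectlV32Layer

# Sketch only: kubectl_layer is passed explicitly because these stubs
# no longer mark it as optional.
fargate_cluster = eks.FargateCluster(self, "fargate-eks",
    version=eks.KubernetesVersion.V1_32,
    kubectl_layer=KubectlV32Layer(self, "kubectl"),
)
```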