konokenj.cdk-api-mcp-server 0.53.0__py3-none-any.whl → 0.55.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. cdk_api_mcp_server/__about__.py +1 -1
  2. cdk_api_mcp_server/resources/aws-cdk/constructs/@aws-cdk/aws-eks-v2-alpha/README.md +45 -45
  3. cdk_api_mcp_server/resources/aws-cdk/constructs/@aws-cdk/aws-imagebuilder-alpha/README.md +298 -0
  4. cdk_api_mcp_server/resources/aws-cdk/constructs/@aws-cdk/aws-sagemaker-alpha/README.md +32 -0
  5. cdk_api_mcp_server/resources/aws-cdk/constructs/@aws-cdk/mixins-preview/README.md +167 -5
  6. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/README.md/README.md +2 -0
  7. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-apigateway/README.md +25 -0
  8. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-apigateway/integ.lambda-permission-consolidation.ts +55 -0
  9. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-apigatewayv2-integrations/README.md +35 -0
  10. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-apigatewayv2-integrations/integ.lambda-permission-consolidation.ts +45 -0
  11. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-cognito/README.md +2 -2
  12. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-dynamodb/README.md +26 -0
  13. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-dynamodb/integ.dynamodb.add-to-resource-policy.ts +17 -0
  14. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-ecs/integ.placement-strategies.ts +32 -8
  15. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-eks/README.md +86 -86
  16. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-eks/integ.eks-al2023-nodegroup.ts +1 -1
  17. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-eks/integ.fargate-cluster.ts +1 -1
  18. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-lambda/integ.runtime.inlinecode.ts +7 -0
  19. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-opensearchservice/integ.opensearch.ebs.ts +1 -1
  20. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-rds/README.md +1 -1
  21. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-rds/integ.cluster-cloudwatch-logs-exports.ts +56 -0
  22. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-route53/README.md +32 -31
  23. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-route53/integ.zone-delegation-iam-stack.ts +66 -0
  24. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-s3-deployment/integ.bucket-deployment-big-response.ts +4 -0
  25. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-s3-deployment/integ.bucket-deployment-data.ts +15 -0
  26. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-s3-deployment/integ.bucket-deployment-large-file.ts +3 -0
  27. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-secretsmanager/integ.secret.dynamic-reference-key.ts +38 -0
  28. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-stepfunctions-tasks/README.md +14 -3
  29. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-stepfunctions-tasks/integ.evaluate-expression-arm64.ts +27 -0
  30. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-stepfunctions-tasks/integ.evaluate-expression-default.ts +25 -0
  31. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-stepfunctions-tasks/integ.evaluate-expression-mixed-arch.ts +35 -0
  32. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-stepfunctions-tasks/integ.evaluate-expression-x86.ts +27 -0
  33. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/custom-resources/README.md +56 -0
  34. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/custom-resources/integ.external-id.ts +80 -0
  35. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/interfaces/README.md +33 -0
  36. {konokenj_cdk_api_mcp_server-0.53.0.dist-info → konokenj_cdk_api_mcp_server-0.55.0.dist-info}/METADATA +2 -2
  37. {konokenj_cdk_api_mcp_server-0.53.0.dist-info → konokenj_cdk_api_mcp_server-0.55.0.dist-info}/RECORD +40 -28
  38. {konokenj_cdk_api_mcp_server-0.53.0.dist-info → konokenj_cdk_api_mcp_server-0.55.0.dist-info}/WHEEL +0 -0
  39. {konokenj_cdk_api_mcp_server-0.53.0.dist-info → konokenj_cdk_api_mcp_server-0.55.0.dist-info}/entry_points.txt +0 -0
  40. {konokenj_cdk_api_mcp_server-0.53.0.dist-info → konokenj_cdk_api_mcp_server-0.55.0.dist-info}/licenses/LICENSE.txt +0 -0
@@ -69,12 +69,12 @@ This example defines an Amazon EKS cluster with the following configuration:
 * A Kubernetes pod with a container based on the [paulbouwer/hello-kubernetes](https://github.com/paulbouwer/hello-kubernetes) image.

 ```ts
-import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';

 // provisioning a cluster
 const cluster = new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.V1_33,
-  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
+  version: eks.KubernetesVersion.V1_34,
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });

 // apply a kubernetes manifest to the cluster
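Every EKS hunk in this diff applies the same mechanical substitution: `KubectlV33Layer` and `KubernetesVersion.V1_33` become their v34 counterparts. For reference, the upgraded example above resolves to a snippet like the following; this is a sketch assuming `aws-cdk-lib` plus the separately published `@aws-cdk/lambda-layer-kubectl-v34` package:

```ts
import { Stack } from 'aws-cdk-lib';
import * as eks from 'aws-cdk-lib/aws-eks';
import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
import { Construct } from 'constructs';

class HelloEksStack extends Stack {
  constructor(scope: Construct, id: string) {
    super(scope, id);
    // The kubectl layer ships as its own package so the bundled kubectl
    // binary can track the cluster's Kubernetes minor version.
    new eks.Cluster(this, 'hello-eks', {
      version: eks.KubernetesVersion.V1_34,
      kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
    });
  }
}
```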
@@ -139,11 +139,11 @@ A more detailed breakdown of each is provided further down this README.
 Creating a new cluster is done using the `Cluster` or `FargateCluster` constructs. The only required properties are the kubernetes `version` and `kubectlLayer`.

 ```ts
-import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';

 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.V1_33,
-  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
+  version: eks.KubernetesVersion.V1_34,
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```

@@ -157,12 +157,12 @@ This can happen in one of three situations:
 This affects the EKS cluster itself, the custom resource that created the cluster, associated IAM roles, node groups, security groups, VPC and any other CloudFormation resources managed by this construct.

 ```ts
-import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 import * as core from 'aws-cdk-lib/core';

 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.V1_33,
-  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
+  version: eks.KubernetesVersion.V1_34,
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
   removalPolicy: core.RemovalPolicy.RETAIN, // Keep all resources created by the construct.
 });
 ```
@@ -170,11 +170,11 @@ new eks.Cluster(this, 'HelloEKS', {
 You can also use `FargateCluster` to provision a cluster that uses only fargate workers.

 ```ts
-import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';

 new eks.FargateCluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.V1_33,
-  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
+  version: eks.KubernetesVersion.V1_34,
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```

@@ -197,13 +197,13 @@ By default, this library will allocate a managed node group with 2 *m5.large* in
 At cluster instantiation time, you can customize the number of instances and their type:

 ```ts
-import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';

 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.V1_33,
+  version: eks.KubernetesVersion.V1_34,
   defaultCapacity: 5,
   defaultCapacityInstance: ec2.InstanceType.of(ec2.InstanceClass.M5, ec2.InstanceSize.SMALL),
-  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```
@@ -212,12 +212,12 @@ To access the node group that was created on your behalf, you can use `cluster.d
 Additional customizations are available post instantiation. To apply them, set the default capacity to 0, and use the `cluster.addNodegroupCapacity` method:

 ```ts
-import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';

 const cluster = new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.V1_33,
+  version: eks.KubernetesVersion.V1_34,
   defaultCapacity: 0,
-  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });

 cluster.addNodegroupCapacity('custom-node-group', {

@@ -293,7 +293,7 @@ Node groups are available with IPv6 configured networks. For custom roles assig
 > For more details visit [Configuring the Amazon VPC CNI plugin for Kubernetes to use IAM roles for service accounts](https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html#cni-iam-role-create-role)

 ```ts
-import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';

 const ipv6Management = new iam.PolicyDocument({
   statements: [new iam.PolicyStatement({

@@ -319,9 +319,9 @@ const eksClusterNodeGroupRole = new iam.Role(this, 'eksClusterNodeGroupRole', {
 });

 const cluster = new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.V1_33,
+  version: eks.KubernetesVersion.V1_34,
   defaultCapacity: 0,
-  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });

 cluster.addNodegroupCapacity('custom-node-group', {
@@ -433,12 +433,12 @@ has been changed. As a workaround, you need to add a temporary policy to the clu
 successful replacement. Consider this example if you are renaming the cluster from `foo` to `bar`:

 ```ts
-import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';

 const cluster = new eks.Cluster(this, 'cluster-to-rename', {
   clusterName: 'foo', // rename this to 'bar'
-  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
-  version: eks.KubernetesVersion.V1_33,
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
+  version: eks.KubernetesVersion.V1_34,
 });

 // allow the cluster admin role to delete the cluster 'foo'

@@ -491,11 +491,11 @@ To create an EKS cluster that **only** uses Fargate capacity, you can use `Farga
 The following code defines an Amazon EKS cluster with a default Fargate Profile that matches all pods from the "kube-system" and "default" namespaces. It is also configured to [run CoreDNS on Fargate](https://docs.aws.amazon.com/eks/latest/userguide/fargate-getting-started.html#fargate-gs-coredns).

 ```ts
-import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';

 const cluster = new eks.FargateCluster(this, 'MyCluster', {
-  version: eks.KubernetesVersion.V1_33,
-  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
+  version: eks.KubernetesVersion.V1_34,
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```
@@ -571,12 +571,12 @@ To disable bootstrapping altogether (i.e. to fully customize user-data), set `bo
 You can also configure the cluster to use an auto-scaling group as the default capacity:

 ```ts
-import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';

 const cluster = new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.V1_33,
+  version: eks.KubernetesVersion.V1_34,
   defaultCapacityType: eks.DefaultCapacityType.EC2,
-  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```

@@ -678,12 +678,12 @@ AWS Identity and Access Management (IAM) and native Kubernetes [Role Based Acces
 You can configure the [cluster endpoint access](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) by using the `endpointAccess` property:

 ```ts
-import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';

 const cluster = new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.V1_33,
+  version: eks.KubernetesVersion.V1_34,
   endpointAccess: eks.EndpointAccess.PRIVATE, // No access outside of your VPC.
-  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```
@@ -703,31 +703,31 @@ From the docs:
 To deploy the controller on your EKS cluster, configure the `albController` property:

 ```ts
-import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';

 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.V1_33,
+  version: eks.KubernetesVersion.V1_34,
   albController: {
     version: eks.AlbControllerVersion.V2_8_2,
   },
-  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```

 To provide additional Helm chart values supported by `albController` in CDK, use the `additionalHelmChartValues` property. For example, the following code snippet shows how to set the `enableWafV2` flag:

 ```ts
-import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';

 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.V1_33,
+  version: eks.KubernetesVersion.V1_34,
   albController: {
     version: eks.AlbControllerVersion.V2_8_2,
     additionalHelmChartValues: {
       enableWafv2: false
     }
   },
-  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```
@@ -764,15 +764,15 @@ if (cluster.albController) {
 You can specify the VPC of the cluster using the `vpc` and `vpcSubnets` properties:

 ```ts
-import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';

 declare const vpc: ec2.Vpc;

 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.V1_33,
+  version: eks.KubernetesVersion.V1_34,
   vpc,
   vpcSubnets: [{ subnetType: ec2.SubnetType.PRIVATE_WITH_EGRESS }],
-  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```

@@ -815,11 +815,11 @@ The `ClusterHandler` is a set of Lambda functions (`onEventHandler`, `isComplete
 You can configure the environment of the Cluster Handler functions by specifying it at cluster instantiation. For example, this can be useful in order to configure an http proxy:

 ```ts
-import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';

 declare const proxyInstanceSecurityGroup: ec2.SecurityGroup;
 const cluster = new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.V1_33,
+  version: eks.KubernetesVersion.V1_34,
   clusterHandlerEnvironment: {
     https_proxy: 'http://proxy.myproxy.com',
   },
@@ -828,7 +828,7 @@ const cluster = new eks.Cluster(this, 'hello-eks', {
    * Cluster Handler Lambdas so that it can reach the proxy.
    */
   clusterHandlerSecurityGroup: proxyInstanceSecurityGroup,
-  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```

@@ -837,7 +837,7 @@ const cluster = new eks.Cluster(this, 'hello-eks', {
 You can optionally choose to configure your cluster to use IPv6 using the [`ipFamily`](https://docs.aws.amazon.com/eks/latest/APIReference/API_KubernetesNetworkConfigRequest.html#AmazonEKS-Type-KubernetesNetworkConfigRequest-ipFamily) definition for your cluster. Note that this will require the underlying subnets to have an associated IPv6 CIDR.

 ```ts
-import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 declare const vpc: ec2.Vpc;

 function associateSubnetWithV6Cidr(vpc: ec2.Vpc, count: number, subnet: ec2.ISubnet) {

@@ -863,11 +863,11 @@ for (let subnet of subnets) {
 }

 const cluster = new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.V1_33,
+  version: eks.KubernetesVersion.V1_34,
   vpc: vpc,
   ipFamily: eks.IpFamily.IP_V6,
   vpcSubnets: [{ subnets: vpc.publicSubnets }],
-  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```
@@ -898,14 +898,14 @@ const cluster = eks.Cluster.fromClusterAttributes(this, 'Cluster', {
 You can configure the environment of this function by specifying it at cluster instantiation. For example, this can be useful in order to configure an http proxy:

 ```ts
-import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';

 const cluster = new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.V1_33,
+  version: eks.KubernetesVersion.V1_34,
   kubectlEnvironment: {
     'http_proxy': 'http://proxy.myproxy.com',
   },
-  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```

@@ -922,11 +922,11 @@ Depending on which version of kubernetes you're targeting, you will need to use
 the `@aws-cdk/lambda-layer-kubectl-vXY` packages.

 ```ts
-import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';

 const cluster = new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.V1_33,
-  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
+  version: eks.KubernetesVersion.V1_34,
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```

@@ -961,7 +961,7 @@ const cluster1 = new eks.Cluster(this, 'MyCluster', {
   kubectlLayer: layer,
   vpc,
   clusterName: 'cluster-name',
-  version: eks.KubernetesVersion.V1_33,
+  version: eks.KubernetesVersion.V1_34,
 });

 // or
@@ -977,12 +977,12 @@ const cluster2 = eks.Cluster.fromClusterAttributes(this, 'MyCluster', {
 By default, the kubectl provider is configured with 1024MiB of memory. You can use the `kubectlMemory` option to specify the memory size for the AWS Lambda function:

 ```ts
-import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';

 new eks.Cluster(this, 'MyCluster', {
   kubectlMemory: Size.gibibytes(4),
-  version: eks.KubernetesVersion.V1_33,
-  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
+  version: eks.KubernetesVersion.V1_34,
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });

 // or

@@ -1019,13 +1019,13 @@ cluster.addAutoScalingGroupCapacity('self-ng-arm', {
 When you create a cluster, you can specify a `mastersRole`. The `Cluster` construct will associate this role with the `system:masters` [RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) group, giving it super-user access to the cluster.

 ```ts
-import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';

 declare const role: iam.Role;
 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.V1_33,
+  version: eks.KubernetesVersion.V1_34,
   mastersRole: role,
-  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```
@@ -1071,26 +1071,26 @@ You can use the `secretsEncryptionKey` to configure which key the cluster will u
 > This setting can only be specified when the cluster is created and cannot be updated.

 ```ts
-import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';

 const secretsKey = new kms.Key(this, 'SecretsKey');
 const cluster = new eks.Cluster(this, 'MyCluster', {
   secretsEncryptionKey: secretsKey,
-  version: eks.KubernetesVersion.V1_33,
-  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
+  version: eks.KubernetesVersion.V1_34,
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```

 You can also use a similar configuration for running a cluster built using the FargateCluster construct.

 ```ts
-import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';

 const secretsKey = new kms.Key(this, 'SecretsKey');
 const cluster = new eks.FargateCluster(this, 'MyFargateCluster', {
   secretsEncryptionKey: secretsKey,
-  version: eks.KubernetesVersion.V1_33,
-  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
+  version: eks.KubernetesVersion.V1_34,
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```

@@ -1108,11 +1108,11 @@ When you create an Amazon EKS cluster, you can configure it to leverage the [EKS
 Once you have identified the on-premises node and pod (optional) CIDRs you will use for your hybrid nodes and the workloads running on them, you can specify them during cluster creation using the `remoteNodeNetworks` and `remotePodNetworks` (optional) properties:

 ```ts
-import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';

 new eks.Cluster(this, 'Cluster', {
-  version: eks.KubernetesVersion.V1_33,
-  kubectlLayer: new KubectlV33Layer(this, 'KubectlLayer'),
+  version: eks.KubernetesVersion.V1_34,
+  kubectlLayer: new KubectlV34Layer(this, 'KubectlLayer'),
   remoteNodeNetworks: [
     {
       cidrs: ['10.0.0.0/16'],
@@ -1165,7 +1165,7 @@ To access the Kubernetes resources from the console, make sure your viewing prin
 in the `aws-auth` ConfigMap. Some options to consider:

 ```ts
-import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 declare const cluster: eks.Cluster;
 declare const your_current_role: iam.Role;
 declare const vpc: ec2.Vpc;

@@ -1185,7 +1185,7 @@ your_current_role.addToPolicy(new iam.PolicyStatement({

 ```ts
 // Option 2: create your custom mastersRole with scoped assumeBy arn as the Cluster prop. Switch to this role from the AWS console.
-import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 declare const vpc: ec2.Vpc;


@@ -1195,8 +1195,8 @@ const mastersRole = new iam.Role(this, 'MastersRole', {

 const cluster = new eks.Cluster(this, 'EksCluster', {
   vpc,
-  version: eks.KubernetesVersion.V1_33,
-  kubectlLayer: new KubectlV33Layer(this, 'KubectlLayer'),
+  version: eks.KubernetesVersion.V1_34,
+  kubectlLayer: new KubectlV34Layer(this, 'KubectlLayer'),
   mastersRole,
 });
@@ -1240,13 +1240,13 @@ AWS IAM principals from both Amazon EKS access entry APIs and the aws-auth confi
 To specify the `authenticationMode`:

 ```ts
-import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 declare const vpc: ec2.Vpc;

 new eks.Cluster(this, 'Cluster', {
   vpc,
-  version: eks.KubernetesVersion.V1_33,
-  kubectlLayer: new KubectlV33Layer(this, 'KubectlLayer'),
+  version: eks.KubernetesVersion.V1_34,
+  kubectlLayer: new KubectlV34Layer(this, 'KubectlLayer'),
   authenticationMode: eks.AuthenticationMode.API_AND_CONFIG_MAP,
 });
 ```

@@ -1291,7 +1291,7 @@ eks.AccessPolicy.fromAccessPolicyName('AmazonEKSAdminPolicy', {
 Use `grantAccess()` to grant the AccessPolicy to an IAM principal:

 ```ts
-import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 declare const vpc: ec2.Vpc;

 const clusterAdminRole = new iam.Role(this, 'ClusterAdminRole', {

@@ -1309,8 +1309,8 @@ const eksAdminViewRole = new iam.Role(this, 'EKSAdminViewRole', {
 const cluster = new eks.Cluster(this, 'Cluster', {
   vpc,
   mastersRole: clusterAdminRole,
-  version: eks.KubernetesVersion.V1_33,
-  kubectlLayer: new KubectlV33Layer(this, 'KubectlLayer'),
+  version: eks.KubernetesVersion.V1_34,
+  kubectlLayer: new KubectlV34Layer(this, 'KubectlLayer'),
   authenticationMode: eks.AuthenticationMode.API_AND_CONFIG_MAP,
 });
@@ -1642,12 +1642,12 @@ Pruning is enabled by default but can be disabled through the `prune` option
 when a cluster is defined:

 ```ts
-import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';

 new eks.Cluster(this, 'MyCluster', {
-  version: eks.KubernetesVersion.V1_33,
+  version: eks.KubernetesVersion.V1_34,
   prune: false,
-  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```

@@ -2050,17 +2050,17 @@ You can enable logging for each one separately using the `clusterLogging`
 property. For example:

 ```ts
-import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';

 const cluster = new eks.Cluster(this, 'Cluster', {
   // ...
-  version: eks.KubernetesVersion.V1_33,
+  version: eks.KubernetesVersion.V1_34,
   clusterLogging: [
     eks.ClusterLoggingTypes.API,
     eks.ClusterLoggingTypes.AUTHENTICATOR,
     eks.ClusterLoggingTypes.SCHEDULER,
   ],
-  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```
@@ -27,7 +27,7 @@ class EksClusterStack extends Stack {
       vpc: this.vpc,
       mastersRole,
       defaultCapacity: 0,
-      ...getClusterVersionConfig(this, eks.KubernetesVersion.V1_33),
+      ...getClusterVersionConfig(this, eks.KubernetesVersion.V1_34),
     });

     // create nodegroup with AL2023_X86_64_STANDARD

@@ -17,7 +17,7 @@ class EksFargateClusterStack extends Stack {
     this.node.setContext(EC2_RESTRICT_DEFAULT_SECURITY_GROUP, false);
     this.vpc = props?.vpc ?? this.createDummyVpc();
     new eks.FargateCluster(this, 'FargateCluster', {
-      ...getClusterVersionConfig(this, eks.KubernetesVersion.V1_33),
+      ...getClusterVersionConfig(this, eks.KubernetesVersion.V1_34),
       prune: false,
      authenticationMode: props?.authMode,
       vpc: this.vpc,
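Both integ stacks spread the result of a `getClusterVersionConfig` helper into their cluster props. The helper's source is not part of this diff; a plausible reconstruction, assuming it merely pairs the requested version with the matching kubectl layer the way the README examples above do, is:

```ts
import { Construct } from 'constructs';
import * as eks from 'aws-cdk-lib/aws-eks';
import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';

// Hypothetical shape: one place to bump both the control-plane version and
// the kubectl layer. Hard-wired to the v34 layer here only to match the
// V1_34 call sites in this diff.
export function getClusterVersionConfig(scope: Construct, version: eks.KubernetesVersion) {
  return {
    version,
    kubectlLayer: new KubectlV34Layer(scope, 'KubectlLayer'),
  };
}
```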
@@ -76,6 +76,13 @@ const node22xfn = new Function(stack, 'NODEJS_22_X', {
 });
 new CfnOutput(stack, 'NODEJS_22_X-functionName', { value: node22xfn.functionName });

+const node24xfn = new Function(stack, 'NODEJS_24_X', {
+  code: new InlineCode('exports.handler = async function(event) { return "success" }'),
+  handler: 'index.handler',
+  runtime: Runtime.NODEJS_24_X,
+});
+new CfnOutput(stack, 'NODEJS_24_X-functionName', { value: node24xfn.functionName });
+
 new integ.IntegTest(app, 'lambda-runtime-inlinecode', {
   testCases: [stack],
 });
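The new `NODEJS_24_X` case follows the file's per-runtime pattern: an inline-code function plus a `CfnOutput` of its name. If the integ test were also to call the function, `@aws-cdk/integ-tests-alpha` supports Lambda invocation assertions; a sketch (illustrative only, not part of this diff):

```ts
import * as integ from '@aws-cdk/integ-tests-alpha';
// `app`, `stack`, and `node24xfn` refer to the definitions in
// integ.runtime.inlinecode.ts shown above.

const test = new integ.IntegTest(app, 'lambda-runtime-inlinecode', {
  testCases: [stack],
});

// Invoke the Node.js 24 function after deployment and assert that the
// inline handler really returns the string "success".
test.assertions.invokeFunction({
  functionName: node24xfn.functionName,
}).expect(integ.ExpectedResult.objectLike({
  Payload: '"success"',
}));
```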
@@ -7,7 +7,7 @@ class TestStack extends Stack {
   constructor(scope: Construct, id: string, props?: StackProps) {
     super(scope, id, props);

-    const instanceTypes = ['i4g.large.search', 'i4i.xlarge.search', 'r7gd.xlarge.search'];
+    const instanceTypes = ['i4g.large.search', 'i4i.xlarge.search', 'r7gd.xlarge.search', 'r8gd.medium.search'];

     instanceTypes.forEach((instanceType, index) => {
       new opensearch.Domain(this, `Domain${index + 1}`, {
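The hunk cuts off where the `Domain` constructor opens, so the per-domain configuration is not visible here. A minimal sketch of the loop body, in which only the instance-type list comes from the diff and the engine version and capacity settings are illustrative assumptions:

```ts
import { RemovalPolicy } from 'aws-cdk-lib';
import * as opensearch from 'aws-cdk-lib/aws-opensearchservice';

// Inside TestStack's constructor: one single-data-node domain per entry,
// so each instance type (including the new r8gd.medium.search) is
// provisioned and validated independently.
instanceTypes.forEach((instanceType, index) => {
  new opensearch.Domain(this, `Domain${index + 1}`, {
    version: opensearch.EngineVersion.OPENSEARCH_2_11, // illustrative version
    capacity: {
      dataNodes: 1,
      dataNodeInstanceType: instanceType,
      multiAzWithStandbyEnabled: false,
    },
    removalPolicy: RemovalPolicy.DESTROY,
  });
});
```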
@@ -1204,7 +1204,7 @@ const cluster = new rds.DatabaseCluster(this, 'Database', {
   }),
   writer: rds.ClusterInstance.provisioned('writer'),
   vpc,
-  cloudwatchLogsExports: ['error', 'general', 'slowquery', 'audit'], // Export all available MySQL-based logs
+  cloudwatchLogsExports: ['error', 'general', 'slowquery', 'audit', 'instance', 'iam-db-auth-error'], // Export all available MySQL-based logs
   cloudwatchLogsRetention: logs.RetentionDays.THREE_MONTHS, // Optional - default is to never expire logs
   cloudwatchLogsRetentionRole: myLogsPublishingRole, // Optional - a role will be created if not provided
   // ...
@@ -0,0 +1,56 @@
+import * as cdk from 'aws-cdk-lib/core';
+import { ExpectedResult, IntegTest } from '@aws-cdk/integ-tests-alpha';
+import * as ec2 from 'aws-cdk-lib/aws-ec2';
+import * as rds from 'aws-cdk-lib/aws-rds';
+
+const app = new cdk.App();
+
+const stack = new cdk.Stack(app, 'CloudWatchLogsExportsStack');
+const vpc = new ec2.Vpc(stack, 'VPC');
+
+const mysql = new rds.DatabaseCluster(stack, 'DatabaseClusterMysql', {
+  engine: rds.DatabaseClusterEngine.auroraMysql({ version: rds.AuroraMysqlEngineVersion.VER_3_09_0 }),
+  writer: rds.ClusterInstance.serverlessV2('writerInstance'),
+  vpc,
+  cloudwatchLogsExports: ['error', 'general', 'slowquery', 'audit', 'instance', 'iam-db-auth-error'],
+  removalPolicy: cdk.RemovalPolicy.DESTROY,
+});
+
+const postgresql = new rds.DatabaseCluster(stack, 'DatabaseClusterPostgresql', {
+  engine: rds.DatabaseClusterEngine.auroraPostgres({ version: rds.AuroraPostgresEngineVersion.VER_16_4 }),
+  writer: rds.ClusterInstance.serverlessV2('writerInstance'),
+  vpc,
+  cloudwatchLogsExports: ['postgresql', 'iam-db-auth-error', 'instance'],
+  removalPolicy: cdk.RemovalPolicy.DESTROY,
+});
+
+const integ = new IntegTest(app, 'CloudWatchLogsExportsStackInteg', {
+  testCases: [stack],
+});
+
+integ.assertions.awsApiCall('RDS', 'describeDBClusters', {
+  DBClusterIdentifier: mysql.clusterIdentifier,
+}).expect(ExpectedResult.objectLike({
+  DBClusters: [{
+    EnabledCloudwatchLogsExports: [
+      'audit',
+      'error',
+      'general',
+      'iam-db-auth-error',
+      'instance',
+      'slowquery',
+    ],
+  }],
+}));
+
+integ.assertions.awsApiCall('RDS', 'describeDBClusters', {
+  DBClusterIdentifier: postgresql.clusterIdentifier,
+}).expect(ExpectedResult.objectLike({
+  DBClusters: [{
+    EnabledCloudwatchLogsExports: [
+      'iam-db-auth-error',
+      'instance',
+      'postgresql',
+    ],
+  }],
+}));