konokenj.cdk-api-mcp-server 0.53.0__py3-none-any.whl → 0.54.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of konokenj.cdk-api-mcp-server might be problematic.
- cdk_api_mcp_server/__about__.py +1 -1
- cdk_api_mcp_server/resources/aws-cdk/constructs/@aws-cdk/aws-eks-v2-alpha/README.md +45 -45
- cdk_api_mcp_server/resources/aws-cdk/constructs/@aws-cdk/aws-imagebuilder-alpha/README.md +94 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/@aws-cdk/aws-sagemaker-alpha/README.md +32 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/@aws-cdk/mixins-preview/README.md +151 -5
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/README.md/README.md +2 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-apigateway/README.md +25 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-apigateway/integ.lambda-permission-consolidation.ts +55 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-apigatewayv2-integrations/README.md +35 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-apigatewayv2-integrations/integ.lambda-permission-consolidation.ts +45 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-dynamodb/README.md +26 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-dynamodb/integ.dynamodb.add-to-resource-policy.ts +17 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-ecs/integ.placement-strategies.ts +32 -8
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-eks/README.md +86 -86
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-eks/integ.eks-al2023-nodegroup.ts +1 -1
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-eks/integ.fargate-cluster.ts +1 -1
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-lambda/integ.runtime.inlinecode.ts +7 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-secretsmanager/integ.secret.dynamic-reference-key.ts +38 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-stepfunctions-tasks/README.md +14 -3
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-stepfunctions-tasks/integ.evaluate-expression-arm64.ts +27 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-stepfunctions-tasks/integ.evaluate-expression-default.ts +25 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-stepfunctions-tasks/integ.evaluate-expression-mixed-arch.ts +35 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-stepfunctions-tasks/integ.evaluate-expression-x86.ts +27 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/interfaces/README.md +33 -0
- {konokenj_cdk_api_mcp_server-0.53.0.dist-info → konokenj_cdk_api_mcp_server-0.54.0.dist-info}/METADATA +2 -2
- {konokenj_cdk_api_mcp_server-0.53.0.dist-info → konokenj_cdk_api_mcp_server-0.54.0.dist-info}/RECORD +29 -20
- {konokenj_cdk_api_mcp_server-0.53.0.dist-info → konokenj_cdk_api_mcp_server-0.54.0.dist-info}/WHEEL +0 -0
- {konokenj_cdk_api_mcp_server-0.53.0.dist-info → konokenj_cdk_api_mcp_server-0.54.0.dist-info}/entry_points.txt +0 -0
- {konokenj_cdk_api_mcp_server-0.53.0.dist-info → konokenj_cdk_api_mcp_server-0.54.0.dist-info}/licenses/LICENSE.txt +0 -0
cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-eks/README.md CHANGED

````diff
@@ -69,12 +69,12 @@ This example defines an Amazon EKS cluster with the following configuration:
 * A Kubernetes pod with a container based on the [paulbouwer/hello-kubernetes](https://github.com/paulbouwer/hello-kubernetes) image.
 
 ```ts
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 
 // provisioning a cluster
 const cluster = new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.
-  kubectlLayer: new
+  version: eks.KubernetesVersion.V1_34,
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 
 // apply a kubernetes manifest to the cluster
@@ -139,11 +139,11 @@ A more detailed breakdown of each is provided further down this README.
 Creating a new cluster is done using the `Cluster` or `FargateCluster` constructs. The only required properties are the kubernetes `version` and `kubectlLayer`.
 
 ```ts
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 
 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.
-  kubectlLayer: new
+  version: eks.KubernetesVersion.V1_34,
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```
 
````
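For orientation, a self-contained sketch of the cluster definition these hunks converge on — assuming `aws-cdk-lib` v2 and the `@aws-cdk/lambda-layer-kubectl-v34` package are installed; the stack class name is illustrative:

```ts
import { App, Stack, StackProps } from 'aws-cdk-lib';
import * as eks from 'aws-cdk-lib/aws-eks';
import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
import { Construct } from 'constructs';

// Illustrative stack wrapper around the README's two required props.
class HelloEksStack extends Stack {
  constructor(scope: Construct, id: string, props?: StackProps) {
    super(scope, id, props);

    new eks.Cluster(this, 'HelloEKS', {
      version: eks.KubernetesVersion.V1_34,
      // The kubectl layer version should track the cluster version.
      kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
    });
  }
}

new HelloEksStack(new App(), 'HelloEksStack');
```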
````diff
@@ -157,12 +157,12 @@ This can happen in one of three situations:
 This affects the EKS cluster itself, the custom resource that created the cluster, associated IAM roles, node groups, security groups, VPC and any other CloudFormation resources managed by this construct.
 
 ```ts
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 import * as core from 'aws-cdk-lib/core';
 
 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.
-  kubectlLayer: new
+  version: eks.KubernetesVersion.V1_34,
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
   removalPolicy: core.RemovalPolicy.RETAIN, // Keep all resources created by the construct.
 });
 ```
@@ -170,11 +170,11 @@ new eks.Cluster(this, 'HelloEKS', {
 You can also use `FargateCluster` to provision a cluster that uses only fargate workers.
 
 ```ts
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 
 new eks.FargateCluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.
-  kubectlLayer: new
+  version: eks.KubernetesVersion.V1_34,
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```
 
@@ -197,13 +197,13 @@ By default, this library will allocate a managed node group with 2 *m5.large* in
 At cluster instantiation time, you can customize the number of instances and their type:
 
 ```ts
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 
 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_34,
   defaultCapacity: 5,
   defaultCapacityInstance: ec2.InstanceType.of(ec2.InstanceClass.M5, ec2.InstanceSize.SMALL),
-  kubectlLayer: new
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```
 
@@ -212,12 +212,12 @@ To access the node group that was created on your behalf, you can use `cluster.d
 Additional customizations are available post instantiation. To apply them, set the default capacity to 0, and use the `cluster.addNodegroupCapacity` method:
 
 ```ts
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 
 const cluster = new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_34,
   defaultCapacity: 0,
-  kubectlLayer: new
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 
 cluster.addNodegroupCapacity('custom-node-group', {
````
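The last hunk stops at the opening of `addNodegroupCapacity`; a hedged sketch of the kind of options it accepts (the values are illustrative, not from the diff):

```ts
import * as ec2 from 'aws-cdk-lib/aws-ec2';
import * as eks from 'aws-cdk-lib/aws-eks';

declare const cluster: eks.Cluster;

// Illustrative node-group options; only the method name appears in the hunk.
cluster.addNodegroupCapacity('custom-node-group', {
  instanceTypes: [new ec2.InstanceType('m5.large')],
  minSize: 2,
  maxSize: 4,
  diskSize: 100,
});
```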
````diff
@@ -293,7 +293,7 @@ Node groups are available with IPv6 configured networks. For custom roles assig
 > For more details visit [Configuring the Amazon VPC CNI plugin for Kubernetes to use IAM roles for service accounts](https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html#cni-iam-role-create-role)
 
 ```ts
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 
 const ipv6Management = new iam.PolicyDocument({
   statements: [new iam.PolicyStatement({
@@ -319,9 +319,9 @@ const eksClusterNodeGroupRole = new iam.Role(this, 'eksClusterNodeGroupRole', {
 });
 
 const cluster = new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_34,
   defaultCapacity: 0,
-  kubectlLayer: new
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 
 cluster.addNodegroupCapacity('custom-node-group', {
@@ -433,12 +433,12 @@ has been changed. As a workaround, you need to add a temporary policy to the clu
 successful replacement. Consider this example if you are renaming the cluster from `foo` to `bar`:
 
 ```ts
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 
 const cluster = new eks.Cluster(this, 'cluster-to-rename', {
   clusterName: 'foo', // rename this to 'bar'
-  kubectlLayer: new
-  version: eks.KubernetesVersion.
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
+  version: eks.KubernetesVersion.V1_34,
 });
 
 // allow the cluster admin role to delete the cluster 'foo'
@@ -491,11 +491,11 @@ To create an EKS cluster that **only** uses Fargate capacity, you can use `Farga
 The following code defines an Amazon EKS cluster with a default Fargate Profile that matches all pods from the "kube-system" and "default" namespaces. It is also configured to [run CoreDNS on Fargate](https://docs.aws.amazon.com/eks/latest/userguide/fargate-getting-started.html#fargate-gs-coredns).
 
 ```ts
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 
 const cluster = new eks.FargateCluster(this, 'MyCluster', {
-  version: eks.KubernetesVersion.
-  kubectlLayer: new
+  version: eks.KubernetesVersion.V1_34,
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```
 
````
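Alongside the default profile described above, further profiles can be attached with `addFargateProfile`; a hedged sketch (the selector values are illustrative):

```ts
import * as eks from 'aws-cdk-lib/aws-eks';

declare const cluster: eks.FargateCluster;

// Illustrative selector; pods in this namespace are scheduled onto Fargate.
cluster.addFargateProfile('MyProfile', {
  selectors: [{ namespace: 'default' }],
});
```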
````diff
@@ -571,12 +571,12 @@ To disable bootstrapping altogether (i.e. to fully customize user-data), set `bo
 You can also configure the cluster to use an auto-scaling group as the default capacity:
 
 ```ts
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 
 const cluster = new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_34,
   defaultCapacityType: eks.DefaultCapacityType.EC2,
-  kubectlLayer: new
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```
 
@@ -678,12 +678,12 @@ AWS Identity and Access Management (IAM) and native Kubernetes [Role Based Acces
 You can configure the [cluster endpoint access](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) by using the `endpointAccess` property:
 
 ```ts
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 
 const cluster = new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_34,
   endpointAccess: eks.EndpointAccess.PRIVATE, // No access outside of your VPC.
-  kubectlLayer: new
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```
 
@@ -703,31 +703,31 @@ From the docs:
 To deploy the controller on your EKS cluster, configure the `albController` property:
 
 ```ts
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 
 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_34,
   albController: {
     version: eks.AlbControllerVersion.V2_8_2,
   },
-  kubectlLayer: new
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```
 
 To provide additional Helm chart values supported by `albController` in CDK, use the `additionalHelmChartValues` property. For example, the following code snippet shows how to set the `enableWafV2` flag:
 
 ```ts
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 
 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_34,
   albController: {
     version: eks.AlbControllerVersion.V2_8_2,
     additionalHelmChartValues: {
       enableWafv2: false
     }
   },
-  kubectlLayer: new
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```
 
@@ -764,15 +764,15 @@ if (cluster.albController) {
 You can specify the VPC of the cluster using the `vpc` and `vpcSubnets` properties:
 
 ```ts
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 
 declare const vpc: ec2.Vpc;
 
 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_34,
   vpc,
   vpcSubnets: [{ subnetType: ec2.SubnetType.PRIVATE_WITH_EGRESS }],
-  kubectlLayer: new
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```
 
````
````diff
@@ -815,11 +815,11 @@ The `ClusterHandler` is a set of Lambda functions (`onEventHandler`, `isComplete
 You can configure the environment of the Cluster Handler functions by specifying it at cluster instantiation. For example, this can be useful in order to configure an http proxy:
 
 ```ts
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 
 declare const proxyInstanceSecurityGroup: ec2.SecurityGroup;
 const cluster = new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_34,
   clusterHandlerEnvironment: {
     https_proxy: 'http://proxy.myproxy.com',
   },
@@ -828,7 +828,7 @@ const cluster = new eks.Cluster(this, 'hello-eks', {
    * Cluster Handler Lambdas so that it can reach the proxy.
    */
   clusterHandlerSecurityGroup: proxyInstanceSecurityGroup,
-  kubectlLayer: new
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```
 
@@ -837,7 +837,7 @@ const cluster = new eks.Cluster(this, 'hello-eks', {
 You can optionally choose to configure your cluster to use IPv6 using the [`ipFamily`](https://docs.aws.amazon.com/eks/latest/APIReference/API_KubernetesNetworkConfigRequest.html#AmazonEKS-Type-KubernetesNetworkConfigRequest-ipFamily) definition for your cluster. Note that this will require the underlying subnets to have an associated IPv6 CIDR.
 
 ```ts
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 declare const vpc: ec2.Vpc;
 
 function associateSubnetWithV6Cidr(vpc: ec2.Vpc, count: number, subnet: ec2.ISubnet) {
@@ -863,11 +863,11 @@ for (let subnet of subnets) {
 }
 
 const cluster = new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_34,
   vpc: vpc,
   ipFamily: eks.IpFamily.IP_V6,
   vpcSubnets: [{ subnets: vpc.publicSubnets }],
-  kubectlLayer: new
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```
 
@@ -898,14 +898,14 @@ const cluster = eks.Cluster.fromClusterAttributes(this, 'Cluster', {
 You can configure the environment of this function by specifying it at cluster instantiation. For example, this can be useful in order to configure an http proxy:
 
 ```ts
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 
 const cluster = new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_34,
   kubectlEnvironment: {
     'http_proxy': 'http://proxy.myproxy.com',
   },
-  kubectlLayer: new
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```
 
@@ -922,11 +922,11 @@ Depending on which version of kubernetes you're targeting, you will need to use
 the `@aws-cdk/lambda-layer-kubectl-vXY` packages.
 
 ```ts
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 
 const cluster = new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.
-  kubectlLayer: new
+  version: eks.KubernetesVersion.V1_34,
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```
 
@@ -961,7 +961,7 @@ const cluster1 = new eks.Cluster(this, 'MyCluster', {
   kubectlLayer: layer,
   vpc,
   clusterName: 'cluster-name',
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_34,
 });
 
 // or
@@ -977,12 +977,12 @@ const cluster2 = eks.Cluster.fromClusterAttributes(this, 'MyCluster', {
 By default, the kubectl provider is configured with 1024MiB of memory. You can use the `kubectlMemory` option to specify the memory size for the AWS Lambda function:
 
 ```ts
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 
 new eks.Cluster(this, 'MyCluster', {
   kubectlMemory: Size.gibibytes(4),
-  version: eks.KubernetesVersion.
-  kubectlLayer: new
+  version: eks.KubernetesVersion.V1_34,
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 
 // or
````
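The hunk truncates after `// or`; for the imported-cluster variant it gestures at, a hedged sketch (the cluster name is illustrative):

```ts
import { Size } from 'aws-cdk-lib';
import * as eks from 'aws-cdk-lib/aws-eks';
import { Construct } from 'constructs';

declare const scope: Construct;

// kubectlMemory can also be set when importing an existing cluster.
const imported = eks.Cluster.fromClusterAttributes(scope, 'MyCluster', {
  clusterName: 'my-cluster', // illustrative name, not from the diff
  kubectlMemory: Size.gibibytes(4),
});
```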
````diff
@@ -1019,13 +1019,13 @@ cluster.addAutoScalingGroupCapacity('self-ng-arm', {
 When you create a cluster, you can specify a `mastersRole`. The `Cluster` construct will associate this role with the `system:masters` [RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) group, giving it super-user access to the cluster.
 
 ```ts
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 
 declare const role: iam.Role;
 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_34,
   mastersRole: role,
-  kubectlLayer: new
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```
 
@@ -1071,26 +1071,26 @@ You can use the `secretsEncryptionKey` to configure which key the cluster will u
 > This setting can only be specified when the cluster is created and cannot be updated.
 
 ```ts
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 
 const secretsKey = new kms.Key(this, 'SecretsKey');
 const cluster = new eks.Cluster(this, 'MyCluster', {
   secretsEncryptionKey: secretsKey,
-  version: eks.KubernetesVersion.
-  kubectlLayer: new
+  version: eks.KubernetesVersion.V1_34,
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```
 
 You can also use a similar configuration for running a cluster built using the FargateCluster construct.
 
 ```ts
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 
 const secretsKey = new kms.Key(this, 'SecretsKey');
 const cluster = new eks.FargateCluster(this, 'MyFargateCluster', {
   secretsEncryptionKey: secretsKey,
-  version: eks.KubernetesVersion.
-  kubectlLayer: new
+  version: eks.KubernetesVersion.V1_34,
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```
 
@@ -1108,11 +1108,11 @@ When you create an Amazon EKS cluster, you can configure it to leverage the [EKS
 Once you have identified the on-premises node and pod (optional) CIDRs you will use for your hybrid nodes and the workloads running on them, you can specify them during cluster creation using the `remoteNodeNetworks` and `remotePodNetworks` (optional) properties:
 
 ```ts
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 
 new eks.Cluster(this, 'Cluster', {
-  version: eks.KubernetesVersion.
-  kubectlLayer: new
+  version: eks.KubernetesVersion.V1_34,
+  kubectlLayer: new KubectlV34Layer(this, 'KubectlLayer'),
   remoteNodeNetworks: [
     {
       cidrs: ['10.0.0.0/16'],
````
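The hybrid-nodes hunk cuts off inside the `remoteNodeNetworks` array; a hedged sketch of the complete shape (the pod CIDR value is illustrative):

```ts
import * as eks from 'aws-cdk-lib/aws-eks';
import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
import { Construct } from 'constructs';

declare const scope: Construct;

new eks.Cluster(scope, 'Cluster', {
  version: eks.KubernetesVersion.V1_34,
  kubectlLayer: new KubectlV34Layer(scope, 'KubectlLayer'),
  // The node CIDR comes from the hunk; the pod CIDR below is illustrative.
  remoteNodeNetworks: [{ cidrs: ['10.0.0.0/16'] }],
  remotePodNetworks: [{ cidrs: ['10.1.0.0/16'] }],
});
```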
````diff
@@ -1165,7 +1165,7 @@ To access the Kubernetes resources from the console, make sure your viewing prin
 in the `aws-auth` ConfigMap. Some options to consider:
 
 ```ts
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 declare const cluster: eks.Cluster;
 declare const your_current_role: iam.Role;
 declare const vpc: ec2.Vpc;
@@ -1185,7 +1185,7 @@ your_current_role.addToPolicy(new iam.PolicyStatement({
 
 ```ts
 // Option 2: create your custom mastersRole with scoped assumeBy arn as the Cluster prop. Switch to this role from the AWS console.
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 declare const vpc: ec2.Vpc;
 
 
@@ -1195,8 +1195,8 @@ const mastersRole = new iam.Role(this, 'MastersRole', {
 
 const cluster = new eks.Cluster(this, 'EksCluster', {
   vpc,
-  version: eks.KubernetesVersion.
-  kubectlLayer: new
+  version: eks.KubernetesVersion.V1_34,
+  kubectlLayer: new KubectlV34Layer(this, 'KubectlLayer'),
   mastersRole,
 });
 
@@ -1240,13 +1240,13 @@ AWS IAM principals from both Amazon EKS access entry APIs and the aws-auth confi
 To specify the `authenticationMode`:
 
 ```ts
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 declare const vpc: ec2.Vpc;
 
 new eks.Cluster(this, 'Cluster', {
   vpc,
-  version: eks.KubernetesVersion.
-  kubectlLayer: new
+  version: eks.KubernetesVersion.V1_34,
+  kubectlLayer: new KubectlV34Layer(this, 'KubectlLayer'),
   authenticationMode: eks.AuthenticationMode.API_AND_CONFIG_MAP,
 });
 ```
@@ -1291,7 +1291,7 @@ eks.AccessPolicy.fromAccessPolicyName('AmazonEKSAdminPolicy', {
 Use `grantAccess()` to grant the AccessPolicy to an IAM principal:
 
 ```ts
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 declare const vpc: ec2.Vpc;
 
 const clusterAdminRole = new iam.Role(this, 'ClusterAdminRole', {
@@ -1309,8 +1309,8 @@ const eksAdminViewRole = new iam.Role(this, 'EKSAdminViewRole', {
 const cluster = new eks.Cluster(this, 'Cluster', {
   vpc,
   mastersRole: clusterAdminRole,
-  version: eks.KubernetesVersion.
-  kubectlLayer: new
+  version: eks.KubernetesVersion.V1_34,
+  kubectlLayer: new KubectlV34Layer(this, 'KubectlLayer'),
   authenticationMode: eks.AuthenticationMode.API_AND_CONFIG_MAP,
 });
 
````
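The hunks stop before any `grantAccess()` call itself; a hedged sketch of what granting an access policy typically looks like (the policy name and scope are illustrative):

```ts
import * as eks from 'aws-cdk-lib/aws-eks';
import * as iam from 'aws-cdk-lib/aws-iam';

declare const cluster: eks.Cluster;
declare const eksAdminViewRole: iam.Role;

// Illustrative grant; only grantAccess() and fromAccessPolicyName() are
// named in the diff context above.
cluster.grantAccess('adminViewAccess', eksAdminViewRole.roleArn, [
  eks.AccessPolicy.fromAccessPolicyName('AmazonEKSAdminViewPolicy', {
    accessScopeType: eks.AccessScopeType.CLUSTER,
  }),
]);
```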
````diff
@@ -1642,12 +1642,12 @@ Pruning is enabled by default but can be disabled through the `prune` option
 when a cluster is defined:
 
 ```ts
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 
 new eks.Cluster(this, 'MyCluster', {
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_34,
   prune: false,
-  kubectlLayer: new
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```
 
@@ -2050,17 +2050,17 @@ You can enable logging for each one separately using the `clusterLogging`
 property. For example:
 
 ```ts
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 
 const cluster = new eks.Cluster(this, 'Cluster', {
   // ...
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_34,
   clusterLogging: [
     eks.ClusterLoggingTypes.API,
     eks.ClusterLoggingTypes.AUTHENTICATOR,
     eks.ClusterLoggingTypes.SCHEDULER,
   ],
-  kubectlLayer: new
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 ```
 
````
cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-eks/integ.eks-al2023-nodegroup.ts CHANGED

````diff
@@ -27,7 +27,7 @@ class EksClusterStack extends Stack {
       vpc: this.vpc,
       mastersRole,
       defaultCapacity: 0,
-      ...getClusterVersionConfig(this, eks.KubernetesVersion.
+      ...getClusterVersionConfig(this, eks.KubernetesVersion.V1_34),
     });
 
     // create nodegroup with AL2023_X86_64_STANDARD
````
cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-eks/integ.fargate-cluster.ts CHANGED

````diff
@@ -17,7 +17,7 @@ class EksFargateClusterStack extends Stack {
     this.node.setContext(EC2_RESTRICT_DEFAULT_SECURITY_GROUP, false);
     this.vpc = props?.vpc ?? this.createDummyVpc();
     new eks.FargateCluster(this, 'FargateCluster', {
-      ...getClusterVersionConfig(this, eks.KubernetesVersion.
+      ...getClusterVersionConfig(this, eks.KubernetesVersion.V1_34),
       prune: false,
       authenticationMode: props?.authMode,
       vpc: this.vpc,
````
cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-lambda/integ.runtime.inlinecode.ts CHANGED

````diff
@@ -76,6 +76,13 @@ const node22xfn = new Function(stack, 'NODEJS_22_X', {
 });
 new CfnOutput(stack, 'NODEJS_22_X-functionName', { value: node22xfn.functionName });
 
+const node24xfn = new Function(stack, 'NODEJS_24_X', {
+  code: new InlineCode('exports.handler = async function(event) { return "success" }'),
+  handler: 'index.handler',
+  runtime: Runtime.NODEJS_24_X,
+});
+new CfnOutput(stack, 'NODEJS_24_X-functionName', { value: node24xfn.functionName });
+
 new integ.IntegTest(app, 'lambda-runtime-inlinecode', {
   testCases: [stack],
 });
````
cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-secretsmanager/integ.secret.dynamic-reference-key.ts ADDED

````diff
@@ -0,0 +1,38 @@
+import * as cdk from 'aws-cdk-lib';
+import { SecretValue } from 'aws-cdk-lib';
+import * as secretsmanager from 'aws-cdk-lib/aws-secretsmanager';
+import * as integ from '@aws-cdk/integ-tests-alpha';
+
+class TestStack extends cdk.Stack {
+  constructor(scope: cdk.App, id: string) {
+    super(scope, id);
+
+    // Create a default secret
+    const secret = new secretsmanager.Secret(this, 'Secret');
+
+    // Create a JSON secret containing cfnDynamicReferenceKey values extracted from the default secret
+    new secretsmanager.Secret(this, 'JSONSecret', {
+      secretObjectValue: {
+        cfnDynamicReferenceKeyWithDefaults: SecretValue.unsafePlainText(secret.cfnDynamicReferenceKey()),
+        cfnDynamicReferenceKeyWithJsonFieldAndVersionStage: SecretValue.unsafePlainText(secret.cfnDynamicReferenceKey({
+          jsonField: 'json-key',
+          versionStage: 'version-stage',
+        })),
+        cfnDynamicReferenceKeyWithJsonFieldAndVersionId: SecretValue.unsafePlainText(secret.cfnDynamicReferenceKey({
+          jsonField: 'json-key',
+          versionId: 'version-id',
+        })),
+      },
+    });
+  }
+}
+
+const app = new cdk.App();
+
+const stack = new TestStack(app, 'cdk-integ-secrets-dynamic-reference-key');
+
+new integ.IntegTest(app, 'cdk-integ-secrets-dynamic-reference-key-test', {
+  testCases: [stack],
+});
+
+app.synth();
````
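Background, not from the diff: CloudFormation reads Secrets Manager values through dynamic references shaped like `{{resolve:secretsmanager:secret-id:SecretString:json-key:version-stage:version-id}}`, and the `jsonField`, `versionStage`, and `versionId` options presumably select into those segments; a hedged sketch of calling the new API:

```ts
import * as secretsmanager from 'aws-cdk-lib/aws-secretsmanager';

declare const secret: secretsmanager.Secret;

// Assumption: cfnDynamicReferenceKey() returns the key string for such a
// dynamic reference, which would explain why the integ test above embeds
// it via SecretValue.unsafePlainText().
const key = secret.cfnDynamicReferenceKey({ jsonField: 'json-key' });
```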
cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-stepfunctions-tasks/README.md CHANGED

````diff
@@ -141,6 +141,17 @@ The `EvaluateExpression` supports a `runtime` prop to specify the Lambda
 runtime to use to evaluate the expression. Currently, only runtimes
 of the Node.js family are supported.
 
+The `EvaluateExpression` also supports an `architecture` prop to specify the Lambda
+architecture. This can be useful when migrating to ARM64 or when running integration
+tests on ARM64 systems.
+
+```ts
+const convertToSecondsArm64 = new tasks.EvaluateExpression(this, 'Convert to seconds', {
+  expression: '$.waitMilliseconds / 1000',
+  architecture: lambda.Architecture.ARM_64,
+});
+```
+
 ## API Gateway
 
 Step Functions supports [API Gateway](https://docs.aws.amazon.com/step-functions/latest/dg/connect-api-gateway.html) through the service integration pattern.
````
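To show where the converted value would typically flow, a hedged sketch of chaining the task into a `Wait` state (the wiring and the `$.waitSeconds` path are illustrative, not part of the diff):

```ts
import * as sfn from 'aws-cdk-lib/aws-stepfunctions';
import * as tasks from 'aws-cdk-lib/aws-stepfunctions-tasks';
import { Construct } from 'constructs';

declare const scope: Construct;
declare const convertToSecondsArm64: tasks.EvaluateExpression;

// Assumes the task result is written to $.waitSeconds (e.g. via resultPath).
const wait = new sfn.Wait(scope, 'Wait', {
  time: sfn.WaitTime.secondsPath('$.waitSeconds'),
});
convertToSecondsArm64.next(wait);
```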
````diff
@@ -1374,12 +1385,12 @@ The following code snippet includes a Task state that uses eks:call to list the
 
 ```ts
 import * as eks from 'aws-cdk-lib/aws-eks';
-import {
+import { KubectlV34Layer } from '@aws-cdk/lambda-layer-kubectl-v34';
 
 const myEksCluster = new eks.Cluster(this, 'my sample cluster', {
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_34,
   clusterName: 'myEksCluster',
-  kubectlLayer: new
+  kubectlLayer: new KubectlV34Layer(this, 'kubectl'),
 });
 
 new tasks.EksCall(this, 'Call a EKS Endpoint', {
````
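The hunk ends at the opening of the `EksCall` task; a hedged sketch of a complete call (the HTTP method and path are illustrative):

```ts
import * as eks from 'aws-cdk-lib/aws-eks';
import * as tasks from 'aws-cdk-lib/aws-stepfunctions-tasks';
import { Construct } from 'constructs';

declare const scope: Construct;
declare const myEksCluster: eks.Cluster;

// Illustrative request; the diff shows only the constructor line.
new tasks.EksCall(scope, 'Call a EKS Endpoint', {
  cluster: myEksCluster,
  httpMethod: tasks.HttpMethods.GET,
  httpPath: '/api/v1/namespaces/default/pods',
});
```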
cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-stepfunctions-tasks/integ.evaluate-expression-arm64.ts ADDED

````diff
@@ -0,0 +1,27 @@
+import { App, Stack } from 'aws-cdk-lib';
+import * as integ from '@aws-cdk/integ-tests-alpha';
+import * as sfn from 'aws-cdk-lib/aws-stepfunctions';
+import * as lambda from 'aws-cdk-lib/aws-lambda';
+import * as cdk from 'aws-cdk-lib';
+import * as tasks from 'aws-cdk-lib/aws-stepfunctions-tasks';
+
+const app = new App();
+const stack = new Stack(app, 'aws-cdk-sfn-evaluate-expression-arm64-integ');
+
+const evaluateExpression = new tasks.EvaluateExpression(stack, 'EvaluateExpression', {
+  expression: '$.a + $.b',
+  architecture: lambda.Architecture.ARM_64,
+});
+
+const sm = new sfn.StateMachine(stack, 'StateMachine', {
+  definitionBody: sfn.DefinitionBody.fromChainable(evaluateExpression),
+  timeout: cdk.Duration.seconds(30),
+});
+
+new cdk.CfnOutput(stack, 'stateMachineArn', {
+  value: sm.stateMachineArn,
+});
+
+new integ.IntegTest(app, 'EvaluateExpressionArm64Test', {
+  testCases: [stack],
+});
````
cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-stepfunctions-tasks/integ.evaluate-expression-default.ts ADDED

````diff
@@ -0,0 +1,25 @@
+import { App, Stack } from 'aws-cdk-lib';
+import * as integ from '@aws-cdk/integ-tests-alpha';
+import * as sfn from 'aws-cdk-lib/aws-stepfunctions';
+import * as cdk from 'aws-cdk-lib';
+import * as tasks from 'aws-cdk-lib/aws-stepfunctions-tasks';
+
+const app = new App();
+const stack = new Stack(app, 'aws-cdk-sfn-evaluate-expression-default-integ');
+
+const evaluateExpression = new tasks.EvaluateExpression(stack, 'EvaluateExpression', {
+  expression: '$.a + $.b',
+});
+
+const sm = new sfn.StateMachine(stack, 'StateMachine', {
+  definitionBody: sfn.DefinitionBody.fromChainable(evaluateExpression),
+  timeout: cdk.Duration.seconds(30),
+});
+
+new cdk.CfnOutput(stack, 'stateMachineArn', {
+  value: sm.stateMachineArn,
+});
+
+new integ.IntegTest(app, 'EvaluateExpressionDefaultTest', {
+  testCases: [stack],
+});
````