konokenj.cdk-api-mcp-server 0.28.0__py3-none-any.whl → 0.30.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of konokenj.cdk-api-mcp-server might be problematic.
- cdk_api_mcp_server/__about__.py +1 -1
- cdk_api_mcp_server/resources/aws-cdk/constructs/@aws-cdk/aws-amplify-alpha/README.md +71 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/@aws-cdk/integ-tests-alpha/README.md +1 -1
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-apigatewayv2/integ.api-dualstack.ts +4 -3
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-apigatewayv2/integ.api.ts +4 -2
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-apigatewayv2/integ.stage.ts +20 -7
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-apigatewayv2-authorizers/integ.iam.ts +38 -34
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-apigatewayv2-integrations/integ.sqs.ts +71 -58
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-backup/README.md +2 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-backup/integ.backup.ts +12 -1
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-cloudfront/README.md +18 -1
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-cloudwatch/README.md +32 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-cloudwatch/integ.alarm-and-dashboard.ts +1 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-cloudwatch/integ.math-alarm-and-dashboard.ts +4 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-codedeploy/integ.deployment-config.ts +4 -15
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-codedeploy/integ.deployment-group.ts +218 -40
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-codepipeline-actions/integ.pipeline-elastic-beanstalk-deploy.ts +1 -1
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-ec2/README.md +13 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-ec2/integ.client-vpn-endpoint-client-route-enforcement.ts +68 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-eks/README.md +83 -83
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-eks/integ.eks-al2023-nodegroup.ts +1 -2
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-eks/integ.fargate-cluster.ts +1 -3
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-eks/integ.helm-chart-logging.ts +55 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-kms/README.md +4 -3
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-kms/integ.alias-from-alias-name.ts +26 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-lambda/README.md +15 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-lambda/integ.log-retention.ts +8 -2
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-lambda-event-sources/README.md +68 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-lambda-event-sources/integ.kafka-schema-registry.ts +186 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-rds/README.md +23 -13
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-rds/integ.cluster-snapshot.ts +3 -71
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-rds/integ.instance-engine-lifecycle-support.ts +53 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-s3/README.md +16 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-s3-notifications/integ.bucket-notifications.ts +42 -80
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-stepfunctions-tasks/integ.call-aws-service-cross-region-lambda.ts +97 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-stepfunctions-tasks/integ.start-job-run.ts +51 -106
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/cx-api/FEATURE_FLAGS.md +21 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/cx-api/README.md +14 -0
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/pipelines/README.md +7 -1
- cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/pipelines/integ.newpipeline-single-publisher.ts +53 -0
- {konokenj_cdk_api_mcp_server-0.28.0.dist-info → konokenj_cdk_api_mcp_server-0.30.0.dist-info}/METADATA +2 -2
- {konokenj_cdk_api_mcp_server-0.28.0.dist-info → konokenj_cdk_api_mcp_server-0.30.0.dist-info}/RECORD +45 -38
- {konokenj_cdk_api_mcp_server-0.28.0.dist-info → konokenj_cdk_api_mcp_server-0.30.0.dist-info}/WHEEL +0 -0
- {konokenj_cdk_api_mcp_server-0.28.0.dist-info → konokenj_cdk_api_mcp_server-0.30.0.dist-info}/entry_points.txt +0 -0
- {konokenj_cdk_api_mcp_server-0.28.0.dist-info → konokenj_cdk_api_mcp_server-0.30.0.dist-info}/licenses/LICENSE.txt +0 -0
@@ -69,12 +69,12 @@ This example defines an Amazon EKS cluster with the following configuration:
 * A Kubernetes pod with a container based on the [paulbouwer/hello-kubernetes](https://github.com/paulbouwer/hello-kubernetes) image.

 ```ts
-import {
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';

 // provisioning a cluster
 const cluster = new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.
-  kubectlLayer: new
+  version: eks.KubernetesVersion.V1_33,
+  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
 });

 // apply a kubernetes manifest to the cluster
@@ -139,22 +139,22 @@ A more detailed breakdown of each is provided further down this README.
 Creating a new cluster is done using the `Cluster` or `FargateCluster` constructs. The only required properties are the kubernetes `version` and `kubectlLayer`.

 ```ts
-import {
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';

 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.
-  kubectlLayer: new
+  version: eks.KubernetesVersion.V1_33,
+  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
 });
 ```

 You can also use `FargateCluster` to provision a cluster that uses only fargate workers.

 ```ts
-import {
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';

 new eks.FargateCluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.
-  kubectlLayer: new
+  version: eks.KubernetesVersion.V1_33,
+  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
 });
 ```
@@ -177,13 +177,13 @@ By default, this library will allocate a managed node group with 2 *m5.large* in
 At cluster instantiation time, you can customize the number of instances and their type:

 ```ts
-import {
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';

 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_33,
   defaultCapacity: 5,
   defaultCapacityInstance: ec2.InstanceType.of(ec2.InstanceClass.M5, ec2.InstanceSize.SMALL),
-  kubectlLayer: new
+  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
 });
 ```
@@ -192,12 +192,12 @@ To access the node group that was created on your behalf, you can use `cluster.d
 Additional customizations are available post instantiation. To apply them, set the default capacity to 0, and use the `cluster.addNodegroupCapacity` method:

 ```ts
-import {
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';

 const cluster = new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_33,
   defaultCapacity: 0,
-  kubectlLayer: new
+  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
 });

 cluster.addNodegroupCapacity('custom-node-group', {
@@ -273,7 +273,7 @@ Node groups are available with IPv6 configured networks. For custom roles assig
 > For more details visit [Configuring the Amazon VPC CNI plugin for Kubernetes to use IAM roles for service accounts](https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html#cni-iam-role-create-role)

 ```ts
-import {
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';

 const ipv6Management = new iam.PolicyDocument({
   statements: [new iam.PolicyStatement({
@@ -299,9 +299,9 @@ const eksClusterNodeGroupRole = new iam.Role(this, 'eksClusterNodeGroupRole', {
 });

 const cluster = new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_33,
   defaultCapacity: 0,
-  kubectlLayer: new
+  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
 });

 cluster.addNodegroupCapacity('custom-node-group', {
@@ -413,12 +413,12 @@ has been changed. As a workaround, you need to add a temporary policy to the clu
 successful replacement. Consider this example if you are renaming the cluster from `foo` to `bar`:

 ```ts
-import {
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';

 const cluster = new eks.Cluster(this, 'cluster-to-rename', {
   clusterName: 'foo', // rename this to 'bar'
-  kubectlLayer: new
-  version: eks.KubernetesVersion.
+  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
+  version: eks.KubernetesVersion.V1_33,
 });

 // allow the cluster admin role to delete the cluster 'foo'
@@ -471,11 +471,11 @@ To create an EKS cluster that **only** uses Fargate capacity, you can use `Farga
 The following code defines an Amazon EKS cluster with a default Fargate Profile that matches all pods from the "kube-system" and "default" namespaces. It is also configured to [run CoreDNS on Fargate](https://docs.aws.amazon.com/eks/latest/userguide/fargate-getting-started.html#fargate-gs-coredns).

 ```ts
-import {
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';

 const cluster = new eks.FargateCluster(this, 'MyCluster', {
-  version: eks.KubernetesVersion.
-  kubectlLayer: new
+  version: eks.KubernetesVersion.V1_33,
+  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
 });
 ```
@@ -551,12 +551,12 @@ To disable bootstrapping altogether (i.e. to fully customize user-data), set `bo
 You can also configure the cluster to use an auto-scaling group as the default capacity:

 ```ts
-import {
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';

 const cluster = new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_33,
   defaultCapacityType: eks.DefaultCapacityType.EC2,
-  kubectlLayer: new
+  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
 });
 ```
@@ -658,12 +658,12 @@ AWS Identity and Access Management (IAM) and native Kubernetes [Role Based Acces
 You can configure the [cluster endpoint access](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) by using the `endpointAccess` property:

 ```ts
-import {
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';

 const cluster = new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_33,
   endpointAccess: eks.EndpointAccess.PRIVATE, // No access outside of your VPC.
-  kubectlLayer: new
+  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
 });
 ```
@@ -683,31 +683,31 @@ From the docs:
 To deploy the controller on your EKS cluster, configure the `albController` property:

 ```ts
-import {
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';

 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_33,
   albController: {
     version: eks.AlbControllerVersion.V2_8_2,
   },
-  kubectlLayer: new
+  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
 });
 ```

 To provide additional Helm chart values supported by `albController` in CDK, use the `additionalHelmChartValues` property. For example, the following code snippet shows how to set the `enableWafV2` flag:

 ```ts
-import {
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';

 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_33,
   albController: {
     version: eks.AlbControllerVersion.V2_8_2,
     additionalHelmChartValues: {
       enableWafv2: false
     }
   },
-  kubectlLayer: new
+  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
 });
 ```
@@ -744,15 +744,15 @@ if (cluster.albController) {
 You can specify the VPC of the cluster using the `vpc` and `vpcSubnets` properties:

 ```ts
-import {
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';

 declare const vpc: ec2.Vpc;

 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_33,
   vpc,
   vpcSubnets: [{ subnetType: ec2.SubnetType.PRIVATE_WITH_EGRESS }],
-  kubectlLayer: new
+  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
 });
 ```
@@ -795,11 +795,11 @@ The `ClusterHandler` is a set of Lambda functions (`onEventHandler`, `isComplete
 You can configure the environment of the Cluster Handler functions by specifying it at cluster instantiation. For example, this can be useful in order to configure an http proxy:

 ```ts
-import {
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';

 declare const proxyInstanceSecurityGroup: ec2.SecurityGroup;
 const cluster = new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_33,
   clusterHandlerEnvironment: {
     https_proxy: 'http://proxy.myproxy.com',
   },
@@ -808,7 +808,7 @@ const cluster = new eks.Cluster(this, 'hello-eks', {
    * Cluster Handler Lambdas so that it can reach the proxy.
    */
   clusterHandlerSecurityGroup: proxyInstanceSecurityGroup,
-  kubectlLayer: new
+  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
 });
 ```
@@ -817,7 +817,7 @@ const cluster = new eks.Cluster(this, 'hello-eks', {
 You can optionally choose to configure your cluster to use IPv6 using the [`ipFamily`](https://docs.aws.amazon.com/eks/latest/APIReference/API_KubernetesNetworkConfigRequest.html#AmazonEKS-Type-KubernetesNetworkConfigRequest-ipFamily) definition for your cluster. Note that this will require the underlying subnets to have an associated IPv6 CIDR.

 ```ts
-import {
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
 declare const vpc: ec2.Vpc;

 function associateSubnetWithV6Cidr(vpc: ec2.Vpc, count: number, subnet: ec2.ISubnet) {
@@ -843,11 +843,11 @@ for (let subnet of subnets) {
 }

 const cluster = new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_33,
   vpc: vpc,
   ipFamily: eks.IpFamily.IP_V6,
   vpcSubnets: [{ subnets: vpc.publicSubnets }],
-  kubectlLayer: new
+  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
 });
 ```
@@ -878,14 +878,14 @@ const cluster = eks.Cluster.fromClusterAttributes(this, 'Cluster', {
 You can configure the environment of this function by specifying it at cluster instantiation. For example, this can be useful in order to configure an http proxy:

 ```ts
-import {
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';

 const cluster = new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_33,
   kubectlEnvironment: {
     'http_proxy': 'http://proxy.myproxy.com',
   },
-  kubectlLayer: new
+  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
 });
 ```
@@ -902,11 +902,11 @@ Depending on which version of kubernetes you're targeting, you will need to use
 the `@aws-cdk/lambda-layer-kubectl-vXY` packages.

 ```ts
-import {
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';

 const cluster = new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.
-  kubectlLayer: new
+  version: eks.KubernetesVersion.V1_33,
+  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
 });
 ```
@@ -941,7 +941,7 @@ const cluster1 = new eks.Cluster(this, 'MyCluster', {
   kubectlLayer: layer,
   vpc,
   clusterName: 'cluster-name',
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_33,
 });

 // or
@@ -957,12 +957,12 @@ const cluster2 = eks.Cluster.fromClusterAttributes(this, 'MyCluster', {
 By default, the kubectl provider is configured with 1024MiB of memory. You can use the `kubectlMemory` option to specify the memory size for the AWS Lambda function:

 ```ts
-import {
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';

 new eks.Cluster(this, 'MyCluster', {
   kubectlMemory: Size.gibibytes(4),
-  version: eks.KubernetesVersion.
-  kubectlLayer: new
+  version: eks.KubernetesVersion.V1_33,
+  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
 });

 // or
@@ -999,13 +999,13 @@ cluster.addAutoScalingGroupCapacity('self-ng-arm', {
 When you create a cluster, you can specify a `mastersRole`. The `Cluster` construct will associate this role with the `system:masters` [RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) group, giving it super-user access to the cluster.

 ```ts
-import {
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';

 declare const role: iam.Role;
 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_33,
   mastersRole: role,
-  kubectlLayer: new
+  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
 });
 ```
@@ -1051,26 +1051,26 @@ You can use the `secretsEncryptionKey` to configure which key the cluster will u
 > This setting can only be specified when the cluster is created and cannot be updated.

 ```ts
-import {
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';

 const secretsKey = new kms.Key(this, 'SecretsKey');
 const cluster = new eks.Cluster(this, 'MyCluster', {
   secretsEncryptionKey: secretsKey,
-  version: eks.KubernetesVersion.
-  kubectlLayer: new
+  version: eks.KubernetesVersion.V1_33,
+  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
 });
 ```

 You can also use a similar configuration for running a cluster built using the FargateCluster construct.

 ```ts
-import {
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';

 const secretsKey = new kms.Key(this, 'SecretsKey');
 const cluster = new eks.FargateCluster(this, 'MyFargateCluster', {
   secretsEncryptionKey: secretsKey,
-  version: eks.KubernetesVersion.
-  kubectlLayer: new
+  version: eks.KubernetesVersion.V1_33,
+  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
 });
 ```
@@ -1088,11 +1088,11 @@ When you create an Amazon EKS cluster, you can configure it to leverage the [EKS
 Once you have identified the on-premises node and pod (optional) CIDRs you will use for your hybrid nodes and the workloads running on them, you can specify them during cluster creation using the `remoteNodeNetworks` and `remotePodNetworks` (optional) properties:

 ```ts
-import {
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';

 new eks.Cluster(this, 'Cluster', {
-  version: eks.KubernetesVersion.
-  kubectlLayer: new
+  version: eks.KubernetesVersion.V1_33,
+  kubectlLayer: new KubectlV33Layer(this, 'KubectlLayer'),
   remoteNodeNetworks: [
     {
       cidrs: ['10.0.0.0/16'],
@@ -1145,7 +1145,7 @@ To access the Kubernetes resources from the console, make sure your viewing prin
 in the `aws-auth` ConfigMap. Some options to consider:

 ```ts
-import {
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
 declare const cluster: eks.Cluster;
 declare const your_current_role: iam.Role;
 declare const vpc: ec2.Vpc;
@@ -1165,7 +1165,7 @@ your_current_role.addToPolicy(new iam.PolicyStatement({

 ```ts
 // Option 2: create your custom mastersRole with scoped assumeBy arn as the Cluster prop. Switch to this role from the AWS console.
-import {
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
 declare const vpc: ec2.Vpc;

@@ -1175,8 +1175,8 @@ const mastersRole = new iam.Role(this, 'MastersRole', {

 const cluster = new eks.Cluster(this, 'EksCluster', {
   vpc,
-  version: eks.KubernetesVersion.
-  kubectlLayer: new
+  version: eks.KubernetesVersion.V1_33,
+  kubectlLayer: new KubectlV33Layer(this, 'KubectlLayer'),
   mastersRole,
 });
@@ -1220,13 +1220,13 @@ AWS IAM principals from both Amazon EKS access entry APIs and the aws-auth confi
 To specify the `authenticationMode`:

 ```ts
-import {
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
 declare const vpc: ec2.Vpc;

 new eks.Cluster(this, 'Cluster', {
   vpc,
-  version: eks.KubernetesVersion.
-  kubectlLayer: new
+  version: eks.KubernetesVersion.V1_33,
+  kubectlLayer: new KubectlV33Layer(this, 'KubectlLayer'),
   authenticationMode: eks.AuthenticationMode.API_AND_CONFIG_MAP,
 });
 ```
@@ -1271,7 +1271,7 @@ eks.AccessPolicy.fromAccessPolicyName('AmazonEKSAdminPolicy', {
 Use `grantAccess()` to grant the AccessPolicy to an IAM principal:

 ```ts
-import {
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
 declare const vpc: ec2.Vpc;

 const clusterAdminRole = new iam.Role(this, 'ClusterAdminRole', {
@@ -1289,8 +1289,8 @@ const eksAdminViewRole = new iam.Role(this, 'EKSAdminViewRole', {
 const cluster = new eks.Cluster(this, 'Cluster', {
   vpc,
   mastersRole: clusterAdminRole,
-  version: eks.KubernetesVersion.
-  kubectlLayer: new
+  version: eks.KubernetesVersion.V1_33,
+  kubectlLayer: new KubectlV33Layer(this, 'KubectlLayer'),
   authenticationMode: eks.AuthenticationMode.API_AND_CONFIG_MAP,
 });
@@ -1622,12 +1622,12 @@ Pruning is enabled by default but can be disabled through the `prune` option
 when a cluster is defined:

 ```ts
-import {
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';

 new eks.Cluster(this, 'MyCluster', {
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_33,
   prune: false,
-  kubectlLayer: new
+  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
 });
 ```
@@ -2030,17 +2030,17 @@ You can enable logging for each one separately using the `clusterLogging`
 property. For example:

 ```ts
-import {
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';

 const cluster = new eks.Cluster(this, 'Cluster', {
   // ...
-  version: eks.KubernetesVersion.
+  version: eks.KubernetesVersion.V1_33,
   clusterLogging: [
     eks.ClusterLoggingTypes.API,
     eks.ClusterLoggingTypes.AUTHENTICATOR,
     eks.ClusterLoggingTypes.SCHEDULER,
   ],
-  kubectlLayer: new
+  kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
 });
 ```
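Every aws-eks README hunk above makes the same pair of edits: pin `version` to `eks.KubernetesVersion.V1_33` and supply a matching `KubectlV33Layer` as `kubectlLayer`. For reference, a minimal self-contained sketch of that pattern (the stack and construct ids here are illustrative, not taken from the diff):

```ts
import { App, Stack } from 'aws-cdk-lib';
import * as eks from 'aws-cdk-lib/aws-eks';
import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';

const app = new App();
const stack = new Stack(app, 'EksV133Sketch');

// `version` and `kubectlLayer` are the only required cluster properties;
// the kubectl layer must match the cluster's Kubernetes minor version.
new eks.Cluster(stack, 'HelloEKS', {
  version: eks.KubernetesVersion.V1_33,
  kubectlLayer: new KubectlV33Layer(stack, 'kubectl'),
});

app.synth();
```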
cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-eks/integ.eks-al2023-nodegroup.ts
CHANGED
@@ -27,7 +27,7 @@ class EksClusterStack extends Stack {
       vpc: this.vpc,
       mastersRole,
       defaultCapacity: 0,
-      ...getClusterVersionConfig(this, eks.KubernetesVersion.
+      ...getClusterVersionConfig(this, eks.KubernetesVersion.V1_33),
     });

     // create nodegroup with AL2023_X86_64_STANDARD
@@ -65,4 +65,3 @@ new integ.IntegTest(app, 'aws-cdk-eks-cluster-al2023-nodegroup', {
   // Test includes assets that are updated weekly. If not disabled, the upgrade PR will fail.
   diffAssets: false,
 });
-app.synth();
cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-eks/integ.fargate-cluster.ts
CHANGED
@@ -17,7 +17,7 @@ class EksFargateClusterStack extends Stack {
     this.node.setContext(EC2_RESTRICT_DEFAULT_SECURITY_GROUP, false);
     this.vpc = props?.vpc ?? this.createDummyVpc();
     new eks.FargateCluster(this, 'FargateCluster', {
-      ...getClusterVersionConfig(this, eks.KubernetesVersion.
+      ...getClusterVersionConfig(this, eks.KubernetesVersion.V1_33),
       prune: false,
       authenticationMode: props?.authMode,
       vpc: this.vpc,
@@ -50,5 +50,3 @@ new integ.IntegTest(app, 'aws-cdk-eks-fargate-cluster', {
   // Test includes assets that are updated weekly. If not disabled, the upgrade PR will fail.
   diffAssets: false,
 });
-
-app.synth();
cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-eks/integ.helm-chart-logging.ts
ADDED
@@ -0,0 +1,55 @@
+/// !cdk-integ pragma:disable-update-workflow
+import * as ec2 from 'aws-cdk-lib/aws-ec2';
+import { App, Stack } from 'aws-cdk-lib';
+import * as integ from '@aws-cdk/integ-tests-alpha';
+import { getClusterVersionConfig } from './integ-tests-kubernetes-version';
+import * as eks from 'aws-cdk-lib/aws-eks';
+
+/**
+ * Integration test for improved Helm chart error logging
+ *
+ * This test creates a minimal EKS cluster and installs a Helm chart
+ * to verify the improved error logging functionality.
+ */
+class HelmChartLoggingStack extends Stack {
+  constructor(scope: App, id: string) {
+    super(scope, id);
+
+    // Create a minimal VPC with just one NAT gateway
+    const vpc = new ec2.Vpc(this, 'Vpc', {
+      natGateways: 1,
+      restrictDefaultSecurityGroup: false,
+    });
+
+    // Create a minimal EKS cluster
+    const cluster = new eks.Cluster(this, 'Cluster', {
+      vpc,
+      defaultCapacity: 1,
+      ...getClusterVersionConfig(this),
+    });
+
+    // Install a simple Helm chart from a public repository
+    // Using the AWS Load Balancer Controller chart as it's commonly used
+    cluster.addHelmChart('aws-load-balancer-controller', {
+      chart: 'aws-load-balancer-controller',
+      repository: 'https://aws.github.io/eks-charts',
+      namespace: 'kube-system',
+      version: '1.6.0',
+      values: {
+        clusterName: cluster.clusterName,
+      },
+    });
+  }
+}
+
+const app = new App();
+
+const stack = new HelmChartLoggingStack(app, 'aws-cdk-eks-helm-logging-test');
+
+new integ.IntegTest(app, 'aws-cdk-eks-helm-logging', {
+  testCases: [stack],
+  // Test includes assets that are updated weekly. If not disabled, the upgrade PR will fail.
+  diffAssets: false,
+});
+
+app.synth();
cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-kms/README.md
CHANGED
@@ -91,9 +91,10 @@ const trail = new cloudtrail.Trail(this, 'myCloudTrail', {
 });
 ```

-Note that calls to `addToResourcePolicy`
-
-
+Note that calls to `addToResourcePolicy` method on `myKeyAlias` will be a no-op, `addAlias` and `aliasTargetKey` will fail.
+The `grant*` methods will not modify the key policy, as the imported alias does not have a reference to the underlying KMS Key.
+For the `grant*` methods to modify the principal's IAM policy, the feature flag `@aws-cdk/aws-kms:applyImportedAliasPermissionsToPrincipal`
+must be set to `true`. By default, this flag is `false` and `grant*` calls on an imported alias are a no-op.

 ### Lookup key by alias
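The README change above documents the `@aws-cdk/aws-kms:applyImportedAliasPermissionsToPrincipal` feature flag, and the integration test added below exercises it. A minimal sketch of the same idea, assuming the flag is set via App context (it could equally live in `cdk.json`); the alias name and role are illustrative:

```ts
import { App, Stack } from 'aws-cdk-lib';
import { Alias } from 'aws-cdk-lib/aws-kms';
import { Role, ServicePrincipal } from 'aws-cdk-lib/aws-iam';

// Opt in to the new behaviour: grant* on an imported alias updates the
// principal's IAM policy instead of being a no-op.
const app = new App({
  context: { '@aws-cdk/aws-kms:applyImportedAliasPermissionsToPrincipal': true },
});
const stack = new Stack(app, 'ImportedAliasGrantSketch');

// Importing by alias name gives no reference to the underlying key,
// so the key policy itself is never modified.
const alias = Alias.fromAliasName(stack, 'Alias', 'alias/MyKey');
const role = new Role(stack, 'Role', {
  assumedBy: new ServicePrincipal('lambda.amazonaws.com'),
});

alias.grantEncryptDecrypt(role); // with the flag on, permissions land in the role's IAM policy

app.synth();
```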
cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-kms/integ.alias-from-alias-name.ts
ADDED
@@ -0,0 +1,26 @@
+import { App, Stack } from 'aws-cdk-lib';
+import { Alias } from 'aws-cdk-lib/aws-kms';
+import { IntegTest } from '@aws-cdk/integ-tests-alpha';
+import { ServicePrincipal, Role } from 'aws-cdk-lib/aws-iam';
+import * as cxapi from 'aws-cdk-lib/cx-api';
+
+const app = new App({
+  context: { [cxapi.KMS_APPLY_IMPORTED_ALIAS_PERMISSIONS_TO_PRINCIPAL]: true },
+});
+const stack = new Stack(app, 'aws-cdk-kms');
+const alias = Alias.fromAliasName(stack, 'alias', 'alias/MyKey');
+
+const role = new Role(stack, 'Role', {
+  assumedBy: new ServicePrincipal('lambda.amazonaws.com'),
+});
+
+alias.grantVerifyMac(role);
+alias.grantEncryptDecrypt(role);
+alias.grantSignVerify(role);
+alias.grantGenerateMac(role);
+
+new IntegTest(app, 'kms-alias-from-alias-name', {
+  testCases: [stack],
+});
+
+app.synth();
cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-lambda/README.md
CHANGED
@@ -281,6 +281,21 @@ const fn = new lambda.Function(this, 'MyFunctionWithFFTrue', {
 cdk.Tags.of(fn).add('env', 'dev'); // the tag is also added to the log group
 ```

+### Log removal policy
+
+When using the deprecated `logRetention` property for creating a LogGroup, you can configure log removal policy:
+```ts
+import * as logs from 'aws-cdk-lib/aws-logs';
+
+const fn = new lambda.Function(this, 'MyFunctionWithFFTrue', {
+  runtime: lambda.Runtime.NODEJS_LATEST,
+  handler: 'handler.main',
+  code: lambda.Code.fromAsset('lambda'),
+  logRetention: logs.RetentionDays.INFINITE,
+  logRemovalPolicy: RemovalPolicy.RETAIN,
+});
+```
+
 ## Resource-based Policies

 AWS Lambda supports resource-based policies for controlling access to Lambda