konokenj.cdk-api-mcp-server 0.40.0__py3-none-any.whl → 0.42.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of konokenj.cdk-api-mcp-server might be problematic.

Files changed (47)
  1. cdk_api_mcp_server/__about__.py +1 -1
  2. cdk_api_mcp_server/resources/aws-cdk/constructs/@aws-cdk/aws-bedrock-alpha/README.md +540 -0
  3. cdk_api_mcp_server/resources/aws-cdk/constructs/@aws-cdk/aws-eks-v2-alpha/README.md +44 -46
  4. cdk_api_mcp_server/resources/aws-cdk/constructs/@aws-cdk/aws-lambda-python-alpha/README.md +6 -6
  5. cdk_api_mcp_server/resources/aws-cdk/constructs/@aws-cdk/aws-s3tables-alpha/README.md +28 -1
  6. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/README.md/README.md +364 -16
  7. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-apigatewayv2/README.md +144 -0
  8. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-apigatewayv2/integ.api-dualstack.ts +3 -4
  9. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-apigatewayv2/integ.api.ts +2 -4
  10. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-apigatewayv2/integ.stage.ts +7 -20
  11. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-apigatewayv2/integ.usage-plan.ts +80 -0
  12. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-apigatewayv2-authorizers/integ.iam.ts +34 -38
  13. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-apigatewayv2-integrations/integ.sqs.ts +58 -71
  14. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-codepipeline-actions/integ.pipeline-elastic-beanstalk-deploy.ts +1 -1
  15. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-cognito/README.md +11 -0
  16. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-cognito/integ.user-pool-client-explicit-props.ts +1 -0
  17. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-dynamodb/README.md +38 -13
  18. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-dynamodb/integ.dynamodb-v2.cci.ts +49 -0
  19. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-dynamodb/integ.dynamodb.cci.ts +27 -0
  20. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-dynamodb/integ.dynamodb.contirubtor-insights-for-gsi.ts +6 -2
  21. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-dynamodb/integ.table-v2-global.ts +9 -3
  22. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-ecs/README.md +3 -0
  23. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-ecs/integ.ebs-volume-initialization-rate.ts +80 -0
  24. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-ecs-patterns/README.md +2 -0
  25. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-ecs-patterns/integ.alb-fargate-service-smart-defaults.ts +143 -0
  26. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-events/README.md +25 -3
  27. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-events/integ.archive-customer-managed-key.ts +23 -0
  28. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-lambda/README.md +2 -2
  29. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-route53/integ.delete-existing-record-set.ts +0 -1
  30. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-s3-deployment/README.md +18 -0
  31. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-s3-deployment/integ.bucket-deployment-cross-stack-ssm-source.ts +91 -0
  32. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-signer/integ.signing-profile.ts +5 -0
  33. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-sns/README.md +2 -0
  34. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-sns-subscriptions/integ.sns-sqs-subscription-filter.ts +75 -0
  35. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-sns-subscriptions/integ.sns-sqs.ts +21 -40
  36. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-stepfunctions-tasks/README.md +9 -3
  37. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-stepfunctions-tasks/integ.invoke-jsonata.ts +87 -80
  38. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-stepfunctions-tasks/integ.invoke.ts +87 -69
  39. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-stepfunctions-tasks/integ.start-job-run.ts +102 -104
  40. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/core/README.md +2 -1893
  41. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/cx-api/FEATURE_FLAGS.md +52 -0
  42. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/cx-api/README.md +24 -1
  43. {konokenj_cdk_api_mcp_server-0.40.0.dist-info → konokenj_cdk_api_mcp_server-0.42.0.dist-info}/METADATA +2 -2
  44. {konokenj_cdk_api_mcp_server-0.40.0.dist-info → konokenj_cdk_api_mcp_server-0.42.0.dist-info}/RECORD +47 -39
  45. {konokenj_cdk_api_mcp_server-0.40.0.dist-info → konokenj_cdk_api_mcp_server-0.42.0.dist-info}/WHEEL +0 -0
  46. {konokenj_cdk_api_mcp_server-0.40.0.dist-info → konokenj_cdk_api_mcp_server-0.42.0.dist-info}/entry_points.txt +0 -0
  47. {konokenj_cdk_api_mcp_server-0.40.0.dist-info → konokenj_cdk_api_mcp_server-0.42.0.dist-info}/licenses/LICENSE.txt +0 -0
--- a/cdk_api_mcp_server/resources/aws-cdk/constructs/@aws-cdk/aws-eks-v2-alpha/README.md
+++ b/cdk_api_mcp_server/resources/aws-cdk/constructs/@aws-cdk/aws-eks-v2-alpha/README.md
@@ -33,7 +33,7 @@ Here is the minimal example of defining an AWS EKS cluster
 
 ```ts
 const cluster = new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.V1_32,
+  version: eks.KubernetesVersion.V1_33,
 });
 ```
 
@@ -73,7 +73,7 @@ Creating a new cluster is done using the `Cluster` constructs. The only required
 
 ```ts
 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.V1_32,
+  version: eks.KubernetesVersion.V1_33,
 });
 ```
 
@@ -81,7 +81,7 @@ You can also use `FargateCluster` to provision a cluster that uses only fargate
 
 ```ts
 new eks.FargateCluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.V1_32,
+  version: eks.KubernetesVersion.V1_33,
 });
 ```
 
@@ -90,21 +90,21 @@ be created by default. It will only be deployed when `kubectlProviderOptions`
 property is used.**
 
 ```ts
-import { KubectlV32Layer } from '@aws-cdk/lambda-layer-kubectl-v32';
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
 
 new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.V1_32,
+  version: eks.KubernetesVersion.V1_33,
   kubectlProviderOptions: {
-    kubectlLayer: new KubectlV32Layer(this, 'kubectl'),
+    kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
   }
 });
 ```
 
-## EKS Auto Mode
+### EKS Auto Mode
 
 [Amazon EKS Auto Mode](https://aws.amazon.com/eks/auto-mode/) extends AWS management of Kubernetes clusters beyond the cluster itself, allowing AWS to set up and manage the infrastructure that enables the smooth operation of your workloads.
 
-### Using Auto Mode
+#### Using Auto Mode
 
 While `aws-eks` uses `DefaultCapacityType.NODEGROUP` by default, `aws-eks-v2` uses `DefaultCapacityType.AUTOMODE` as the default capacity type.
 
@@ -113,7 +113,7 @@ Auto Mode is enabled by default when creating a new cluster without specifying a
 ```ts
 // Create EKS cluster with Auto Mode implicitly enabled
 const cluster = new eks.Cluster(this, 'EksAutoCluster', {
-  version: eks.KubernetesVersion.V1_32,
+  version: eks.KubernetesVersion.V1_33,
 });
 ```
 
@@ -122,12 +122,12 @@ You can also explicitly enable Auto Mode using `defaultCapacityType`:
 ```ts
 // Create EKS cluster with Auto Mode explicitly enabled
 const cluster = new eks.Cluster(this, 'EksAutoCluster', {
-  version: eks.KubernetesVersion.V1_32,
+  version: eks.KubernetesVersion.V1_33,
   defaultCapacityType: eks.DefaultCapacityType.AUTOMODE,
 });
 ```
 
-### Node Pools
+#### Node Pools
 
 When Auto Mode is enabled, the cluster comes with two default node pools:
 
@@ -138,7 +138,7 @@ These node pools are managed automatically by EKS. You can configure which node
 
 ```ts
 const cluster = new eks.Cluster(this, 'EksAutoCluster', {
-  version: eks.KubernetesVersion.V1_32,
+  version: eks.KubernetesVersion.V1_33,
   defaultCapacityType: eks.DefaultCapacityType.AUTOMODE,
   compute: {
     nodePools: ['system', 'general-purpose'],
@@ -148,13 +148,13 @@ const cluster = new eks.Cluster(this, 'EksAutoCluster', {
 
 For more information, see [Create a Node Pool for EKS Auto Mode](https://docs.aws.amazon.com/eks/latest/userguide/create-node-pool.html).
 
-### Disabling Default Node Pools
+#### Disabling Default Node Pools
 
 You can disable the default node pools entirely by setting an empty array for `nodePools`. This is useful when you want to use Auto Mode features but manage your compute resources separately:
 
 ```ts
 const cluster = new eks.Cluster(this, 'EksAutoCluster', {
-  version: eks.KubernetesVersion.V1_32,
+  version: eks.KubernetesVersion.V1_33,
   defaultCapacityType: eks.DefaultCapacityType.AUTOMODE,
   compute: {
     nodePools: [], // Disable default node pools
@@ -171,7 +171,7 @@ If you prefer to manage your own node groups instead of using Auto Mode, you can
 ```ts
 // Create EKS cluster with traditional managed node group
 const cluster = new eks.Cluster(this, 'EksCluster', {
-  version: eks.KubernetesVersion.V1_32,
+  version: eks.KubernetesVersion.V1_33,
   defaultCapacityType: eks.DefaultCapacityType.NODEGROUP,
   defaultCapacity: 3, // Number of instances
   defaultCapacityInstance: ec2.InstanceType.of(ec2.InstanceClass.T3, ec2.InstanceSize.LARGE),
@@ -182,7 +182,7 @@ You can also create a cluster with no initial capacity and add node groups later
 
 ```ts
 const cluster = new eks.Cluster(this, 'EksCluster', {
-  version: eks.KubernetesVersion.V1_32,
+  version: eks.KubernetesVersion.V1_33,
   defaultCapacityType: eks.DefaultCapacityType.NODEGROUP,
   defaultCapacity: 0,
 });
@@ -203,7 +203,7 @@ You can combine Auto Mode with traditional node groups for specific workload req
 
 ```ts
 const cluster = new eks.Cluster(this, 'Cluster', {
-  version: eks.KubernetesVersion.V1_32,
+  version: eks.KubernetesVersion.V1_33,
   defaultCapacityType: eks.DefaultCapacityType.AUTOMODE,
   compute: {
     nodePools: ['system', 'general-purpose'],
@@ -243,7 +243,7 @@ By default, when using `DefaultCapacityType.NODEGROUP`, this library will alloca
 
 ```ts
 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.V1_32,
+  version: eks.KubernetesVersion.V1_33,
   defaultCapacityType: eks.DefaultCapacityType.NODEGROUP,
 });
 ```
@@ -252,7 +252,7 @@ At cluster instantiation time, you can customize the number of instances and the
 
 ```ts
 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.V1_32,
+  version: eks.KubernetesVersion.V1_33,
   defaultCapacityType: eks.DefaultCapacityType.NODEGROUP,
   defaultCapacity: 5,
   defaultCapacityInstance: ec2.InstanceType.of(ec2.InstanceClass.M5, ec2.InstanceSize.SMALL),
@@ -265,7 +265,7 @@ Additional customizations are available post instantiation. To apply them, set t
 
 ```ts
 const cluster = new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.V1_32,
+  version: eks.KubernetesVersion.V1_33,
   defaultCapacityType: eks.DefaultCapacityType.NODEGROUP,
   defaultCapacity: 0,
 });
@@ -316,7 +316,7 @@ The following code defines an Amazon EKS cluster with a default Fargate Profile
 
 ```ts
 const cluster = new eks.FargateCluster(this, 'MyCluster', {
-  version: eks.KubernetesVersion.V1_32,
+  version: eks.KubernetesVersion.V1_33,
 });
 ```
 
@@ -335,7 +335,7 @@ You can configure the [cluster endpoint access](https://docs.aws.amazon.com/eks/
 
 ```ts
 const cluster = new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.V1_32,
+  version: eks.KubernetesVersion.V1_33,
   endpointAccess: eks.EndpointAccess.PRIVATE, // No access outside of your VPC.
 });
 ```
@@ -357,7 +357,7 @@ To deploy the controller on your EKS cluster, configure the `albController` prop
 
 ```ts
 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.V1_32,
+  version: eks.KubernetesVersion.V1_33,
   albController: {
     version: eks.AlbControllerVersion.V2_8_2,
   },
@@ -398,7 +398,7 @@ You can specify the VPC of the cluster using the `vpc` and `vpcSubnets` properti
 declare const vpc: ec2.Vpc;
 
 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.V1_32,
+  version: eks.KubernetesVersion.V1_33,
   vpc,
   vpcSubnets: [{ subnetType: ec2.SubnetType.PRIVATE_WITH_EGRESS }],
 });
@@ -441,12 +441,12 @@ To create a `Kubectl Handler`, use `kubectlProviderOptions` when creating the cl
 `kubectlLayer` is the only required property in `kubectlProviderOptions`.
 
 ```ts
-import { KubectlV32Layer } from '@aws-cdk/lambda-layer-kubectl-v32';
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
 
 new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.V1_32,
+  version: eks.KubernetesVersion.V1_33,
   kubectlProviderOptions: {
-    kubectlLayer: new KubectlV32Layer(this, 'kubectl'),
+    kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
   }
 });
 ```
@@ -456,8 +456,6 @@ new eks.Cluster(this, 'hello-eks', {
 If you want to use an existing kubectl provider function, for example with tight trusted entities on your IAM Roles - you can import the existing provider and then use the imported provider when importing the cluster:
 
 ```ts
-import { KubectlV32Layer } from '@aws-cdk/lambda-layer-kubectl-v32';
-
 const handlerRole = iam.Role.fromRoleArn(this, 'HandlerRole', 'arn:aws:iam::123456789012:role/lambda-role');
 // get the serivceToken from the custom resource provider
 const functionArn = lambda.Function.fromFunctionName(this, 'ProviderOnEventFunc', 'ProviderframeworkonEvent-XXX').functionArn;
@@ -477,12 +475,12 @@ const cluster = eks.Cluster.fromClusterAttributes(this, 'Cluster', {
 You can configure the environment of this function by specifying it at cluster instantiation. For example, this can be useful in order to configure an http proxy:
 
 ```ts
-import { KubectlV32Layer } from '@aws-cdk/lambda-layer-kubectl-v32';
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
 
 const cluster = new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.V1_32,
+  version: eks.KubernetesVersion.V1_33,
   kubectlProviderOptions: {
-    kubectlLayer: new KubectlV32Layer(this, 'kubectl'),
+    kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
     environment: {
       'http_proxy': 'http://proxy.myproxy.com',
     },
@@ -503,12 +501,12 @@ Depending on which version of kubernetes you're targeting, you will need to use
 the `@aws-cdk/lambda-layer-kubectl-vXY` packages.
 
 ```ts
-import { KubectlV32Layer } from '@aws-cdk/lambda-layer-kubectl-v32';
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
 
 const cluster = new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.V1_32,
+  version: eks.KubernetesVersion.V1_33,
   kubectlProviderOptions: {
-    kubectlLayer: new KubectlV32Layer(this, 'kubectl'),
+    kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
   },
 });
 ```
@@ -518,14 +516,14 @@ const cluster = new eks.Cluster(this, 'hello-eks', {
 By default, the kubectl provider is configured with 1024MiB of memory. You can use the `memory` option to specify the memory size for the AWS Lambda function:
 
 ```ts
-import { KubectlV32Layer } from '@aws-cdk/lambda-layer-kubectl-v32';
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
 
 new eks.Cluster(this, 'MyCluster', {
   kubectlProviderOptions: {
-    kubectlLayer: new KubectlV32Layer(this, 'kubectl'),
+    kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
     memory: Size.gibibytes(4),
   },
-  version: eks.KubernetesVersion.V1_32,
+  version: eks.KubernetesVersion.V1_33,
 });
 ```
 
@@ -556,7 +554,7 @@ When you create a cluster, you can specify a `mastersRole`. The `Cluster` constr
 ```ts
 declare const role: iam.Role;
 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.V1_32,
+  version: eks.KubernetesVersion.V1_33,
   mastersRole: role,
 });
 ```
@@ -577,7 +575,7 @@ You can use the `secretsEncryptionKey` to configure which key the cluster will u
 const secretsKey = new kms.Key(this, 'SecretsKey');
 const cluster = new eks.Cluster(this, 'MyCluster', {
   secretsEncryptionKey: secretsKey,
-  version: eks.KubernetesVersion.V1_32,
+  version: eks.KubernetesVersion.V1_33,
 });
 ```
 
@@ -587,7 +585,7 @@ You can also use a similar configuration for running a cluster built using the F
 const secretsKey = new kms.Key(this, 'SecretsKey');
 const cluster = new eks.FargateCluster(this, 'MyFargateCluster', {
   secretsEncryptionKey: secretsKey,
-  version: eks.KubernetesVersion.V1_32,
+  version: eks.KubernetesVersion.V1_33,
 });
 ```
 
@@ -628,7 +626,7 @@ eks.AccessPolicy.fromAccessPolicyName('AmazonEKSAdminPolicy', {
 Use `grantAccess()` to grant the AccessPolicy to an IAM principal:
 
 ```ts
-import { KubectlV32Layer } from '@aws-cdk/lambda-layer-kubectl-v32';
+import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';
 declare const vpc: ec2.Vpc;
 
 const clusterAdminRole = new iam.Role(this, 'ClusterAdminRole', {
@@ -642,9 +640,9 @@ const eksAdminRole = new iam.Role(this, 'EKSAdminRole', {
 const cluster = new eks.Cluster(this, 'Cluster', {
   vpc,
   mastersRole: clusterAdminRole,
-  version: eks.KubernetesVersion.V1_32,
+  version: eks.KubernetesVersion.V1_33,
   kubectlProviderOptions: {
-    kubectlLayer: new KubectlV32Layer(this, 'kubectl'),
+    kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
     memory: Size.gibibytes(4),
   },
 });
@@ -829,7 +827,7 @@ when a cluster is defined:
 
 ```ts
 new eks.Cluster(this, 'MyCluster', {
-  version: eks.KubernetesVersion.V1_32,
+  version: eks.KubernetesVersion.V1_33,
   prune: false,
 });
 ```
@@ -1145,7 +1143,7 @@ property. For example:
 ```ts
 const cluster = new eks.Cluster(this, 'Cluster', {
   // ...
-  version: eks.KubernetesVersion.V1_32,
+  version: eks.KubernetesVersion.V1_33,
   clusterLogging: [
     eks.ClusterLoggingTypes.API,
     eks.ClusterLoggingTypes.AUTHENTICATOR,
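
Every hunk in this README makes the same paired change: the cluster's `version` moves from `KubernetesVersion.V1_32` to `V1_33`, and the kubectl Lambda layer moves from `@aws-cdk/lambda-layer-kubectl-v32` to `-v33`. A minimal sketch of the corresponding consumer-side upgrade, assuming a project that depends on `@aws-cdk/aws-eks-v2-alpha` and `@aws-cdk/lambda-layer-kubectl-v33` (the stack and construct names here are illustrative, not from the diff):

```ts
import { App, Stack } from 'aws-cdk-lib';
import { Construct } from 'constructs';
import * as eks from '@aws-cdk/aws-eks-v2-alpha';
import { KubectlV33Layer } from '@aws-cdk/lambda-layer-kubectl-v33';

class UpgradedEksStack extends Stack {
  constructor(scope: Construct, id: string) {
    super(scope, id);

    // Bump the control-plane version and the kubectl layer together,
    // mirroring the V1_32 -> V1_33 / v32 -> v33 changes in the hunks above.
    new eks.Cluster(this, 'hello-eks', {
      version: eks.KubernetesVersion.V1_33,
      kubectlProviderOptions: {
        kubectlLayer: new KubectlV33Layer(this, 'kubectl'),
      },
    });
  }
}

const app = new App();
new UpgradedEksStack(app, 'UpgradedEksStack');
```
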
--- a/cdk_api_mcp_server/resources/aws-cdk/constructs/@aws-cdk/aws-lambda-python-alpha/README.md
+++ b/cdk_api_mcp_server/resources/aws-cdk/constructs/@aws-cdk/aws-lambda-python-alpha/README.md
@@ -210,12 +210,12 @@ new python.PythonFunction(this, 'function', {
   entry,
   runtime: Runtime.PYTHON_3_8,
   bundling: {
-  network: 'host',
-  securityOpt: 'no-new-privileges',
-  user: 'user:group',
-  volumesFrom: ['777f7dc92da7'],
-  volumes: [{ hostPath: '/host-path', containerPath: '/container-path' }],
-  },
+    network: 'host',
+    securityOpt: 'no-new-privileges',
+    user: 'user:group',
+    volumesFrom: ['777f7dc92da7'],
+    volumes: [{ hostPath: '/host-path', containerPath: '/container-path' }],
+  },
 });
 ```
 
--- a/cdk_api_mcp_server/resources/aws-cdk/constructs/@aws-cdk/aws-s3tables-alpha/README.md
+++ b/cdk_api_mcp_server/resources/aws-cdk/constructs/@aws-cdk/aws-s3tables-alpha/README.md
@@ -159,9 +159,36 @@ const encryptedBucketAuto = new TableBucket(scope, 'EncryptedTableBucketAuto', {
 });
 ```
 
+### Controlling Table Permissions
+
+```ts
+// Grant the principal read permissions to the table
+const accountId = '123456789012'
+table.grantRead(new iam.AccountPrincipal(accountId));
+
+// Grant the role write permissions to the table
+const role = new iam.Role(stack, 'MyRole', { assumedBy: new iam.ServicePrincipal('sample') });
+table.grantWrite(role);
+
+// Grant the user read and write permissions to the table
+table.grantReadWrite(new iam.User(stack, 'MyUser'));
+
+// Grant an account permissions to the table
+table.grantReadWrite(new iam.AccountPrincipal(accountId));
+
+// Add custom resource policy statements
+const permissions = new iam.PolicyStatement({
+  effect: iam.Effect.ALLOW,
+  actions: ['s3tables:*'],
+  principals: [ new iam.ServicePrincipal('example.aws.internal') ],
+  resources: ['*']
+});
+
+table.addToResourcePolicy(permissions);
+```
+
 ## Coming Soon
 
 L2 Construct support for:
 
-- Table Policy
 - KMS encryption support for Tables
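
The `Table Policy` bullet drops out of the Coming Soon list, presumably because the grant helpers and `addToResourcePolicy` introduced above now cover it. A short usage sketch under the same assumptions as the README hunk (a `Table` construct imported from `@aws-cdk/aws-s3tables-alpha`; the role and its trust principal are illustrative):

```ts
import { Stack } from 'aws-cdk-lib';
import * as iam from 'aws-cdk-lib/aws-iam';
import { Table } from '@aws-cdk/aws-s3tables-alpha';

declare const stack: Stack;
declare const table: Table;

// A writer role for an ingestion job, plus read-only access for a
// separate reporting account, using the grant helpers shown above.
const writerRole = new iam.Role(stack, 'WriterRole', {
  assumedBy: new iam.ServicePrincipal('glue.amazonaws.com'),
});
table.grantWrite(writerRole);
table.grantRead(new iam.AccountPrincipal('123456789012'));
```
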