@aws-solutions-constructs/aws-eventbridge-lambda 2.0.0-rc.2 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.jsii +7 -1047
- package/lib/index.js +1 -1
- package/package.json +5 -5
- package/test/eventbridge-lambda.test.js +4 -4
- package/test/integ.eventbridge-existing-eventbus.expected.json +1 -1
- package/test/integ.eventbridge-existing-eventbus.js +2 -2
- package/test/integ.eventbridge-new-eventbus.expected.json +3 -3
- package/test/integ.eventbridge-new-eventbus.js +2 -2
package/.jsii
CHANGED
|
@@ -8,7 +8,7 @@
|
|
|
8
8
|
"url": "https://aws.amazon.com"
|
|
9
9
|
},
|
|
10
10
|
"dependencies": {
|
|
11
|
-
"@aws-solutions-constructs/core": "2.0.0
|
|
11
|
+
"@aws-solutions-constructs/core": "2.0.0",
|
|
12
12
|
"aws-cdk-lib": "^2.0.0-rc.23",
|
|
13
13
|
"constructs": "^10.0.0"
|
|
14
14
|
},
|
|
@@ -40,10 +40,6 @@
|
|
|
40
40
|
"aws-cdk-lib": {
|
|
41
41
|
"submodules": {
|
|
42
42
|
"aws-cdk-lib.alexa_ask": {
|
|
43
|
-
"locationInModule": {
|
|
44
|
-
"filename": "lib/index.ts",
|
|
45
|
-
"line": 1
|
|
46
|
-
},
|
|
47
43
|
"targets": {
|
|
48
44
|
"dotnet": {
|
|
49
45
|
"namespace": "Amazon.CDK.Alexa.Ask"
|
|
@@ -57,13 +53,6 @@
|
|
|
57
53
|
}
|
|
58
54
|
},
|
|
59
55
|
"aws-cdk-lib.assets": {
|
|
60
|
-
"locationInModule": {
|
|
61
|
-
"filename": "lib/index.ts",
|
|
62
|
-
"line": 2
|
|
63
|
-
},
|
|
64
|
-
"readme": {
|
|
65
|
-
"markdown": "# AWS CDK Assets\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n> This API may emit warnings. Backward compatibility is not guaranteed.\n\n---\n\n<!--END STABILITY BANNER-->\n\nAll types moved to @aws-cdk/core.\n"
|
|
66
|
-
},
|
|
67
56
|
"targets": {
|
|
68
57
|
"dotnet": {
|
|
69
58
|
"namespace": "Amazon.CDK.Assets"
|
|
@@ -77,10 +66,6 @@
|
|
|
77
66
|
}
|
|
78
67
|
},
|
|
79
68
|
"aws-cdk-lib.aws_accessanalyzer": {
|
|
80
|
-
"locationInModule": {
|
|
81
|
-
"filename": "lib/index.ts",
|
|
82
|
-
"line": 3
|
|
83
|
-
},
|
|
84
69
|
"targets": {
|
|
85
70
|
"dotnet": {
|
|
86
71
|
"namespace": "Amazon.CDK.AWS.AccessAnalyzer"
|
|
@@ -94,13 +79,6 @@
|
|
|
94
79
|
}
|
|
95
80
|
},
|
|
96
81
|
"aws-cdk-lib.aws_acmpca": {
|
|
97
|
-
"locationInModule": {
|
|
98
|
-
"filename": "lib/index.ts",
|
|
99
|
-
"line": 4
|
|
100
|
-
},
|
|
101
|
-
"readme": {
|
|
102
|
-
"markdown": "# AWS::ACMPCA Construct Library\n\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk) project.\n\n```ts\nimport { aws_acmpca as acmpca } from 'aws-cdk-lib';\n```\n\n## Certificate Authority\n\nThis package contains a `CertificateAuthority` class.\nAt the moment, you cannot create new Authorities using it,\nbut you can import existing ones using the `fromCertificateAuthorityArn` static method:\n\n```ts\nconst certificateAuthority = acmpca.CertificateAuthority.fromCertificateAuthorityArn(this, 'CA',\n 'arn:aws:acm-pca:us-east-1:123456789012:certificate-authority/023077d8-2bfa-4eb0-8f22-05c96deade77');\n```\n\n## Low-level `Cfn*` classes\n\nYou can always use the low-level classes\n(starting with `Cfn*`) to create resources like the Certificate Authority:\n\n```ts\nconst cfnCertificateAuthority = new acmpca.CfnCertificateAuthority(this, 'CA', {\n type: 'ROOT',\n keyAlgorithm: 'RSA_2048',\n signingAlgorithm: 'SHA256WITHRSA',\n subject: {\n country: 'US',\n organization: 'string',\n organizationalUnit: 'string',\n distinguishedNameQualifier: 'string',\n state: 'string',\n commonName: '123',\n serialNumber: 'string',\n locality: 'string',\n title: 'string',\n surname: 'string',\n givenName: 'string',\n initials: 'DG',\n pseudonym: 'string',\n generationQualifier: 'DBG',\n },\n});\n```\n\nIf you need to pass the higher-level `ICertificateAuthority` somewhere,\nyou can get it from the lower-level `CfnCertificateAuthority` using the same `fromCertificateAuthorityArn` method:\n\n```ts\nconst certificateAuthority = acmpca.CertificateAuthority.fromCertificateAuthorityArn(this, 'CertificateAuthority',\n cfnCertificateAuthority.attrArn);\n```\n"
|
|
103
|
-
},
|
|
104
82
|
"targets": {
|
|
105
83
|
"dotnet": {
|
|
106
84
|
"namespace": "Amazon.CDK.AWS.ACMPCA"
|
|
@@ -114,10 +92,6 @@
|
|
|
114
92
|
}
|
|
115
93
|
},
|
|
116
94
|
"aws-cdk-lib.aws_amazonmq": {
|
|
117
|
-
"locationInModule": {
|
|
118
|
-
"filename": "lib/index.ts",
|
|
119
|
-
"line": 5
|
|
120
|
-
},
|
|
121
95
|
"targets": {
|
|
122
96
|
"dotnet": {
|
|
123
97
|
"namespace": "Amazon.CDK.AWS.AmazonMQ"
|
|
@@ -131,10 +105,6 @@
|
|
|
131
105
|
}
|
|
132
106
|
},
|
|
133
107
|
"aws-cdk-lib.aws_amplify": {
|
|
134
|
-
"locationInModule": {
|
|
135
|
-
"filename": "lib/index.ts",
|
|
136
|
-
"line": 6
|
|
137
|
-
},
|
|
138
108
|
"targets": {
|
|
139
109
|
"dotnet": {
|
|
140
110
|
"namespace": "Amazon.CDK.AWS.Amplify"
|
|
@@ -148,13 +118,6 @@
|
|
|
148
118
|
}
|
|
149
119
|
},
|
|
150
120
|
"aws-cdk-lib.aws_apigateway": {
|
|
151
|
-
"locationInModule": {
|
|
152
|
-
"filename": "lib/index.ts",
|
|
153
|
-
"line": 7
|
|
154
|
-
},
|
|
155
|
-
"readme": {
|
|
156
|
-
"markdown": "# Amazon API Gateway Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\n\nAmazon API Gateway is a fully managed service that makes it easy for developers\nto publish, maintain, monitor, and secure APIs at any scale. Create an API to\naccess data, business logic, or functionality from your back-end services, such\nas applications running on Amazon Elastic Compute Cloud (Amazon EC2), code\nrunning on AWS Lambda, or any web application.\n\n## Table of Contents\n\n- [Defining APIs](#defining-apis)\n - [Breaking up Methods and Resources across Stacks](#breaking-up-methods-and-resources-across-stacks)\n- [AWS Lambda-backed APIs](#aws-lambda-backed-apis)\n- [Integration Targets](#integration-targets)\n- [Usage Plan & API Keys](#usage-plan--api-keys)\n- [Working with models](#working-with-models)\n- [Default Integration and Method Options](#default-integration-and-method-options)\n- [Proxy Routes](#proxy-routes)\n- [Authorizers](#authorizers)\n - [IAM-based authorizer](#iam-based-authorizer)\n - [Lambda-based token authorizer](#lambda-based-token-authorizer)\n - [Lambda-based request authorizer](#lambda-based-request-authorizer)\n - [Cognito User Pools authorizer](#cognito-user-pools-authorizer)\n- [Mutual TLS](#mutal-tls-mtls)\n- [Deployments](#deployments)\n - [Deep dive: Invalidation of deployments](#deep-dive-invalidation-of-deployments)\n- [Custom Domains](#custom-domains)\n- [Access Logging](#access-logging)\n- [Cross Origin Resource Sharing (CORS)](#cross-origin-resource-sharing-cors)\n- [Endpoint Configuration](#endpoint-configuration)\n- [Private Integrations](#private-integrations)\n- [Gateway Response](#gateway-response)\n- [OpenAPI Definition](#openapi-definition)\n - [Endpoint configuration](#endpoint-configuration)\n- [Metrics](#metrics)\n- [APIGateway v2](#apigateway-v2)\n\n## Defining APIs\n\nAPIs are defined as a hierarchy of resources and methods. 
`addResource` and\n`addMethod` can be used to build this hierarchy. The root resource is\n`api.root`.\n\nFor example, the following code defines an API that includes the following HTTP\nendpoints: `ANY /`, `GET /books`, `POST /books`, `GET /books/{book_id}`, `DELETE /books/{book_id}`.\n\n```ts\nconst api = new apigateway.RestApi(this, 'books-api');\n\napi.root.addMethod('ANY');\n\nconst books = api.root.addResource('books');\nbooks.addMethod('GET');\nbooks.addMethod('POST');\n\nconst book = books.addResource('{book_id}');\nbook.addMethod('GET');\nbook.addMethod('DELETE');\n```\n\n## AWS Lambda-backed APIs\n\nA very common practice is to use Amazon API Gateway with AWS Lambda as the\nbackend integration. The `LambdaRestApi` construct makes it easy:\n\nThe following code defines a REST API that routes all requests to the\nspecified AWS Lambda function:\n\n```ts\nconst backend = new lambda.Function(...);\nnew apigateway.LambdaRestApi(this, 'myapi', {\n handler: backend,\n});\n```\n\nYou can also supply `proxy: false`, in which case you will have to explicitly\ndefine the API model:\n\n```ts\nconst backend = new lambda.Function(...);\nconst api = new apigateway.LambdaRestApi(this, 'myapi', {\n handler: backend,\n proxy: false\n});\n\nconst items = api.root.addResource('items');\nitems.addMethod('GET'); // GET /items\nitems.addMethod('POST'); // POST /items\n\nconst item = items.addResource('{item}');\nitem.addMethod('GET'); // GET /items/{item}\n\n// the default integration for methods is \"handler\", but one can\n// customize this behavior per method or even a sub path.\nitem.addMethod('DELETE', new apigateway.HttpIntegration('http://amazon.com'));\n```\n\n### Breaking up Methods and Resources across Stacks\n\nIt is fairly common for REST APIs with a large number of Resources and Methods to hit the [CloudFormation\nlimit](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cloudformation-limits.html) of 500 resources per\nstack.\n\nTo help with this, 
Resources and Methods for the same REST API can be re-organized across multiple stacks. A common\nway to do this is to have a stack per Resource or groups of Resources, but this is not the only possible way.\nThe following example uses sets up two Resources '/pets' and '/books' in separate stacks using nested stacks:\n\n[Resources grouped into nested stacks](test/integ.restapi-import.lit.ts)\n\n## Integration Targets\n\nMethods are associated with backend integrations, which are invoked when this\nmethod is called. API Gateway supports the following integrations:\n\n- `MockIntegration` - can be used to test APIs. This is the default\n integration if one is not specified.\n- `LambdaIntegration` - can be used to invoke an AWS Lambda function.\n- `AwsIntegration` - can be used to invoke arbitrary AWS service APIs.\n- `HttpIntegration` - can be used to invoke HTTP endpoints.\n\nThe following example shows how to integrate the `GET /book/{book_id}` method to\nan AWS Lambda function:\n\n```ts\nconst getBookHandler = new lambda.Function(...);\nconst getBookIntegration = new apigateway.LambdaIntegration(getBookHandler);\nbook.addMethod('GET', getBookIntegration);\n```\n\nIntegration options can be optionally be specified:\n\n```ts\nconst getBookIntegration = new apigateway.LambdaIntegration(getBookHandler, {\n contentHandling: apigateway.ContentHandling.CONVERT_TO_TEXT, // convert to base64\n credentialsPassthrough: true, // use caller identity to invoke the function\n});\n```\n\nMethod options can optionally be specified when adding methods:\n\n```ts\nbook.addMethod('GET', getBookIntegration, {\n authorizationType: apigateway.AuthorizationType.IAM,\n apiKeyRequired: true\n});\n```\n\nIt is possible to also integrate with AWS services in a different region. 
The following code integrates with Amazon SQS in the\n`eu-west-1` region.\n\n```ts\nconst getMessageIntegration = new apigateway.AwsIntegration({\n service: 'sqs', \n path: 'queueName', \n region: 'eu-west-1' \n});\n```\n\n## Usage Plan & API Keys\n\nA usage plan specifies who can access one or more deployed API stages and methods, and the rate at which they can be\naccessed. The plan uses API keys to identify API clients and meters access to the associated API stages for each key.\nUsage plans also allow configuring throttling limits and quota limits that are enforced on individual client API keys. \n\nThe following example shows how to create and asscociate a usage plan and an API key:\n\n```ts\nconst api = new apigateway.RestApi(this, 'hello-api');\n\nconst v1 = api.root.addResource('v1');\nconst echo = v1.addResource('echo');\nconst echoMethod = echo.addMethod('GET', integration, { apiKeyRequired: true });\n\nconst plan = api.addUsagePlan('UsagePlan', {\n name: 'Easy',\n throttle: {\n rateLimit: 10,\n burstLimit: 2\n }\n});\n\nconst key = api.addApiKey('ApiKey');\nplan.addApiKey(key);\n```\n\nTo associate a plan to a given RestAPI stage:\n\n```ts\nplan.addApiStage({\n stage: api.deploymentStage,\n throttle: [\n {\n method: echoMethod,\n throttle: {\n rateLimit: 10,\n burstLimit: 2\n }\n }\n ]\n});\n```\n\nExisting usage plans can be imported into a CDK app using its id.\n\n```ts\nconst importedUsagePlan = UsagePlan.fromUsagePlanId(stack, 'imported-usage-plan', '<usage-plan-key-id>');\n```\n\nThe name and value of the API Key can be specified at creation; if not\nprovided, a name and value will be automatically generated by API Gateway.\n\n```ts\nconst key = api.addApiKey('ApiKey', {\n apiKeyName: 'myApiKey1',\n value: 'MyApiKeyThatIsAtLeast20Characters',\n});\n```\n\nExisting API keys can also be imported into a CDK app using its id.\n\n```ts\nconst importedKey = ApiKey.fromApiKeyId(this, 'imported-key', '<api-key-id>');\n```\n\nThe \"grant\" methods can be 
used to give prepackaged sets of permissions to other resources. The\nfollowing code provides read permission to an API key.\n\n```ts\nimportedKey.grantRead(lambda);\n```\n\n### ⚠️ Multiple API Keys\n\nIt is possible to specify multiple API keys for a given Usage Plan, by calling `usagePlan.addApiKey()`.\n\nWhen using multiple API keys, a past bug of the CDK prevents API key associations to a Usage Plan to be deleted.\nIf the CDK app had the [feature flag] - `@aws-cdk/aws-apigateway:usagePlanKeyOrderInsensitiveId` - enabled when the API\nkeys were created, then the app will not be affected by this bug.\n\nIf this is not the case, you will need to ensure that the CloudFormation [logical ids] of the API keys that are not\nbeing deleted remain unchanged.\nMake note of the logical ids of these API keys before removing any, and set it as part of the `addApiKey()` method:\n\n```ts\nusageplan.addApiKey(apiKey, {\n overrideLogicalId: '...',\n});\n```\n\n[feature flag]: https://docs.aws.amazon.com/cdk/latest/guide/featureflags.html\n[logical ids]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/resources-section-structure.html\n\n### Rate Limited API Key\n\nIn scenarios where you need to create a single api key and configure rate limiting for it, you can use `RateLimitedApiKey`.\nThis construct lets you specify rate limiting properties which should be applied only to the api key being created.\nThe API key created has the specified rate limits, such as quota and throttles, applied.\n\nThe following example shows how to use a rate limited api key :\n\n```ts\nconst key = new apigateway.RateLimitedApiKey(this, 'rate-limited-api-key', {\n customerId: 'hello-customer',\n resources: [api],\n quota: {\n limit: 10000,\n period: apigateway.Period.MONTH\n }\n});\n```\n\n## Working with models\n\nWhen you work with Lambda integrations that are not Proxy integrations, you\nhave to define your models and mappings for the request, response, and integration.\n\n```ts\nconst 
hello = new lambda.Function(this, 'hello', {\n runtime: lambda.Runtime.NODEJS_12_X,\n handler: 'hello.handler',\n code: lambda.Code.fromAsset('lambda')\n});\n\nconst api = new apigateway.RestApi(this, 'hello-api', { });\nconst resource = api.root.addResource('v1');\n```\n\nYou can define more parameters on the integration to tune the behavior of API Gateway\n\n```ts\nconst integration = new LambdaIntegration(hello, {\n proxy: false,\n requestParameters: {\n // You can define mapping parameters from your method to your integration\n // - Destination parameters (the key) are the integration parameters (used in mappings)\n // - Source parameters (the value) are the source request parameters or expressions\n // @see: https://docs.aws.amazon.com/apigateway/latest/developerguide/request-response-data-mappings.html\n 'integration.request.querystring.who': 'method.request.querystring.who'\n },\n allowTestInvoke: true,\n requestTemplates: {\n // You can define a mapping that will build a payload for your integration, based\n // on the integration parameters that you have specified\n // Check: https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html\n 'application/json': JSON.stringify({ action: 'sayHello', pollId: \"$util.escapeJavaScript($input.params('who'))\" })\n },\n // This parameter defines the behavior of the engine is no suitable response template is found\n passthroughBehavior: PassthroughBehavior.NEVER,\n integrationResponses: [\n {\n // Successful response from the Lambda function, no filter defined\n // - the selectionPattern filter only tests the error message\n // We will set the response status code to 200\n statusCode: \"200\",\n responseTemplates: {\n // This template takes the \"message\" result from the Lambda function, and embeds it in a JSON response\n // Check https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html\n 'application/json': JSON.stringify({ 
state: 'ok', greeting: '$util.escapeJavaScript($input.body)' })\n },\n responseParameters: {\n // We can map response parameters\n // - Destination parameters (the key) are the response parameters (used in mappings)\n // - Source parameters (the value) are the integration response parameters or expressions\n 'method.response.header.Content-Type': \"'application/json'\",\n 'method.response.header.Access-Control-Allow-Origin': \"'*'\",\n 'method.response.header.Access-Control-Allow-Credentials': \"'true'\"\n }\n },\n {\n // For errors, we check if the error message is not empty, get the error data\n selectionPattern: '(\\n|.)+',\n // We will set the response status code to 200\n statusCode: \"400\",\n responseTemplates: {\n 'application/json': JSON.stringify({ state: 'error', message: \"$util.escapeJavaScript($input.path('$.errorMessage'))\" })\n },\n responseParameters: {\n 'method.response.header.Content-Type': \"'application/json'\",\n 'method.response.header.Access-Control-Allow-Origin': \"'*'\",\n 'method.response.header.Access-Control-Allow-Credentials': \"'true'\"\n }\n }\n ]\n});\n\n```\n\nYou can define models for your responses (and requests)\n\n```ts\n// We define the JSON Schema for the transformed valid response\nconst responseModel = api.addModel('ResponseModel', {\n contentType: 'application/json',\n modelName: 'ResponseModel',\n schema: {\n schema: JsonSchemaVersion.DRAFT4,\n title: 'pollResponse',\n type: JsonSchemaType.OBJECT,\n properties: {\n state: { type: JsonSchemaType.STRING },\n greeting: { type: JsonSchemaType.STRING }\n }\n }\n});\n\n// We define the JSON Schema for the transformed error response\nconst errorResponseModel = api.addModel('ErrorResponseModel', {\n contentType: 'application/json',\n modelName: 'ErrorResponseModel',\n schema: {\n schema: JsonSchemaVersion.DRAFT4,\n title: 'errorResponse',\n type: JsonSchemaType.OBJECT,\n properties: {\n state: { type: JsonSchemaType.STRING },\n message: { type: JsonSchemaType.STRING }\n }\n 
}\n});\n\n```\n\nAnd reference all on your method definition.\n\n```ts\nresource.addMethod('GET', integration, {\n // We can mark the parameters as required\n requestParameters: {\n 'method.request.querystring.who': true\n },\n // we can set request validator options like below\n requestValidatorOptions: {\n requestValidatorName: 'test-validator',\n validateRequestBody: true,\n validateRequestParameters: false\n }\n methodResponses: [\n {\n // Successful response from the integration\n statusCode: '200',\n // Define what parameters are allowed or not\n responseParameters: {\n 'method.response.header.Content-Type': true,\n 'method.response.header.Access-Control-Allow-Origin': true,\n 'method.response.header.Access-Control-Allow-Credentials': true\n },\n // Validate the schema on the response\n responseModels: {\n 'application/json': responseModel\n }\n },\n {\n // Same thing for the error responses\n statusCode: '400',\n responseParameters: {\n 'method.response.header.Content-Type': true,\n 'method.response.header.Access-Control-Allow-Origin': true,\n 'method.response.header.Access-Control-Allow-Credentials': true\n },\n responseModels: {\n 'application/json': errorResponseModel\n }\n }\n ]\n});\n```\n\nSpecifying `requestValidatorOptions` automatically creates the RequestValidator construct with the given options.\nHowever, if you have your RequestValidator already initialized or imported, use the `requestValidator` option instead.\n\n## Default Integration and Method Options\n\nThe `defaultIntegration` and `defaultMethodOptions` properties can be used to\nconfigure a default integration at any resource level. These options will be\nused when defining method under this resource (recursively) with undefined\nintegration or options.\n\n> If not defined, the default integration is `MockIntegration`. See reference\ndocumentation for default method options.\n\nThe following example defines the `booksBackend` integration as a default\nintegration. 
This means that all API methods that do not explicitly define an\nintegration will be routed to this AWS Lambda function.\n\n```ts\nconst booksBackend = new apigateway.LambdaIntegration(...);\nconst api = new apigateway.RestApi(this, 'books', {\n defaultIntegration: booksBackend\n});\n\nconst books = new api.root.addResource('books');\nbooks.addMethod('GET'); // integrated with `booksBackend`\nbooks.addMethod('POST'); // integrated with `booksBackend`\n\nconst book = books.addResource('{book_id}');\nbook.addMethod('GET'); // integrated with `booksBackend`\n```\n\nA Method can be configured with authorization scopes. Authorization scopes are\nused in conjunction with an [authorizer that uses Amazon Cognito user\npools](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-integrate-with-cognito.html#apigateway-enable-cognito-user-pool).\nRead more about authorization scopes\n[here](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-apigateway-method.html#cfn-apigateway-method-authorizationscopes).\n\nAuthorization scopes for a Method can be configured using the `authorizationScopes` property as shown below -\n\n```ts\nbooks.addMethod('GET', new apigateway.HttpIntegration('http://amazon.com'), {\n authorizationType: AuthorizationType.COGNITO,\n authorizationScopes: ['Scope1','Scope2']\n});\n```\n\n## Proxy Routes\n\nThe `addProxy` method can be used to install a greedy `{proxy+}` resource\non a path. 
By default, this also installs an `\"ANY\"` method:\n\n```ts\nconst proxy = resource.addProxy({\n defaultIntegration: new LambdaIntegration(handler),\n\n // \"false\" will require explicitly adding methods on the `proxy` resource\n anyMethod: true // \"true\" is the default\n});\n```\n\n## Authorizers\n\nAPI Gateway [supports several different authorization types](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-control-access-to-api.html)\nthat can be used for controlling access to your REST APIs.\n\n### IAM-based authorizer\n\nThe following CDK code provides 'execute-api' permission to an IAM user, via IAM policies, for the 'GET' method on the `books` resource:\n\n```ts\nconst getBooks = books.addMethod('GET', new apigateway.HttpIntegration('http://amazon.com'), {\n authorizationType: apigateway.AuthorizationType.IAM\n});\n\niamUser.attachInlinePolicy(new iam.Policy(this, 'AllowBooks', {\n statements: [\n new iam.PolicyStatement({\n actions: [ 'execute-api:Invoke' ],\n effect: iam.Effect.ALLOW,\n resources: [ getBooks.methodArn ]\n })\n ]\n}))\n```\n\n### Lambda-based token authorizer\n\nAPI Gateway also allows [lambda functions to be used as authorizers](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-use-lambda-authorizer.html).\n\nThis module provides support for token-based Lambda authorizers. 
When a client makes a request to an API's methods configured with such\nan authorizer, API Gateway calls the Lambda authorizer, which takes the caller's identity as input and returns an IAM policy as output.\nA token-based Lambda authorizer (also called a token authorizer) receives the caller's identity in a bearer token, such as\na JSON Web Token (JWT) or an OAuth token.\n\nAPI Gateway interacts with the authorizer Lambda function handler by passing input and expecting the output in a specific format.\nThe event object that the handler is called with contains the `authorizationToken` and the `methodArn` from the request to the\nAPI Gateway endpoint. The handler is expected to return the `principalId` (i.e. the client identifier) and a `policyDocument` stating\nwhat the client is authorizer to perform.\nSee [here](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-use-lambda-authorizer.html) for a detailed specification on\ninputs and outputs of the Lambda handler.\n\nThe following code attaches a token-based Lambda authorizer to the 'GET' Method of the Book resource:\n\n```ts\nconst authFn = new lambda.Function(this, 'booksAuthorizerLambda', {\n // ...\n // ...\n});\n\nconst auth = new apigateway.TokenAuthorizer(this, 'booksAuthorizer', {\n handler: authFn\n});\n\nbooks.addMethod('GET', new apigateway.HttpIntegration('http://amazon.com'), {\n authorizer: auth\n});\n```\n\nA full working example is shown below.\n\n[Full token authorizer example](test/authorizers/integ.token-authorizer.lit.ts).\n\nBy default, the `TokenAuthorizer` looks for the authorization token in the request header with the key 'Authorization'. This can,\nhowever, be modified by changing the `identitySource` property.\n\nAuthorizers can also be passed via the `defaultMethodOptions` property within the `RestApi` construct or the `Method` construct. 
Unless\nexplicitly overridden, the specified defaults will be applied across all `Method`s across the `RestApi` or across all `Resource`s,\ndepending on where the defaults were specified.\n\n### Lambda-based request authorizer\n\nThis module provides support for request-based Lambda authorizers. When a client makes a request to an API's methods configured with such\nan authorizer, API Gateway calls the Lambda authorizer, which takes specified parts of the request, known as identity sources,\nas input and returns an IAM policy as output. A request-based Lambda authorizer (also called a request authorizer) receives\nthe identity sources in a series of values pulled from the request, from the headers, stage variables, query strings, and the context.\n\nAPI Gateway interacts with the authorizer Lambda function handler by passing input and expecting the output in a specific format.\nThe event object that the handler is called with contains the body of the request and the `methodArn` from the request to the\nAPI Gateway endpoint. The handler is expected to return the `principalId` (i.e. 
the client identifier) and a `policyDocument` stating\nwhat the client is authorizer to perform.\nSee [here](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-use-lambda-authorizer.html) for a detailed specification on\ninputs and outputs of the Lambda handler.\n\nThe following code attaches a request-based Lambda authorizer to the 'GET' Method of the Book resource:\n\n```ts\nconst authFn = new lambda.Function(this, 'booksAuthorizerLambda', {\n // ...\n // ...\n});\n\nconst auth = new apigateway.RequestAuthorizer(this, 'booksAuthorizer', {\n handler: authFn,\n identitySources: [IdentitySource.header('Authorization')]\n});\n\nbooks.addMethod('GET', new apigateway.HttpIntegration('http://amazon.com'), {\n authorizer: auth\n});\n```\n\nA full working example is shown below.\n\n[Full request authorizer example](test/authorizers/integ.request-authorizer.lit.ts).\n\nBy default, the `RequestAuthorizer` does not pass any kind of information from the request. This can,\nhowever, be modified by changing the `identitySource` property, and is required when specifying a value for caching.\n\nAuthorizers can also be passed via the `defaultMethodOptions` property within the `RestApi` construct or the `Method` construct. 
Unless\nexplicitly overridden, the specified defaults will be applied across all `Method`s across the `RestApi` or across all `Resource`s,\ndepending on where the defaults were specified.\n\n### Cognito User Pools authorizer\n\nAPI Gateway also allows [Amazon Cognito user pools as authorizer](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-integrate-with-cognito.html)\n\nThe following snippet configures a Cognito user pool as an authorizer:\n\n```ts\nconst userPool = new cognito.UserPool(stack, 'UserPool');\n\nconst auth = new apigateway.CognitoUserPoolsAuthorizer(this, 'booksAuthorizer', {\n cognitoUserPools: [userPool]\n});\n\nbooks.addMethod('GET', new apigateway.HttpIntegration('http://amazon.com'), {\n authorizer: auth,\n authorizationType: apigateway.AuthorizationType.COGNITO,\n});\n```\n\n## Mutual TLS (mTLS)\n\nMutual TLS can be configured to limit access to your API based by using client certificates instead of (or as an extension of) using authorization headers.\n\n```ts\nnew apigw.DomainName(this, 'domain-name', {\n domainName: 'example.com',\n certificate: acm.Certificate.fromCertificateArn(this, 'cert', 'arn:aws:acm:us-east-1:1111111:certificate/11-3336f1-44483d-adc7-9cd375c5169d'),\n mtls: {\n bucket: new Bucket(this, 'bucket'),\n key: 'truststore.pem',\n version: 'version',\n },\n});\n```\n\nInstructions for configuring your trust store can be found [here](https://aws.amazon.com/blogs/compute/introducing-mutual-tls-authentication-for-amazon-api-gateway/).\n\n## Deployments\n\nBy default, the `RestApi` construct will automatically create an API Gateway\n[Deployment] and a \"prod\" [Stage] which represent the API configuration you\ndefined in your CDK app. 
This means that when you deploy your app, your API will\nbe have open access from the internet via the stage URL.\n\nThe URL of your API can be obtained from the attribute `restApi.url`, and is\nalso exported as an `Output` from your stack, so it's printed when you `cdk\ndeploy` your app:\n\n```console\n$ cdk deploy\n...\nbooks.booksapiEndpointE230E8D5 = https://6lyktd4lpk.execute-api.us-east-1.amazonaws.com/prod/\n```\n\nTo disable this behavior, you can set `{ deploy: false }` when creating your\nAPI. This means that the API will not be deployed and a stage will not be\ncreated for it. You will need to manually define a `apigateway.Deployment` and\n`apigateway.Stage` resources.\n\nUse the `deployOptions` property to customize the deployment options of your\nAPI.\n\nThe following example will configure API Gateway to emit logs and data traces to\nAWS CloudWatch for all API calls:\n\n> By default, an IAM role will be created and associated with API Gateway to\nallow it to write logs and metrics to AWS CloudWatch unless `cloudWatchRole` is\nset to `false`.\n\n```ts\nconst api = new apigateway.RestApi(this, 'books', {\n deployOptions: {\n loggingLevel: apigateway.MethodLoggingLevel.INFO,\n dataTraceEnabled: true\n }\n})\n```\n\n### Deep dive: Invalidation of deployments\n\nAPI Gateway deployments are an immutable snapshot of the API. This means that we\nwant to automatically create a new deployment resource every time the API model\ndefined in our CDK app changes.\n\nIn order to achieve that, the AWS CloudFormation logical ID of the\n`AWS::ApiGateway::Deployment` resource is dynamically calculated by hashing the\nAPI configuration (resources, methods). This means that when the configuration\nchanges (i.e. a resource or method are added, configuration is changed), a new\nlogical ID will be assigned to the deployment resource. This will cause\nCloudFormation to create a new deployment resource.\n\nBy default, old deployments are _deleted_. 
You can set `retainDeployments: true`\nto allow users revert the stage to an old deployment manually.\n\n[Deployment]: https://docs.aws.amazon.com/apigateway/api-reference/resource/deployment/\n[Stage]: https://docs.aws.amazon.com/apigateway/api-reference/resource/stage/\n\n## Custom Domains\n\nTo associate an API with a custom domain, use the `domainName` configuration when\nyou define your API:\n\n```ts\nconst api = new apigw.RestApi(this, 'MyDomain', {\n domainName: {\n domainName: 'example.com',\n certificate: acmCertificateForExampleCom,\n },\n});\n```\n\nThis will define a `DomainName` resource for you, along with a `BasePathMapping`\nfrom the root of the domain to the deployment stage of the API. This is a common\nset up.\n\nTo route domain traffic to an API Gateway API, use Amazon Route 53 to create an\nalias record. An alias record is a Route 53 extension to DNS. It's similar to a\nCNAME record, but you can create an alias record both for the root domain, such\nas `example.com`, and for subdomains, such as `www.example.com`. 
(You can create\nCNAME records only for subdomains.)\n\n```ts\nimport { aws_route53 as route53 } from 'aws-cdk-lib';\nimport { aws_route53_targets as targets } from 'aws-cdk-lib';\n\nnew route53.ARecord(this, 'CustomDomainAliasRecord', {\n zone: hostedZoneForExampleCom,\n target: route53.RecordTarget.fromAlias(new targets.ApiGateway(api))\n});\n```\n\nYou can also define a `DomainName` resource directly in order to customize the default behavior:\n\n```ts\nnew apigw.DomainName(this, 'custom-domain', {\n domainName: 'example.com',\n certificate: acmCertificateForExampleCom,\n endpointType: apigw.EndpointType.EDGE, // default is REGIONAL\n securityPolicy: apigw.SecurityPolicy.TLS_1_2\n});\n```\n\nOnce you have a domain, you can map base paths of the domain to APIs.\nThe following example will map the URL <https://example.com/go-to-api1>\nto the `api1` API and <https://example.com/boom> to the `api2` API.\n\n```ts\ndomain.addBasePathMapping(api1, { basePath: 'go-to-api1' });\ndomain.addBasePathMapping(api2, { basePath: 'boom' });\n```\n\nYou can specify the API `Stage` to which this base path URL will map to. 
By default, this will be the\n`deploymentStage` of the `RestApi`.\n\n```ts\nconst betaDeploy = new Deployment(this, 'beta-deployment', {\n api: restapi,\n});\nconst betaStage = new Stage(this, 'beta-stage', {\n deployment: betaDeploy,\n});\ndomain.addBasePathMapping(restapi, { basePath: 'api/beta', stage: betaStage });\n```\n\nIf you don't specify `basePath`, all URLs under this domain will be mapped\nto the API, and you won't be able to map another API to the same domain:\n\n```ts\ndomain.addBasePathMapping(api);\n```\n\nThis can also be achieved through the `mapping` configuration when defining the\ndomain as demonstrated above.\n\nIf you wish to setup this domain with an Amazon Route53 alias, use the `targets.ApiGatewayDomain`:\n\n```ts\nimport { aws_route53 as route53 } from 'aws-cdk-lib';\nimport { aws_route53_targets as targets } from 'aws-cdk-lib';\n\nnew route53.ARecord(this, 'CustomDomainAliasRecord', {\n zone: hostedZoneForExampleCom,\n target: route53.RecordTarget.fromAlias(new targets.ApiGatewayDomain(domainName))\n});\n```\n\n## Access Logging\n\nAccess logging creates logs every time an API method is accessed. Access logs can have information on\nwho has accessed the API, how the caller accessed the API and what responses were generated.\nAccess logs are configured on a Stage of the RestApi.\nAccess logs can be expressed in a format of your choosing, and can contain any access details, with a\nminimum that it must include the 'requestId'. 
The list of variables that can be expressed in the access\nlog can be found\n[here](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html#context-variable-reference).\nRead more at [Setting Up CloudWatch API Logging in API\nGateway](https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-logging.html)\n\n```ts\n// production stage\nconst prdLogGroup = new cwlogs.LogGroup(this, \"PrdLogs\");\nconst api = new apigateway.RestApi(this, 'books', {\n deployOptions: {\n accessLogDestination: new apigateway.LogGroupLogDestination(prdLogGroup),\n accessLogFormat: apigateway.AccessLogFormat.jsonWithStandardFields()\n }\n})\nconst deployment = new apigateway.Deployment(stack, 'Deployment', {api});\n\n// development stage\nconst devLogGroup = new cwlogs.LogGroup(this, \"DevLogs\");\nnew apigateway.Stage(this, 'dev', {\n deployment,\n accessLogDestination: new apigateway.LogGroupLogDestination(devLogGroup),\n accessLogFormat: apigateway.AccessLogFormat.jsonWithStandardFields({\n caller: false,\n httpMethod: true,\n ip: true,\n protocol: true,\n requestTime: true,\n resourcePath: true,\n responseLength: true,\n status: true,\n user: true\n })\n});\n```\n\nThe following code will generate the access log in the [CLF format](https://en.wikipedia.org/wiki/Common_Log_Format).\n\n```ts\nconst logGroup = new cwlogs.LogGroup(this, \"ApiGatewayAccessLogs\");\nconst api = new apigateway.RestApi(this, 'books', {\n deployOptions: {\n accessLogDestination: new apigateway.LogGroupLogDestination(logGroup),\n accessLogFormat: apigateway.AccessLogFormat.clf(),\n }});\n```\n\nYou can also configure your own access log format by using the `AccessLogFormat.custom()` API.\n`AccessLogField` provides commonly used fields. 
The following code configures access log to contain.\n\n```ts\nconst logGroup = new cwlogs.LogGroup(this, \"ApiGatewayAccessLogs\");\nnew apigateway.RestApi(this, 'books', {\n deployOptions: {\n accessLogDestination: new apigateway.LogGroupLogDestination(logGroup),\n accessLogFormat: apigateway.AccessLogFormat.custom(\n `${AccessLogField.contextRequestId()} ${AccessLogField.contextErrorMessage()} ${AccessLogField.contextErrorMessageString()}`\n )\n }\n});\n```\n\nYou can use the `methodOptions` property to configure\n[default method throttling](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-request-throttling.html#apigateway-api-level-throttling-in-usage-plan)\nfor a stage. The following snippet configures the a stage that accepts\n100 requests per minute, allowing burst up to 200 requests per minute.\n\n```ts\nconst api = new apigateway.RestApi(this, 'books');\nconst deployment = new apigateway.Deployment(this, 'my-deployment', { api });\nconst stage = new apigateway.Stage(this, 'my-stage', {\n deployment,\n methodOptions: {\n '/*/*': { // This special path applies to all resource paths and all HTTP methods\n throttlingRateLimit: 100,\n throttlingBurstLimit: 200\n }\n }\n});\n```\n\nConfiguring `methodOptions` on the `deployOptions` of `RestApi` will set the\nthrottling behaviors on the default stage that is automatically created.\n\n```ts\nconst api = new apigateway.RestApi(this, 'books', {\n deployOptions: {\n methodOptions: {\n '/*/*': { // This special path applies to all resource paths and all HTTP methods\n throttlingRateLimit: 100,\n throttlingBurstLimit: 1000\n }\n }\n }\n});\n```\n\n## Cross Origin Resource Sharing (CORS)\n\n[Cross-Origin Resource Sharing (CORS)](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) is a mechanism\nthat uses additional HTTP headers to tell browsers to give a web application\nrunning at one origin, access to selected resources from a different origin. 
A\nweb application executes a cross-origin HTTP request when it requests a resource\nthat has a different origin (domain, protocol, or port) from its own.\n\nYou can add the CORS [preflight](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS#Preflighted_requests) OPTIONS\nHTTP method to any API resource via the `defaultCorsPreflightOptions` option or by calling the `addCorsPreflight` on a specific resource.\n\nThe following example will enable CORS for all methods and all origins on all resources of the API:\n\n```ts\nnew apigateway.RestApi(this, 'api', {\n defaultCorsPreflightOptions: {\n allowOrigins: apigateway.Cors.ALL_ORIGINS,\n allowMethods: apigateway.Cors.ALL_METHODS // this is also the default\n }\n})\n```\n\nThe following example will add an OPTIONS method to the `myResource` API resource, which\nonly allows GET and PUT HTTP requests from the origin <https://amazon.com.>\n\n```ts\nmyResource.addCorsPreflight({\n allowOrigins: [ 'https://amazon.com' ],\n allowMethods: [ 'GET', 'PUT' ]\n});\n```\n\nSee the\n[`CorsOptions`](https://docs.aws.amazon.com/cdk/api/latest/docs/@aws-cdk_aws-apigateway.CorsOptions.html)\nAPI reference for a detailed list of supported configuration options.\n\nYou can specify defaults this at the resource level, in which case they will be applied to the entire resource sub-tree:\n\n```ts\nconst subtree = resource.addResource('subtree', {\n defaultCorsPreflightOptions: {\n allowOrigins: [ 'https://amazon.com' ]\n }\n});\n```\n\nThis means that all resources under `subtree` (inclusive) will have a preflight\nOPTIONS added to them.\n\nSee [#906](https://github.com/aws/aws-cdk/issues/906) for a list of CORS\nfeatures which are not yet supported.\n\n## Endpoint Configuration\n\nAPI gateway allows you to specify an\n[API Endpoint Type](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-api-endpoint-types.html).\nTo define an endpoint type for the API gateway, use `endpointConfiguration` property:\n\n```ts\nconst 
api = new apigw.RestApi(stack, 'api', {\n endpointConfiguration: {\n types: [ apigw.EndpointType.EDGE ]\n }\n});\n```\n\nYou can also create an association between your Rest API and a VPC endpoint. By doing so,\nAPI Gateway will generate a new\nRoute53 Alias DNS record which you can use to invoke your private APIs. More info can be found\n[here](https://docs.aws.amazon.com/apigateway/latest/developerguide/associate-private-api-with-vpc-endpoint.html).\n\nHere is an example:\n\n```ts\nconst someEndpoint: IVpcEndpoint = /* Get or Create endpoint here */\nconst api = new apigw.RestApi(stack, 'api', {\n endpointConfiguration: {\n types: [ apigw.EndpointType.PRIVATE ],\n vpcEndpoints: [ someEndpoint ]\n }\n});\n```\n\nBy performing this association, we can invoke the API gateway using the following format:\n\n```plaintext\nhttps://{rest-api-id}-{vpce-id}.execute-api.{region}.amazonaws.com/{stage}\n```\n\n## Private Integrations\n\nA private integration makes it simple to expose HTTP/HTTPS resources behind an\nAmazon VPC for access by clients outside of the VPC. The private integration uses\nan API Gateway resource of `VpcLink` to encapsulate connections between API\nGateway and targeted VPC resources.\nThe `VpcLink` is then attached to the `Integration` of a specific API Gateway\nMethod. The following code sets up a private integration with a network load\nbalancer -\n\n```ts\nconst vpc = new ec2.Vpc(stack, 'VPC');\nconst nlb = new elbv2.NetworkLoadBalancer(stack, 'NLB', {\n vpc,\n});\nconst link = new apigw.VpcLink(stack, 'link', {\n targets: [nlb],\n});\n\nconst integration = new apigw.Integration({\n type: apigw.IntegrationType.HTTP_PROXY,\n options: {\n connectionType: apigw.ConnectionType.VPC_LINK,\n vpcLink: link,\n },\n});\n```\n\nThe uri for the private integration, in the case of a VpcLink, will be set to the DNS name of\nthe VPC Link's NLB. 
If the VPC Link has multiple NLBs or the VPC Link is imported or the DNS\nname cannot be determined for any other reason, the user is expected to specify the `uri`\nproperty.\n\nAny existing `VpcLink` resource can be imported into the CDK app via the `VpcLink.fromVpcLinkId()`.\n\n```ts\nconst stack = new Stack(app, 'my-stack');\n\nconst awesomeLink = VpcLink.fromVpcLinkId(stack, 'awesome-vpc-link', 'us-east-1_oiuR12Abd');\n```\n\n## Gateway response\n\nIf the Rest API fails to process an incoming request, it returns to the client an error response without forwarding the\nrequest to the integration backend. API Gateway has a set of standard response messages that are sent to the client for\neach type of error. These error responses can be configured on the Rest API. The list of Gateway responses that can be\nconfigured can be found [here](https://docs.aws.amazon.com/apigateway/latest/developerguide/supported-gateway-response-types.html).\nLearn more about [Gateway\nResponses](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-gatewayResponse-definition.html).\n\nThe following code configures a Gateway Response when the response is 'access denied':\n\n```ts\nconst api = new apigateway.RestApi(this, 'books-api');\napi.addGatewayResponse('test-response', {\n type: ResponseType.ACCESS_DENIED,\n statusCode: '500',\n responseHeaders: {\n 'Access-Control-Allow-Origin': \"test.com\",\n 'test-key': 'test-value'\n },\n templates: {\n 'application/json': '{ \"message\": $context.error.messageString, \"statusCode\": \"488\", \"type\": \"$context.error.responseType\" }'\n }\n});\n```\n\n## OpenAPI Definition\n\nCDK supports creating a REST API by importing an OpenAPI definition file. It currently supports OpenAPI v2.0 and OpenAPI\nv3.0 definition files. 
Read more about [Configuring a REST API using\nOpenAPI](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-import-api.html).\n\nThe following code creates a REST API using an external OpenAPI definition JSON file -\n\n```ts\nconst api = new apigateway.SpecRestApi(this, 'books-api', {\n apiDefinition: apigateway.ApiDefinition.fromAsset('path-to-file.json')\n});\n\nconst booksResource = api.root.addResource('books')\nbooksResource.addMethod('GET', ...);\n```\n\nIt is possible to use the `addResource()` API to define additional API Gateway Resources.\n\n**Note:** Deployment will fail if a Resource of the same name is already defined in the Open API specification.\n\n**Note:** Any default properties configured, such as `defaultIntegration`, `defaultMethodOptions`, etc. will only be\napplied to Resources and Methods defined in the CDK, and not the ones defined in the spec. Use the [API Gateway\nextensions to OpenAPI](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-swagger-extensions.html)\nto configure these.\n\nThere are a number of limitations in using OpenAPI definitions in API Gateway. Read the [Amazon API Gateway important\nnotes for REST APIs](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-known-issues.html#api-gateway-known-issues-rest-apis)\nfor more details.\n\n**Note:** When starting off with an OpenAPI definition using `SpecRestApi`, it is not possible to configure some\nproperties that can be configured directly in the OpenAPI specification file. 
This is to prevent people duplication\nof these properties and potential confusion.\n\n### Endpoint configuration\n\nBy default, `SpecRestApi` will create an edge optimized endpoint.\n\nThis can be modified as shown below:\n\n```ts\nconst api = new apigateway.SpecRestApi(this, 'ExampleRestApi', {\n // ...\n endpointTypes: [apigateway.EndpointType.PRIVATE]\n});\n```\n\n**Note:** For private endpoints you will still need to provide the\n[`x-amazon-apigateway-policy`](https://docs.aws.amazon.com/apigateway/latest/developerguide/openapi-extensions-policy.html) and\n[`x-amazon-apigateway-endpoint-configuration`](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-swagger-extensions-endpoint-configuration.html)\nin your openApi file.\n\n## Metrics\n\nThe API Gateway service sends metrics around the performance of Rest APIs to Amazon CloudWatch.\nThese metrics can be referred to using the metric APIs available on the `RestApi` construct.\nThe APIs with the `metric` prefix can be used to get reference to specific metrics for this API. For example,\nthe method below refers to the client side errors metric for this API.\n\n```ts\nconst api = new apigw.RestApi(stack, 'my-api');\nconst clientErrorMetric = api.metricClientError();\n```\n\n## APIGateway v2\n\nAPIGateway v2 APIs are now moved to its own package named `aws-apigatewayv2`. For backwards compatibility, existing\nAPIGateway v2 \"CFN resources\" (such as `CfnApi`) that were previously exported as part of this package, are still\nexported from here and have been marked deprecated. However, updates to these CloudFormation resources, such as new\nproperties and new resource types will not be available.\n\nMove to using `aws-apigatewayv2` to get the latest APIs and updates.\n\n----\n\nThis module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk) project.\n"
|
|
157
|
-
},
|
|
158
121
|
"targets": {
|
|
159
122
|
"dotnet": {
|
|
160
123
|
"namespace": "Amazon.CDK.AWS.APIGateway"
|
|
@@ -168,10 +131,6 @@
|
|
|
168
131
|
}
|
|
169
132
|
},
|
|
170
133
|
"aws-cdk-lib.aws_apigatewayv2": {
|
|
171
|
-
"locationInModule": {
|
|
172
|
-
"filename": "lib/index.ts",
|
|
173
|
-
"line": 8
|
|
174
|
-
},
|
|
175
134
|
"targets": {
|
|
176
135
|
"dotnet": {
|
|
177
136
|
"namespace": "Amazon.CDK.AWS.Apigatewayv2"
|
|
@@ -185,10 +144,6 @@
|
|
|
185
144
|
}
|
|
186
145
|
},
|
|
187
146
|
"aws-cdk-lib.aws_appconfig": {
|
|
188
|
-
"locationInModule": {
|
|
189
|
-
"filename": "lib/index.ts",
|
|
190
|
-
"line": 9
|
|
191
|
-
},
|
|
192
147
|
"targets": {
|
|
193
148
|
"dotnet": {
|
|
194
149
|
"namespace": "Amazon.CDK.AWS.AppConfig"
|
|
@@ -202,10 +157,6 @@
|
|
|
202
157
|
}
|
|
203
158
|
},
|
|
204
159
|
"aws-cdk-lib.aws_appflow": {
|
|
205
|
-
"locationInModule": {
|
|
206
|
-
"filename": "lib/index.ts",
|
|
207
|
-
"line": 10
|
|
208
|
-
},
|
|
209
160
|
"targets": {
|
|
210
161
|
"dotnet": {
|
|
211
162
|
"namespace": "Amazon.CDK.AWS.AppFlow"
|
|
@@ -219,10 +170,6 @@
|
|
|
219
170
|
}
|
|
220
171
|
},
|
|
221
172
|
"aws-cdk-lib.aws_appintegrations": {
|
|
222
|
-
"locationInModule": {
|
|
223
|
-
"filename": "lib/index.ts",
|
|
224
|
-
"line": 11
|
|
225
|
-
},
|
|
226
173
|
"targets": {
|
|
227
174
|
"dotnet": {
|
|
228
175
|
"namespace": "Amazon.CDK.AWS.AppIntegrations"
|
|
@@ -236,13 +183,6 @@
|
|
|
236
183
|
}
|
|
237
184
|
},
|
|
238
185
|
"aws-cdk-lib.aws_applicationautoscaling": {
|
|
239
|
-
"locationInModule": {
|
|
240
|
-
"filename": "lib/index.ts",
|
|
241
|
-
"line": 12
|
|
242
|
-
},
|
|
243
|
-
"readme": {
|
|
244
|
-
"markdown": "# AWS Auto Scaling Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\n**Application AutoScaling** is used to configure autoscaling for all\nservices other than scaling EC2 instances. For example, you will use this to\nscale ECS tasks, DynamoDB capacity, Spot Fleet sizes, Comprehend document classification endpoints, Lambda function provisioned concurrency and more.\n\nAs a CDK user, you will probably not have to interact with this library\ndirectly; instead, it will be used by other construct libraries to\noffer AutoScaling features for their own constructs.\n\nThis document will describe the general autoscaling features and concepts;\nyour particular service may offer only a subset of these.\n\n## AutoScaling basics\n\nResources can offer one or more **attributes** to autoscale, typically\nrepresenting some capacity dimension of the underlying service. For example,\na DynamoDB Table offers autoscaling of the read and write capacity of the\ntable proper and its Global Secondary Indexes, an ECS Service offers\nautoscaling of its task count, an RDS Aurora cluster offers scaling of its\nreplica count, and so on.\n\nWhen you enable autoscaling for an attribute, you specify a minimum and a\nmaximum value for the capacity. 
AutoScaling policies that respond to metrics\nwill never go higher or lower than the indicated capacity (but scheduled\nscaling actions might, see below).\n\nThere are three ways to scale your capacity:\n\n* **In response to a metric** (also known as step scaling); for example, you\n might want to scale out if the CPU usage across your cluster starts to rise,\n and scale in when it drops again.\n* **By trying to keep a certain metric around a given value** (also known as\n target tracking scaling); you might want to automatically scale out an in to\n keep your CPU usage around 50%.\n* **On a schedule**; you might want to organize your scaling around traffic\n flows you expect, by scaling out in the morning and scaling in in the\n evening.\n\nThe general pattern of autoscaling will look like this:\n\n```ts\nconst capacity = resource.autoScaleCapacity({\n minCapacity: 5,\n maxCapacity: 100\n});\n\n// Enable a type of metric scaling and/or schedule scaling\ncapacity.scaleOnMetric(...);\ncapacity.scaleToTrackMetric(...);\ncapacity.scaleOnSchedule(...);\n```\n\n## Step Scaling\n\nThis type of scaling scales in and out in deterministic steps that you\nconfigure, in response to metric values. For example, your scaling strategy\nto scale in response to CPU usage might look like this:\n\n```plaintext\n Scaling -1 (no change) +1 +3\n │ │ │ │ │\n ├────────┼───────────────────────┼────────┼────────┤\n │ │ │ │ │\nCPU usage 0% 10% 50% 70% 100%\n```\n\n(Note that this is not necessarily a recommended scaling strategy, but it's\na possible one. 
You will have to determine what thresholds are right for you).\n\nYou would configure it like this:\n\n```ts\ncapacity.scaleOnMetric('ScaleToCPU', {\n metric: service.metricCpuUtilization(),\n scalingSteps: [\n { upper: 10, change: -1 },\n { lower: 50, change: +1 },\n { lower: 70, change: +3 },\n ],\n\n // Change this to AdjustmentType.PercentChangeInCapacity to interpret the\n // 'change' numbers before as percentages instead of capacity counts.\n adjustmentType: autoscaling.AdjustmentType.CHANGE_IN_CAPACITY,\n});\n```\n\nThe AutoScaling construct library will create the required CloudWatch alarms and\nAutoScaling policies for you.\n\n## Target Tracking Scaling\n\nThis type of scaling scales in and out in order to keep a metric (typically\nrepresenting utilization) around a value you prefer. This type of scaling is\ntypically heavily service-dependent in what metric you can use, and so\ndifferent services will have different methods here to set up target tracking\nscaling.\n\nThe following example configures the read capacity of a DynamoDB table\nto be around 60% utilization:\n\n```ts\nconst readCapacity = table.autoScaleReadCapacity({\n minCapacity: 10,\n maxCapacity: 1000\n});\nreadCapacity.scaleOnUtilization({\n targetUtilizationPercent: 60\n});\n```\n\n## Scheduled Scaling\n\nThis type of scaling is used to change capacities based on time. 
It works\nby changing the `minCapacity` and `maxCapacity` of the attribute, and so\ncan be used for two purposes:\n\n* Scale in and out on a schedule by setting the `minCapacity` high or\n the `maxCapacity` low.\n* Still allow the regular scaling actions to do their job, but restrict\n the range they can scale over (by setting both `minCapacity` and\n `maxCapacity` but changing their range over time).\n\nThe following schedule expressions can be used:\n\n* `at(yyyy-mm-ddThh:mm:ss)` -- scale at a particular moment in time\n* `rate(value unit)` -- scale every minute/hour/day\n* `cron(mm hh dd mm dow)` -- scale on arbitrary schedules\n\nOf these, the cron expression is the most useful but also the most\ncomplicated. A schedule is expressed as a cron expression. The `Schedule` class has a `cron` method to help build cron expressions.\n\nThe following example scales the fleet out in the morning, and lets natural\nscaling take over at night:\n\n```ts\nconst capacity = resource.autoScaleCapacity({\n minCapacity: 1,\n maxCapacity: 50,\n});\n\ncapacity.scaleOnSchedule('PrescaleInTheMorning', {\n schedule: autoscaling.Schedule.cron({ hour: '8', minute: '0' }),\n minCapacity: 20,\n});\n\ncapacity.scaleOnSchedule('AllowDownscalingAtNight', {\n schedule: autoscaling.Schedule.cron({ hour: '20', minute: '0' }),\n minCapacity: 1\n});\n```\n\n## Examples\n\n### Lambda Provisioned Concurrency Auto Scaling\n\n```ts\n const handler = new lambda.Function(this, 'MyFunction', {\n runtime: lambda.Runtime.PYTHON_3_7,\n handler: 'index.handler',\n code: new lambda.InlineCode(`\nimport json, time\ndef handler(event, context):\n time.sleep(1)\n return {\n 'statusCode': 200,\n 'body': json.dumps('Hello CDK from Lambda!')\n }`),\n reservedConcurrentExecutions: 2,\n });\n\n const fnVer = handler.addVersion('CDKLambdaVersion', undefined, 'demo alias', 10);\n\n new apigateway.LambdaRestApi(this, 'API', { handler: fnVer })\n\n const target = new applicationautoscaling.ScalableTarget(this, 
'ScalableTarget', {\n serviceNamespace: applicationautoscaling.ServiceNamespace.LAMBDA,\n maxCapacity: 100,\n minCapacity: 10,\n resourceId: `function:${handler.functionName}:${fnVer.version}`,\n scalableDimension: 'lambda:function:ProvisionedConcurrency',\n })\ns\n target.scaleToTrackMetric('PceTracking', {\n targetValue: 0.9,\n predefinedMetric: applicationautoscaling.PredefinedMetric.LAMBDA_PROVISIONED_CONCURRENCY_UTILIZATION,\n })\n }\n ```\n"
|
|
245
|
-
},
|
|
246
186
|
"targets": {
|
|
247
187
|
"dotnet": {
|
|
248
188
|
"namespace": "Amazon.CDK.AWS.ApplicationAutoScaling"
|
|
@@ -256,10 +196,6 @@
|
|
|
256
196
|
}
|
|
257
197
|
},
|
|
258
198
|
"aws-cdk-lib.aws_applicationinsights": {
|
|
259
|
-
"locationInModule": {
|
|
260
|
-
"filename": "lib/index.ts",
|
|
261
|
-
"line": 13
|
|
262
|
-
},
|
|
263
199
|
"targets": {
|
|
264
200
|
"dotnet": {
|
|
265
201
|
"namespace": "Amazon.CDK.AWS.ApplicationInsights"
|
|
@@ -273,13 +209,6 @@
|
|
|
273
209
|
}
|
|
274
210
|
},
|
|
275
211
|
"aws-cdk-lib.aws_appmesh": {
|
|
276
|
-
"locationInModule": {
|
|
277
|
-
"filename": "lib/index.ts",
|
|
278
|
-
"line": 14
|
|
279
|
-
},
|
|
280
|
-
"readme": {
|
|
281
|
-
"markdown": "# AWS App Mesh Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nAWS App Mesh is a service mesh based on the [Envoy](https://www.envoyproxy.io/) proxy that makes it easy to monitor and control microservices. App Mesh standardizes how your microservices communicate, giving you end-to-end visibility and helping to ensure high-availability for your applications.\n\nApp Mesh gives you consistent visibility and network traffic controls for every microservice in an application.\n\nApp Mesh supports microservice applications that use service discovery naming for their components. To use App Mesh, you must have an existing application running on AWS Fargate, Amazon ECS, Amazon EKS, Kubernetes on AWS, or Amazon EC2.\n\nFor further information on **AWS App Mesh**, visit the [AWS App Mesh Documentation](https://docs.aws.amazon.com/app-mesh/index.html).\n\n## Create the App and Stack\n\n```ts\nconst app = new cdk.App();\nconst stack = new cdk.Stack(app, 'stack');\n```\n\n## Creating the Mesh\n\nA service mesh is a logical boundary for network traffic between the services that reside within it.\n\nAfter you create your service mesh, you can create virtual services, virtual nodes, virtual routers, and routes to distribute traffic between the applications in your mesh.\n\nThe following example creates the `AppMesh` service mesh with the default egress filter of `DROP_ALL`. 
See [the AWS CloudFormation `EgressFilter` resource](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appmesh-mesh-egressfilter.html) for more info on egress filters.\n\n```ts\nconst mesh = new Mesh(stack, 'AppMesh', {\n meshName: 'myAwsMesh',\n});\n```\n\nThe mesh can instead be created with the `ALLOW_ALL` egress filter by providing the `egressFilter` property.\n\n```ts\nconst mesh = new Mesh(stack, 'AppMesh', {\n meshName: 'myAwsMesh',\n egressFilter: MeshFilterType.ALLOW_ALL,\n});\n```\n\n## Adding VirtualRouters\n\nA _mesh_ uses _virtual routers_ as logical units to route requests to _virtual nodes_.\n\nVirtual routers handle traffic for one or more virtual services within your mesh.\nAfter you create a virtual router, you can create and associate routes to your virtual router that direct incoming requests to different virtual nodes.\n\n```ts\nconst router = mesh.addVirtualRouter('router', {\n listeners: [ VirtualRouterListener.http(8080) ],\n});\n```\n\nNote that creating the router using the `addVirtualRouter()` method places it in the same stack as the mesh\n(which might be different from the current stack).\nThe router can also be created using the `VirtualRouter` constructor (passing in the mesh) instead of calling the `addVirtualRouter()` method.\nThis is particularly useful when splitting your resources between many stacks: for example, defining the mesh itself as part of an infrastructure stack, but defining the other resources, such as routers, in the application stack:\n\n```ts\nconst mesh = new Mesh(infraStack, 'AppMesh', {\n meshName: 'myAwsMesh',\n egressFilter: MeshFilterType.ALLOW_ALL,\n});\n\n// the VirtualRouter will belong to 'appStack',\n// even though the Mesh belongs to 'infraStack'\nconst router = new VirtualRouter(appStack, 'router', {\n mesh, // notice that mesh is a required property when creating a router with the 'new' statement\n listeners: [VirtualRouterListener.http(8081)],\n});\n```\n\nThe same is 
true for other `add*()` methods in the App Mesh construct library.\n\nThe `VirtualRouterListener` class lets you define protocol-specific listeners.\nThe `http()`, `http2()`, `grpc()` and `tcp()` methods create listeners for the named protocols.\nThey accept a single parameter that defines the port to on which requests will be matched.\nThe port parameter defaults to 8080 if omitted.\n\n## Adding a VirtualService\n\nA _virtual service_ is an abstraction of a real service that is provided by a virtual node directly, or indirectly by means of a virtual router. Dependent services call your virtual service by its `virtualServiceName`, and those requests are routed to the virtual node or virtual router specified as the provider for the virtual service.\n\nWe recommend that you use the service discovery name of the real service that you're targeting (such as `my-service.default.svc.cluster.local`).\n\nWhen creating a virtual service:\n\n- If you want the virtual service to spread traffic across multiple virtual nodes, specify a virtual router.\n- If you want the virtual service to reach a virtual node directly, without a virtual router, specify a virtual node.\n\nAdding a virtual router as the provider:\n\n```ts\nnew VirtualService(stack, 'virtual-service', {\n virtualServiceName: 'my-service.default.svc.cluster.local', // optional\n virtualServiceProvider: VirtualServiceProvider.virtualRouter(router),\n});\n```\n\nAdding a virtual node as the provider:\n\n```ts\nnew VirtualService(stack, 'virtual-service', {\n virtualServiceName: `my-service.default.svc.cluster.local`, // optional\n virtualServiceProvider: VirtualServiceProvider.virtualNode(node),\n});\n```\n\n## Adding a VirtualNode\n\nA _virtual node_ acts as a logical pointer to a particular task group, such as an Amazon ECS service or a Kubernetes deployment.\n\nWhen you create a virtual node, accept inbound traffic by specifying a *listener*. 
Outbound traffic that your virtual node expects to send should be specified as a *back end*.\n\nThe response metadata for your new virtual node contains the Amazon Resource Name (ARN) that is associated with the virtual node. Set this value (either the full ARN or the truncated resource name) as the `APPMESH_VIRTUAL_NODE_NAME` environment variable for your task group's Envoy proxy container in your task definition or pod spec. For example, the value could be `mesh/default/virtualNode/simpleapp`. This is then mapped to the `node.id` and `node.cluster` Envoy parameters.\n\n> **Note**\n> If you require your Envoy stats or tracing to use a different name, you can override the `node.cluster` value that is set by `APPMESH_VIRTUAL_NODE_NAME` with the `APPMESH_VIRTUAL_NODE_CLUSTER` environment variable.\n\n```ts\nconst vpc = new ec2.Vpc(stack, 'vpc');\nconst namespace = new servicediscovery.PrivateDnsNamespace(stack, 'test-namespace', {\n vpc,\n name: 'domain.local',\n});\nconst service = namespace.createService('Svc');\n\nconst node = mesh.addVirtualNode('virtual-node', {\n serviceDiscovery: ServiceDiscovery.cloudMap(service),\n listeners: [VirtualNodeListener.http({\n port: 8081,\n healthCheck: HealthCheck.http({\n healthyThreshold: 3,\n interval: cdk.Duration.seconds(5), // minimum\n path: '/health-check-path',\n timeout: cdk.Duration.seconds(2), // minimum\n unhealthyThreshold: 2,\n }),\n })],\n accessLog: AccessLog.fromFilePath('/dev/stdout'),\n});\n```\n\nCreate a `VirtualNode` with the constructor and add tags.\n\n```ts\nconst node = new VirtualNode(stack, 'node', {\n mesh,\n serviceDiscovery: ServiceDiscovery.cloudMap(service),\n listeners: [VirtualNodeListener.http({\n port: 8080,\n healthCheck: HealthCheck.http({\n healthyThreshold: 3,\n interval: cdk.Duration.seconds(5), \n path: '/ping',\n timeout: cdk.Duration.seconds(2), \n unhealthyThreshold: 2,\n }),\n timeout: {\n idle: cdk.Duration.seconds(5),\n },\n })],\n backendDefaults: {\n tlsClientPolicy: {\n 
validation: {\n trust: TlsValidationTrust.file('/keys/local_cert_chain.pem'),\n },\n },\n },\n accessLog: AccessLog.fromFilePath('/dev/stdout'),\n});\n\ncdk.Tags.of(node).add('Environment', 'Dev');\n```\n\nCreate a `VirtualNode` with the constructor and add backend virtual service.\n\n```ts\nconst node = new VirtualNode(stack, 'node', {\n mesh,\n serviceDiscovery: ServiceDiscovery.cloudMap(service),\n listeners: [VirtualNodeListener.http({\n port: 8080,\n healthCheck: HealthCheck.http({\n healthyThreshold: 3,\n interval: cdk.Duration.seconds(5), \n path: '/ping',\n timeout: cdk.Duration.seconds(2), \n unhealthyThreshold: 2,\n }),\n timeout: {\n idle: cdk.Duration.seconds(5),\n },\n })],\n accessLog: AccessLog.fromFilePath('/dev/stdout'),\n});\n\nconst virtualService = new VirtualService(stack, 'service-1', {\n virtualServiceProvider: VirtualServiceProvider.virtualRouter(router),\n virtualServiceName: 'service1.domain.local',\n});\n\nnode.addBackend(Backend.virtualService(virtualService));\n```\n\nThe `listeners` property can be left blank and added later with the `node.addListener()` method. The `serviceDiscovery` property must be specified when specifying a listener.\n\nThe `backends` property can be added with `node.addBackend()`. In the example, we define a virtual service and add it to the virtual node to allow egress traffic to other nodes.\n\nThe `backendDefaults` property is added to the node while creating the virtual node. These are the virtual node's default settings for all backends.\n\n### Adding TLS to a listener\n\nThe `tls` property specifies TLS configuration when creating a listener for a virtual node or a virtual gateway. 
\nProvide the TLS certificate to the proxy in one of the following ways:\n\n- A certificate from AWS Certificate Manager (ACM).\n\n- A customer-provided certificate (specify a `certificateChain` path file and a `privateKey` file path).\n\n- A certificate provided by a Secrets Discovery Service (SDS) endpoint over local Unix Domain Socket (specify its `secretName`).\n\n```typescript\nimport { aws_certificatemanager as certificatemanager } from 'aws-cdk-lib';\n\n// A Virtual Node with listener TLS from an ACM provided certificate\nconst cert = new certificatemanager.Certificate(this, 'cert', {...});\n\nconst node = new VirtualNode(stack, 'node', {\n mesh,\n serviceDiscovery: ServiceDiscovery.dns('node'),\n listeners: [VirtualNodeListener.grpc({\n port: 80,\n tls: {\n mode: TlsMode.STRICT,\n certificate: TlsCertificate.acm(cert),\n },\n })],\n});\n\n// A Virtual Gateway with listener TLS from a customer provided file certificate\nconst gateway = new VirtualGateway(this, 'gateway', {\n mesh: mesh,\n listeners: [VirtualGatewayListener.grpc({\n port: 8080,\n tls: {\n mode: TlsMode.STRICT,\n certificate: TlsCertificate.file('path/to/certChain', 'path/to/privateKey'),\n },\n })],\n virtualGatewayName: 'gateway',\n});\n\n// A Virtual Gateway with listener TLS from a SDS provided certificate\nconst gateway2 = new VirtualGateway(this, 'gateway2', {\n mesh: mesh,\n listeners: [VirtualGatewayListener.http2({\n port: 8080,\n tls: {\n mode: TlsMode.STRICT,\n certificate: TlsCertificate.sds('secrete_certificate'),\n },\n })],\n virtualGatewayName: 'gateway2',\n});\n```\n\n### Adding mutual TLS authentication\n\nMutual TLS authentication is an optional component of TLS that offers two-way peer authentication. 
\nTo enable mutual TLS authentication, add the `mutualTlsCertificate` property to TLS client policy and/or the `mutualTlsValidation` property to your TLS listener.\n\n`tls.mutualTlsValidation` and `tlsClientPolicy.mutualTlsCertificate` can be sourced from either:\n\n- A customer-provided certificate (specify a `certificateChain` path file and a `privateKey` file path).\n\n- A certificate provided by a Secrets Discovery Service (SDS) endpoint over local Unix Domain Socket (specify its `secretName`).\n\n> **Note**\n> Currently, a certificate from AWS Certificate Manager (ACM) cannot be used for mutual TLS authentication.\n\n```typescript\nimport { aws_certificatemanager as certificatemanager } from 'aws-cdk-lib';\n\nconst node1 = new VirtualNode(stack, 'node1', {\n mesh,\n serviceDiscovery: ServiceDiscovery.dns('node'),\n listeners: [VirtualNodeListener.grpc({\n port: 80,\n tls: {\n mode: TlsMode.STRICT,\n certificate: TlsCertificate.file('path/to/certChain', 'path/to/privateKey'),\n // Validate a file client certificates to enable mutual TLS authentication when a client provides a certificate.\n mutualTlsValidation: {\n trust: TlsValidationTrust.file('path-to-certificate'),\n },\n },\n })],\n});\n\nconst node2 = new VirtualNode(stack, 'node2', {\n mesh,\n serviceDiscovery: ServiceDiscovery.dns('node2'),\n backendDefaults: {\n tlsClientPolicy: {\n ports: [8080, 8081],\n validation: {\n subjectAlternativeNames: SubjectAlternativeNames.matchingExactly('mesh-endpoint.apps.local'),\n trust: TlsValidationTrust.acm([\n acmpca.CertificateAuthority.fromCertificateAuthorityArn(stack, 'certificate', certificateAuthorityArn)]),\n },\n // Provide a SDS client certificate when a server requests it and enable mutual TLS authentication.\n mutualTlsCertificate: TlsCertificate.sds('secret_certificate'),\n },\n },\n});\n```\n\n### Adding outlier detection to a Virtual Node listener\n\nThe `outlierDetection` property adds outlier detection to a Virtual Node listener. 
The properties \n`baseEjectionDuration`, `interval`, `maxEjectionPercent`, and `maxServerErrors` are required.\n\n```typescript\n// Cloud Map service discovery is currently required for host ejection by outlier detection\nconst vpc = new ec2.Vpc(stack, 'vpc');\nconst namespace = new servicediscovery.PrivateDnsNamespace(this, 'test-namespace', {\n vpc,\n name: 'domain.local',\n});\nconst service = namespace.createService('Svc');\n\nconst node = mesh.addVirtualNode('virtual-node', {\n serviceDiscovery: ServiceDiscovery.cloudMap(service),\n listeners: [VirtualNodeListener.http({\n outlierDetection: {\n baseEjectionDuration: cdk.Duration.seconds(10),\n interval: cdk.Duration.seconds(30),\n maxEjectionPercent: 50,\n maxServerErrors: 5,\n },\n })],\n});\n```\n\n### Adding a connection pool to a listener\n\nThe `connectionPool` property can be added to a Virtual Node listener or Virtual Gateway listener to add a request connection pool. Each listener protocol type has its own connection pool properties.\n\n```typescript\n// A Virtual Node with a gRPC listener with a connection pool set\nconst node = new VirtualNode(stack, 'node', {\n mesh,\n // DNS service discovery can optionally specify the DNS response type as either LOAD_BALANCER or ENDPOINTS.\n // LOAD_BALANCER means that the DNS resolver returns a loadbalanced set of endpoints,\n // whereas ENDPOINTS means that the DNS resolver is returning all the endpoints.\n // By default, the response type is assumed to be LOAD_BALANCER\n serviceDiscovery: ServiceDiscovery.dns('node', DnsResponseType.ENDPOINTS),\n listeners: [VirtualNodeListener.http({\n port: 80,\n connectionPool: {\n maxConnections: 100,\n maxPendingRequests: 10,\n },\n })],\n});\n\n// A Virtual Gateway with a gRPC listener with a connection pool set\nconst gateway = new VirtualGateway(stack, 'gateway', {\n mesh,\n listeners: [VirtualGatewayListener.grpc({\n port: 8080,\n connectionPool: {\n maxRequests: 10,\n },\n })],\n virtualGatewayName: 
'gateway',\n});\n```\n\n## Adding a Route\n\nA _route_ matches requests with an associated virtual router and distributes traffic to its associated virtual nodes. \nThe route distributes matching requests to one or more target virtual nodes with relative weighting.\n\nThe `RouteSpec` class lets you define protocol-specific route specifications.\nThe `tcp()`, `http()`, `http2()`, and `grpc()` methods create a specification for the named protocols.\n\nFor HTTP-based routes, the match field can match on path (prefix, exact, or regex), HTTP method, scheme, \nHTTP headers, and query parameters. By default, HTTP-based routes match all requests. \n\nFor gRPC-based routes, the match field can match on service name, method name, and metadata.\nWhen specifying the method name, the service name must also be specified.\n\nFor example, here's how to add an HTTP route that matches based on a prefix of the URL path:\n\n```ts\nrouter.addRoute('route-http', {\n routeSpec: RouteSpec.http({\n weightedTargets: [\n {\n virtualNode: node,\n },\n ],\n match: {\n // Path that is passed to this method must start with '/'.\n path: HttpRoutePathMatch.startsWith('/path-to-app'),\n },\n }),\n});\n```\n\nAdd an HTTP2 route that matches based on exact path, method, scheme, headers, and query parameters:\n\n```ts\nrouter.addRoute('route-http2', {\n routeSpec: RouteSpec.http2({\n weightedTargets: [\n {\n virtualNode: node,\n },\n ],\n match: {\n path: HttpRoutePathMatch.exactly('/exact'),\n method: HttpRouteMethod.POST,\n protocol: HttpRouteProtocol.HTTPS,\n headers: [\n // All specified headers must match for the route to match.\n HeaderMatch.valueIs('Content-Type', 'application/json'),\n HeaderMatch.valueIsNot('Content-Type', 'application/json'),\n ],\n queryParameters: [\n // All specified query parameters must match for the route to match.\n QueryParameterMatch.valueIs('query-field', 'value')\n ],\n },\n }),\n});\n```\n\nAdd a single route with two targets and split traffic 
50/50:\n\n```ts\nrouter.addRoute('route-http', {\n routeSpec: RouteSpec.http({\n weightedTargets: [\n {\n virtualNode: node,\n weight: 50,\n },\n {\n virtualNode: node,\n weight: 50,\n },\n ],\n match: {\n path: HttpRoutePathMatch.startsWith('/path-to-app'),\n },\n }),\n});\n```\n\nAdd an http2 route with retries:\n\n```ts\nrouter.addRoute('route-http2-retry', {\n routeSpec: RouteSpec.http2({\n weightedTargets: [{ virtualNode: node }],\n retryPolicy: {\n // Retry if the connection failed\n tcpRetryEvents: [TcpRetryEvent.CONNECTION_ERROR],\n // Retry if HTTP responds with a gateway error (502, 503, 504)\n httpRetryEvents: [HttpRetryEvent.GATEWAY_ERROR],\n // Retry five times\n retryAttempts: 5,\n // Use a 1 second timeout per retry\n retryTimeout: cdk.Duration.seconds(1),\n },\n }),\n});\n```\n\nAdd a gRPC route with retries:\n\n```ts\nrouter.addRoute('route-grpc-retry', {\n routeSpec: RouteSpec.grpc({\n weightedTargets: [{ virtualNode: node }],\n match: { serviceName: 'servicename' },\n retryPolicy: {\n tcpRetryEvents: [TcpRetryEvent.CONNECTION_ERROR],\n httpRetryEvents: [HttpRetryEvent.GATEWAY_ERROR],\n // Retry if gRPC responds that the request was cancelled, a resource\n // was exhausted, or if the service is unavailable\n grpcRetryEvents: [\n GrpcRetryEvent.CANCELLED,\n GrpcRetryEvent.RESOURCE_EXHAUSTED,\n GrpcRetryEvent.UNAVAILABLE,\n ],\n retryAttempts: 5,\n retryTimeout: cdk.Duration.seconds(1),\n },\n }),\n});\n```\n\nAdd an gRPC route that matches based on method name and metadata:\n\n```ts\nrouter.addRoute('route-grpc-retry', {\n routeSpec: RouteSpec.grpc({\n weightedTargets: [{ virtualNode: node }],\n match: { \n // When method name is specified, service name must be also specified.\n methodName: 'methodname',\n serviceName: 'servicename',\n metadata: [\n // All specified metadata must match for the route to match.\n HeaderMatch.valueStartsWith('Content-Type', 'application/'),\n HeaderMatch.valueDoesNotStartWith('Content-Type', 'text/'),\n ],\n },\n 
}),\n});\n```\n\nAdd a gRPC route with timeout:\n\n```ts\nrouter.addRoute('route-http', {\n routeSpec: RouteSpec.grpc({\n weightedTargets: [\n {\n virtualNode: node,\n },\n ],\n match: {\n serviceName: 'my-service.default.svc.cluster.local',\n },\n timeout: {\n idle : cdk.Duration.seconds(2),\n perRequest: cdk.Duration.seconds(1),\n },\n }),\n});\n```\n\n## Adding a Virtual Gateway\n\nA _virtual gateway_ allows resources outside your mesh to communicate with resources inside your mesh.\nThe virtual gateway represents an Envoy proxy running in an Amazon ECS task, in a Kubernetes service, or on an Amazon EC2 instance.\nUnlike a virtual node, which represents Envoy running with an application, a virtual gateway represents Envoy deployed by itself.\n\nA virtual gateway is similar to a virtual node in that it has a listener that accepts traffic for a particular port and protocol (HTTP, HTTP2, gRPC).\nTraffic received by the virtual gateway is directed to other services in your mesh\nusing rules defined in gateway routes which can be added to your virtual gateway.\n\nCreate a virtual gateway with the constructor:\n\n```ts\nconst certificateAuthorityArn = 'arn:aws:acm-pca:us-east-1:123456789012:certificate-authority/12345678-1234-1234-1234-123456789012';\n\nconst gateway = new VirtualGateway(stack, 'gateway', {\n mesh: mesh,\n listeners: [VirtualGatewayListener.http({\n port: 443,\n healthCheck: HealthCheck.http({\n interval: cdk.Duration.seconds(10),\n }),\n })],\n backendDefaults: {\n tlsClientPolicy: {\n ports: [8080, 8081],\n validation: {\n trust: TlsValidationTrust.acm([\n acmpca.CertificateAuthority.fromCertificateAuthorityArn(stack, 'certificate', certificateAuthorityArn)]),\n },\n },\n },\n accessLog: AccessLog.fromFilePath('/dev/stdout'),\n virtualGatewayName: 'virtualGateway',\n});\n```\n\nAdd a virtual gateway directly to the mesh:\n\n```ts\nconst gateway = mesh.addVirtualGateway('gateway', {\n accessLog: AccessLog.fromFilePath('/dev/stdout'),\n 
virtualGatewayName: 'virtualGateway',\n listeners: [VirtualGatewayListener.http({\n port: 443,\n healthCheck: HealthCheck.http({\n interval: cdk.Duration.seconds(10),\n }),\n })],\n});\n```\n\nThe `listeners` field defaults to an HTTP Listener on port 8080 if omitted.\nA gateway route can be added using the `gateway.addGatewayRoute()` method.\n\nThe `backendDefaults` property, provided when creating the virtual gateway, specifies the virtual gateway's default settings for all backends.\n\n## Adding a Gateway Route\n\nA _gateway route_ is attached to a virtual gateway and routes matching traffic to an existing virtual service.\n\nFor HTTP-based gateway routes, the `match` field can be used to match on \npath (prefix, exact, or regex), HTTP method, host name, HTTP headers, and query parameters.\nBy default, HTTP-based gateway routes match all requests.\n\n```ts\ngateway.addGatewayRoute('gateway-route-http', {\n routeSpec: GatewayRouteSpec.http({\n routeTarget: virtualService,\n match: {\n path: HttpGatewayRoutePathMatch.regex('regex'),\n },\n }),\n});\n```\n\nFor gRPC-based gateway routes, the `match` field can be used to match on service name, host name, and metadata.\n\n```ts\ngateway.addGatewayRoute('gateway-route-grpc', {\n routeSpec: GatewayRouteSpec.grpc({\n routeTarget: virtualService,\n match: {\n hostname: GatewayRouteHostnameMatch.endsWith('.example.com'),\n },\n }),\n});\n```\n\nFor HTTP based gateway routes, App Mesh automatically rewrites the matched prefix path in Gateway Route to “/”.\nThis automatic rewrite configuration can be overwritten in following ways:\n\n```ts\ngateway.addGatewayRoute('gateway-route-http', {\n routeSpec: GatewayRouteSpec.http({\n routeTarget: virtualService,\n match: {\n // This disables the default rewrite to '/', and retains original path.\n path: HttpGatewayRoutePathMatch.startsWith('/path-to-app/', ''),\n },\n }),\n});\n\ngateway.addGatewayRoute('gateway-route-http-1', {\n routeSpec: GatewayRouteSpec.http({\n routeTarget: 
virtualService,\n match: {\n // If the request full path is '/path-to-app/xxxxx', this rewrites the path to '/rewrittenUri/xxxxx'.\n // Please note both `prefixPathMatch` and `rewriteTo` must start and end with the `/` character.\n path: HttpGatewayRoutePathMatch.startsWith('/path-to-app/', '/rewrittenUri/'), \n },\n }),\n});\n```\n\nIf matching other path (exact or regex), only specific rewrite path can be specified.\nUnlike `startsWith()` method above, no default rewrite is performed.\n\n```ts\ngateway.addGatewayRoute('gateway-route-http-2', {\n routeSpec: GatewayRouteSpec.http({\n routeTarget: virtualService,\n match: {\n // This rewrites the path from '/test' to '/rewrittenPath'.\n path: HttpGatewayRoutePathMatch.exactly('/test', '/rewrittenPath'), \n },\n }),\n});\n```\n\nFor HTTP/gRPC based routes, App Mesh automatically rewrites \nthe original request received at the Virtual Gateway to the destination Virtual Service name.\nThis default host name rewrite can be configured by specifying the rewrite rule as one of the `match` property:\n\n```ts\ngateway.addGatewayRoute('gateway-route-grpc', {\n routeSpec: GatewayRouteSpec.grpc({\n routeTarget: virtualService,\n match: {\n hostname: GatewayRouteHostnameMatch.exactly('example.com'),\n // This disables the default rewrite to virtual service name and retain original request.\n rewriteRequestHostname: false,\n },\n }),\n});\n```\n\n## Importing Resources\n\nEach App Mesh resource class comes with two static methods, `from<Resource>Arn` and `from<Resource>Attributes` (where `<Resource>` is replaced with the resource name, such as `VirtualNode`) for importing a reference to an existing App Mesh resource.\nThese imported resources can be used with other resources in your mesh as if they were defined directly in your CDK application.\n\n```ts\nconst arn = 'arn:aws:appmesh:us-east-1:123456789012:mesh/testMesh/virtualNode/testNode';\nVirtualNode.fromVirtualNodeArn(stack, 'importedVirtualNode', 
arn);\n```\n\n```ts\nVirtualNode.fromVirtualNodeAttributes(stack, 'imported-virtual-node', {\n mesh: Mesh.fromMeshName(stack, 'Mesh', 'testMesh'),\n virtualNodeName: virtualNodeName,\n});\n```\n\nTo import a mesh, again there are two static methods, `fromMeshArn` and `fromMeshName`.\n\n```ts\nconst arn = 'arn:aws:appmesh:us-east-1:123456789012:mesh/testMesh';\nMesh.fromMeshArn(stack, 'imported-mesh', arn);\n```\n\n```ts\nMesh.fromMeshName(stack, 'imported-mesh', 'abc');\n```\n\n## IAM Grants\n\n`VirtualNode` and `VirtualGateway` provide `grantStreamAggregatedResources` methods that grant identities that are running \nEnvoy access to stream generated config from App Mesh.\n\n```ts\nconst gateway = new VirtualGateway(stack, 'testGateway', { mesh: mesh });\nconst envoyUser = new iam.User(stack, 'envoyUser');\n\n/**\n * This will grant `grantStreamAggregatedResources` ONLY for this gateway.\n */\ngateway.grantStreamAggregatedResources(envoyUser)\n``` \n\n## Adding Resources to shared meshes\n\nA shared mesh allows resources created by different accounts to communicate with each other in the same mesh:\n\n```ts\n// This is the ARN for the mesh from different AWS IAM account ID.\n// Ensure mesh is properly shared with your account. For more details, see: https://github.com/aws/aws-cdk/issues/15404\nconst arn = 'arn:aws:appmesh:us-east-1:123456789012:mesh/testMesh';\nsharedMesh = Mesh.fromMeshArn(stack, 'imported-mesh', arn);\n\n// This VirtualNode resource can communicate with the resources in the mesh from different AWS IAM account ID.\nnew VirtualNode(stack, 'test-node', {\n mesh: sharedMesh,\n});\n```\n"
|
|
282
|
-
},
|
|
283
212
|
"targets": {
|
|
284
213
|
"dotnet": {
|
|
285
214
|
"namespace": "Amazon.CDK.AWS.AppMesh"
|
|
@@ -293,10 +222,6 @@
|
|
|
293
222
|
}
|
|
294
223
|
},
|
|
295
224
|
"aws-cdk-lib.aws_apprunner": {
|
|
296
|
-
"locationInModule": {
|
|
297
|
-
"filename": "lib/index.ts",
|
|
298
|
-
"line": 15
|
|
299
|
-
},
|
|
300
225
|
"targets": {
|
|
301
226
|
"dotnet": {
|
|
302
227
|
"namespace": "Amazon.CDK.AWS.AppRunner"
|
|
@@ -310,10 +235,6 @@
|
|
|
310
235
|
}
|
|
311
236
|
},
|
|
312
237
|
"aws-cdk-lib.aws_appstream": {
|
|
313
|
-
"locationInModule": {
|
|
314
|
-
"filename": "lib/index.ts",
|
|
315
|
-
"line": 16
|
|
316
|
-
},
|
|
317
238
|
"targets": {
|
|
318
239
|
"dotnet": {
|
|
319
240
|
"namespace": "Amazon.CDK.AWS.AppStream"
|
|
@@ -327,10 +248,6 @@
|
|
|
327
248
|
}
|
|
328
249
|
},
|
|
329
250
|
"aws-cdk-lib.aws_appsync": {
|
|
330
|
-
"locationInModule": {
|
|
331
|
-
"filename": "lib/index.ts",
|
|
332
|
-
"line": 17
|
|
333
|
-
},
|
|
334
251
|
"targets": {
|
|
335
252
|
"dotnet": {
|
|
336
253
|
"namespace": "Amazon.CDK.AWS.Appsync"
|
|
@@ -344,10 +261,6 @@
|
|
|
344
261
|
}
|
|
345
262
|
},
|
|
346
263
|
"aws-cdk-lib.aws_athena": {
|
|
347
|
-
"locationInModule": {
|
|
348
|
-
"filename": "lib/index.ts",
|
|
349
|
-
"line": 18
|
|
350
|
-
},
|
|
351
264
|
"targets": {
|
|
352
265
|
"dotnet": {
|
|
353
266
|
"namespace": "Amazon.CDK.AWS.Athena"
|
|
@@ -361,10 +274,6 @@
|
|
|
361
274
|
}
|
|
362
275
|
},
|
|
363
276
|
"aws-cdk-lib.aws_auditmanager": {
|
|
364
|
-
"locationInModule": {
|
|
365
|
-
"filename": "lib/index.ts",
|
|
366
|
-
"line": 19
|
|
367
|
-
},
|
|
368
277
|
"targets": {
|
|
369
278
|
"dotnet": {
|
|
370
279
|
"namespace": "Amazon.CDK.AWS.AuditManager"
|
|
@@ -378,13 +287,6 @@
|
|
|
378
287
|
}
|
|
379
288
|
},
|
|
380
289
|
"aws-cdk-lib.aws_autoscaling": {
|
|
381
|
-
"locationInModule": {
|
|
382
|
-
"filename": "lib/index.ts",
|
|
383
|
-
"line": 20
|
|
384
|
-
},
|
|
385
|
-
"readme": {
|
|
386
|
-
"markdown": "# Amazon EC2 Auto Scaling Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\n\nThis module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk) project.\n\n## Auto Scaling Group\n\nAn `AutoScalingGroup` represents a number of instances on which you run your code. You\npick the size of the fleet, the instance type and the OS image:\n\n```ts\nimport { aws_autoscaling as autoscaling } from 'aws-cdk-lib';\nimport { aws_ec2 as ec2 } from 'aws-cdk-lib';\n\nnew autoscaling.AutoScalingGroup(this, 'ASG', {\n vpc,\n instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.MICRO),\n machineImage: new ec2.AmazonLinuxImage() // get the latest Amazon Linux image\n});\n```\n\nNOTE: AutoScalingGroup has an property called `allowAllOutbound` (allowing the instances to contact the\ninternet) which is set to `true` by default. Be sure to set this to `false` if you don't want\nyour instances to be able to start arbitrary connections. Alternatively, you can specify an existing security\ngroup to attach to the instances that are launched, rather than have the group create a new one.\n\n```ts\nconst mySecurityGroup = new ec2.SecurityGroup(this, 'SecurityGroup', {...});\nnew autoscaling.AutoScalingGroup(this, 'ASG', {\n vpc,\n instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.MICRO),\n machineImage: new ec2.AmazonLinuxImage(),\n securityGroup: mySecurityGroup,\n});\n```\n\n## Machine Images (AMIs)\n\nAMIs control the OS that gets launched when you start your EC2 instance. 
The EC2\nlibrary contains constructs to select the AMI you want to use.\n\nDepending on the type of AMI, you select it a different way.\n\nThe latest version of Amazon Linux and Microsoft Windows images are\nselectable by instantiating one of these classes:\n\n[example of creating images](test/example.images.lit.ts)\n\n> NOTE: The Amazon Linux images selected will be cached in your `cdk.json`, so that your\n> AutoScalingGroups don't automatically change out from under you when you're making unrelated\n> changes. To update to the latest version of Amazon Linux, remove the cache entry from the `context`\n> section of your `cdk.json`.\n>\n> We will add command-line options to make this step easier in the future.\n\n## AutoScaling Instance Counts\n\nAutoScalingGroups make it possible to raise and lower the number of instances in the group,\nin response to (or in advance of) changes in workload.\n\nWhen you create your AutoScalingGroup, you specify a `minCapacity` and a\n`maxCapacity`. AutoScaling policies that respond to metrics will never go higher\nor lower than the indicated capacity (but scheduled scaling actions might, see\nbelow).\n\nThere are three ways to scale your capacity:\n\n* **In response to a metric** (also known as step scaling); for example, you\n might want to scale out if the CPU usage across your cluster starts to rise,\n and scale in when it drops again.\n* **By trying to keep a certain metric around a given value** (also known as\n target tracking scaling); you might want to automatically scale out and in to\n keep your CPU usage around 50%.\n* **On a schedule**; you might want to organize your scaling around traffic\n flows you expect, by scaling out in the morning and scaling in in the\n evening.\n\nThe general pattern of autoscaling will look like this:\n\n```ts\nconst autoScalingGroup = new autoscaling.AutoScalingGroup(this, 'ASG', {\n minCapacity: 5,\n maxCapacity: 100\n // ...\n});\n\n// Step 
scaling\nautoScalingGroup.scaleOnMetric(...);\n\n// Target tracking scaling\nautoScalingGroup.scaleOnCpuUtilization(...);\nautoScalingGroup.scaleOnIncomingBytes(...);\nautoScalingGroup.scaleOnOutgoingBytes(...);\nautoScalingGroup.scaleOnRequestCount(...);\nautoScalingGroup.scaleToTrackMetric(...);\n\n// Scheduled scaling\nautoScalingGroup.scaleOnSchedule(...);\n```\n\n### Step Scaling\n\nThis type of scaling scales in and out in deterministics steps that you\nconfigure, in response to metric values. For example, your scaling strategy to\nscale in response to a metric that represents your average worker pool usage\nmight look like this:\n\n```plaintext\n Scaling -1 (no change) +1 +3\n │ │ │ │ │\n ├────────┼───────────────────────┼────────┼────────┤\n │ │ │ │ │\nWorker use 0% 10% 50% 70% 100%\n```\n\n(Note that this is not necessarily a recommended scaling strategy, but it's\na possible one. You will have to determine what thresholds are right for you).\n\nNote that in order to set up this scaling strategy, you will have to emit a\nmetric representing your worker utilization from your instances. After that,\nyou would configure the scaling something like this:\n\n```ts\nconst workerUtilizationMetric = new cloudwatch.Metric({\n namespace: 'MyService',\n metricName: 'WorkerUtilization'\n});\n\ncapacity.scaleOnMetric('ScaleToCPU', {\n metric: workerUtilizationMetric,\n scalingSteps: [\n { upper: 10, change: -1 },\n { lower: 50, change: +1 },\n { lower: 70, change: +3 },\n ],\n\n // Change this to AdjustmentType.PERCENT_CHANGE_IN_CAPACITY to interpret the\n // 'change' numbers before as percentages instead of capacity counts.\n adjustmentType: autoscaling.AdjustmentType.CHANGE_IN_CAPACITY,\n});\n```\n\nThe AutoScaling construct library will create the required CloudWatch alarms and\nAutoScaling policies for you.\n\n### Target Tracking Scaling\n\nThis type of scaling scales in and out in order to keep a metric around a value\nyou prefer. 
There are four types of predefined metrics you can track, or you can\nchoose to track a custom metric. If you do choose to track a custom metric,\nbe aware that the metric has to represent instance utilization in some way\n(AutoScaling will scale out if the metric is higher than the target, and scale\nin if the metric is lower than the target).\n\nIf you configure multiple target tracking policies, AutoScaling will use the\none that yields the highest capacity.\n\nThe following example scales to keep the CPU usage of your instances around\n50% utilization:\n\n```ts\nautoScalingGroup.scaleOnCpuUtilization('KeepSpareCPU', {\n targetUtilizationPercent: 50\n});\n```\n\nTo scale on average network traffic in and out of your instances:\n\n```ts\nautoScalingGroup.scaleOnIncomingBytes('LimitIngressPerInstance', {\n targetBytesPerSecond: 10 * 1024 * 1024 // 10 MB/s\n});\nautoScalingGroup.scaleOnOutcomingBytes('LimitEgressPerInstance', {\n targetBytesPerSecond: 10 * 1024 * 1024 // 10 MB/s\n});\n```\n\nTo scale on the average request count per instance (only works for\nAutoScalingGroups that have been attached to Application Load\nBalancers):\n\n```ts\nautoScalingGroup.scaleOnRequestCount('LimitRPS', {\n targetRequestsPerSecond: 1000\n});\n```\n\n### Scheduled Scaling\n\nThis type of scaling is used to change capacities based on time. It works by\nchanging `minCapacity`, `maxCapacity` and `desiredCapacity` of the\nAutoScalingGroup, and so can be used for two purposes:\n\n* Scale in and out on a schedule by setting the `minCapacity` high or\n the `maxCapacity` low.\n* Still allow the regular scaling actions to do their job, but restrict\n the range they can scale over (by setting both `minCapacity` and\n `maxCapacity` but changing their range over time).\n\nA schedule is expressed as a cron expression. 
The `Schedule` class has a `cron` method to help build cron expressions.\n\nThe following example scales the fleet out in the morning, going back to natural\nscaling (all the way down to 1 instance if necessary) at night:\n\n```ts\nautoScalingGroup.scaleOnSchedule('PrescaleInTheMorning', {\n schedule: autoscaling.Schedule.cron({ hour: '8', minute: '0' }),\n minCapacity: 20,\n});\n\nautoScalingGroup.scaleOnSchedule('AllowDownscalingAtNight', {\n schedule: autoscaling.Schedule.cron({ hour: '20', minute: '0' }),\n minCapacity: 1\n});\n```\n\n## Configuring Instances using CloudFormation Init\n\nIt is possible to use the CloudFormation Init mechanism to configure the\ninstances in the AutoScalingGroup. You can write files to it, run commands,\nstart services, etc. See the documentation of\n[AWS::CloudFormation::Init](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-init.html)\nand the documentation of CDK's `aws-ec2` library for more information.\n\nWhen you specify a CloudFormation Init configuration for an AutoScalingGroup:\n\n* you *must* also specify `signals` to configure how long CloudFormation\n should wait for the instances to successfully configure themselves.\n* you *should* also specify an `updatePolicy` to configure how instances\n should be updated when the AutoScalingGroup is updated (for example,\n when the AMI is updated). 
If you don't specify an update policy, a *rolling\n update* is chosen by default.\n\nHere's an example of using CloudFormation Init to write a file to the\ninstance hosts on startup:\n\n```ts\nnew autoscaling.AutoScalingGroup(this, 'ASG', {\n // ...\n\n init: ec2.CloudFormationInit.fromElements(\n ec2.InitFile.fromString('/etc/my_instance', 'This got written during instance startup'),\n ),\n signals: autoscaling.Signals.waitForAll({\n timeout: Duration.minutes(10),\n }),\n});\n```\n\n## Signals\n\nIn normal operation, CloudFormation will send a Create or Update command to\nan AutoScalingGroup and proceed with the rest of the deployment without waiting\nfor the *instances in the AutoScalingGroup*.\n\nConfigure `signals` to tell CloudFormation to wait for a specific number of\ninstances in the AutoScalingGroup to have been started (or failed to start)\nbefore moving on. An instance is supposed to execute the\n[`cfn-signal`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-signal.html)\nprogram as part of its startup to indicate whether it was started\nsuccessfully or not.\n\nIf you use CloudFormation Init support (described in the previous section),\nthe appropriate call to `cfn-signal` is automatically added to the\nAutoScalingGroup's UserData. If you don't use the `signals` directly, you are\nresponsible for adding such a call yourself.\n\nThe following type of `Signals` are available:\n\n* `Signals.waitForAll([options])`: wait for all of `desiredCapacity` amount of instances\n to have started (recommended).\n* `Signals.waitForMinCapacity([options])`: wait for a `minCapacity` amount of instances\n to have started (use this if waiting for all instances takes too long and you are happy\n with a minimum count of healthy hosts).\n* `Signals.waitForCount(count, [options])`: wait for a specific amount of instances to have\n started.\n\nThere are two `options` you can configure:\n\n* `timeout`: maximum time a host startup is allowed to take. 
If a host does not report\n success within this time, it is considered a failure. Default is 5 minutes.\n* `minSuccessPercentage`: percentage of hosts that needs to be healthy in order for the\n update to succeed. If you set this value lower than 100, some percentage of hosts may\n report failure, while still considering the deployment a success. Default is 100%.\n\n## Update Policy\n\nThe *update policy* describes what should happen to running instances when the definition\nof the AutoScalingGroup is changed. For example, if you add a command to the UserData\nof an AutoScalingGroup, do the existing instances get replaced with new instances that\nhave executed the new UserData? Or do the \"old\" instances just keep on running?\n\nIt is recommended to always use an update policy, otherwise the current state of your\ninstances also depends the previous state of your instances, rather than just on your\nsource code. This degrades the reproducibility of your deployments.\n\nThe following update policies are available:\n\n* `UpdatePolicy.none()`: leave existing instances alone (not recommended).\n* `UpdatePolicy.rollingUpdate([options])`: progressively replace the existing\n instances with new instances, in small batches. At any point in time,\n roughly the same amount of total instances will be running. If the deployment\n needs to be rolled back, the fresh instances will be replaced with the \"old\"\n configuration again.\n* `UpdatePolicy.replacingUpdate([options])`: build a completely fresh copy\n of the new AutoScalingGroup next to the old one. 
Once the AutoScalingGroup\n has been successfully created (and the instances started, if `signals` is\n configured on the AutoScalingGroup), the old AutoScalingGroup is deleted.\n If the deployment needs to be rolled back, the new AutoScalingGroup is\n deleted and the old one is left unchanged.\n\n## Allowing Connections\n\nSee the documentation of the `@aws-cdk/aws-ec2` package for more information\nabout allowing connections between resources backed by instances.\n\n## Max Instance Lifetime\n\nTo enable the max instance lifetime support, specify `maxInstanceLifetime` property\nfor the `AutoscalingGroup` resource. The value must be between 7 and 365 days(inclusive).\nTo clear a previously set value, leave this property undefined.\n\n## Instance Monitoring\n\nTo disable detailed instance monitoring, specify `instanceMonitoring` property\nfor the `AutoscalingGroup` resource as `Monitoring.BASIC`. Otherwise detailed monitoring\nwill be enabled.\n\n## Monitoring Group Metrics\n\nGroup metrics are used to monitor group level properties; they describe the group rather than any of its instances (e.g GroupMaxSize, the group maximum size). 
To enable group metrics monitoring, use the `groupMetrics` property.\nAll group metrics are reported in a granularity of 1 minute at no additional charge.\n\nSee [EC2 docs](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-instance-monitoring.html#as-group-metrics) for a list of all available group metrics.\n\nTo enable group metrics monitoring using the `groupMetrics` property:\n\n```ts\n// Enable monitoring of all group metrics\nnew autoscaling.AutoScalingGroup(stack, 'ASG', {\n groupMetrics: [GroupMetrics.all()],\n // ...\n});\n\n// Enable monitoring for a subset of group metrics\nnew autoscaling.AutoScalingGroup(stack, 'ASG', {\n groupMetrics: [new autoscaling.GroupMetrics(GroupMetric.MIN_SIZE, GroupMetric.MAX_SIZE)],\n // ...\n});\n```\n\n## Protecting new instances from being terminated on scale-in\n\nBy default, Auto Scaling can terminate an instance at any time after launch when\nscaling in an Auto Scaling Group, subject to the group's [termination\npolicy](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-instance-termination.html).\n\nHowever, you may wish to protect newly-launched instances from being scaled in\nif they are going to run critical applications that should not be prematurely\nterminated. EC2 Capacity Providers for Amazon ECS requires this attribute be\nset to `true`.\n\n```ts\nnew autoscaling.AutoScalingGroup(stack, 'ASG', {\n newInstancesProtectedFromScaleIn: true,\n // ...\n});\n```\n\n## Future work\n\n* [ ] CloudWatch Events (impossible to add currently as the AutoScalingGroup ARN is\n necessary to make this rule and this cannot be accessed from CloudFormation).\n"
|
|
387
|
-
},
|
|
388
290
|
"targets": {
|
|
389
291
|
"dotnet": {
|
|
390
292
|
"namespace": "Amazon.CDK.AWS.AutoScaling"
|
|
@@ -398,13 +300,6 @@
|
|
|
398
300
|
}
|
|
399
301
|
},
|
|
400
302
|
"aws-cdk-lib.aws_autoscaling_common": {
|
|
401
|
-
"locationInModule": {
|
|
402
|
-
"filename": "lib/index.ts",
|
|
403
|
-
"line": 21
|
|
404
|
-
},
|
|
405
|
-
"readme": {
|
|
406
|
-
"markdown": "# AWS AutoScaling Common Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis is a sister package to `@aws-cdk/aws-autoscaling` and\n`@aws-cdk/aws-applicationautoscaling`. It contains shared implementation\ndetails between them.\n\nIt does not need to be used directly.\n"
|
|
407
|
-
},
|
|
408
303
|
"targets": {
|
|
409
304
|
"dotnet": {
|
|
410
305
|
"namespace": "Amazon.CDK.AWS.AutoScaling.Common"
|
|
@@ -418,13 +313,6 @@
|
|
|
418
313
|
}
|
|
419
314
|
},
|
|
420
315
|
"aws-cdk-lib.aws_autoscaling_hooktargets": {
|
|
421
|
-
"locationInModule": {
|
|
422
|
-
"filename": "lib/index.ts",
|
|
423
|
-
"line": 22
|
|
424
|
-
},
|
|
425
|
-
"readme": {
|
|
426
|
-
"markdown": "# Lifecycle Hook for the CDK AWS AutoScaling Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis library contains integration classes for AutoScaling lifecycle hooks.\nInstances of these classes should be passed to the\n`autoScalingGroup.addLifecycleHook()` method.\n\nLifecycle hooks can be activated in one of the following ways:\n\n* Invoke a Lambda function\n* Publish to an SNS topic\n* Send to an SQS queue\n\nFor more information on using this library, see the README of the\n`@aws-cdk/aws-autoscaling` library.\n\nFor more information about lifecycle hooks, see\n[Amazon EC2 AutoScaling Lifecycle hooks](https://docs.aws.amazon.com/autoscaling/ec2/userguide/lifecycle-hooks.html) in the Amazon EC2 User Guide.\n"
|
|
427
|
-
},
|
|
428
316
|
"targets": {
|
|
429
317
|
"dotnet": {
|
|
430
318
|
"namespace": "Amazon.CDK.AWS.AutoScaling.HookTargets"
|
|
@@ -438,10 +326,6 @@
|
|
|
438
326
|
}
|
|
439
327
|
},
|
|
440
328
|
"aws-cdk-lib.aws_autoscalingplans": {
|
|
441
|
-
"locationInModule": {
|
|
442
|
-
"filename": "lib/index.ts",
|
|
443
|
-
"line": 23
|
|
444
|
-
},
|
|
445
329
|
"targets": {
|
|
446
330
|
"dotnet": {
|
|
447
331
|
"namespace": "Amazon.CDK.AWS.AutoScalingPlans"
|
|
@@ -455,13 +339,6 @@
|
|
|
455
339
|
}
|
|
456
340
|
},
|
|
457
341
|
"aws-cdk-lib.aws_backup": {
|
|
458
|
-
"locationInModule": {
|
|
459
|
-
"filename": "lib/index.ts",
|
|
460
|
-
"line": 24
|
|
461
|
-
},
|
|
462
|
-
"readme": {
|
|
463
|
-
"markdown": "# AWS Backup Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nAWS Backup is a fully managed backup service that makes it easy to centralize and automate the backup of data across AWS services in the cloud and on premises. Using AWS Backup, you can configure backup policies and monitor backup activity for your AWS resources in one place.\n\n## Backup plan and selection\n\nIn AWS Backup, a *backup plan* is a policy expression that defines when and how you want to back up your AWS resources, such as Amazon DynamoDB tables or Amazon Elastic File System (Amazon EFS) file systems. You can assign resources to backup plans, and AWS Backup automatically backs up and retains backups for those resources according to the backup plan. You can create multiple backup plans if you have workloads with different backup requirements.\n\nThis module provides ready-made backup plans (similar to the console experience):\n\n```ts\n// Daily, weekly and monthly with 5 year retention\nconst plan = backup.BackupPlan.dailyWeeklyMonthly5YearRetention(this, 'Plan');\n```\n\nAssigning resources to a plan can be done with `addSelection()`:\n\n```ts fixture=with-plan\nconst myTable = dynamodb.Table.fromTableName(this, 'Table', 'myTableName');\nconst myCoolConstruct = new Construct(this, 'MyCoolConstruct');\n\nplan.addSelection('Selection', {\n resources: [\n backup.BackupResource.fromDynamoDbTable(myTable), // A DynamoDB table\n backup.BackupResource.fromTag('stage', 'prod'), // All resources that are tagged stage=prod in the region/account\n backup.BackupResource.fromConstruct(myCoolConstruct), // All backupable resources in `myCoolConstruct`\n ]\n})\n```\n\nIf not specified, a new IAM role with a managed policy for backup will be\ncreated for the selection. 
The `BackupSelection` implements `IGrantable`.\n\nTo add rules to a plan, use `addRule()`:\n\n```ts fixture=with-plan\nplan.addRule(new backup.BackupPlanRule({\n completionWindow: Duration.hours(2),\n startWindow: Duration.hours(1),\n scheduleExpression: events.Schedule.cron({ // Only cron expressions are supported\n day: '15',\n hour: '3',\n minute: '30'\n }),\n moveToColdStorageAfter: Duration.days(30)\n}));\n```\n\nReady-made rules are also available:\n\n```ts fixture=with-plan\nplan.addRule(backup.BackupPlanRule.daily());\nplan.addRule(backup.BackupPlanRule.weekly());\n```\n\nBy default a new [vault](#Backup-vault) is created when creating a plan.\nIt is also possible to specify a vault either at the plan level or at the\nrule level.\n\n```ts\nconst myVault = backup.BackupVault.fromBackupVaultName(this, 'Vault1', 'myVault');\nconst otherVault = backup.BackupVault.fromBackupVaultName(this, 'Vault2', 'otherVault');\n\nconst plan = backup.BackupPlan.daily35DayRetention(this, 'Plan', myVault); // Use `myVault` for all plan rules\nplan.addRule(backup.BackupPlanRule.monthly1Year(otherVault)); // Use `otherVault` for this specific rule\n```\n\n## Backup vault\n\nIn AWS Backup, a *backup vault* is a container that you organize your backups in. You can use backup vaults to set the AWS Key Management Service (AWS KMS) encryption key that is used to encrypt backups in the backup vault and to control access to the backups in the backup vault. If you require different encryption keys or access policies for different groups of backups, you can optionally create multiple backup vaults.\n\n```ts\nconst myKey = kms.Key.fromKeyArn(this, 'MyKey', 'aaa');\nconst myTopic = sns.Topic.fromTopicArn(this, 'MyTopic', 'bbb');\n\nconst vault = new backup.BackupVault(this, 'Vault', {\n encryptionKey: myKey, // Custom encryption key\n notificationTopic: myTopic, // Send all vault events to this SNS topic\n});\n```\n\nA vault has a default `RemovalPolicy` set to `RETAIN`. 
Note that removing a vault\nthat contains recovery points will fail.\n\n\n## Importing existing backup vault\n\nTo import an existing backup vault into your CDK application, use the `BackupVault.fromBackupVaultArn` or `BackupVault.fromBackupVaultName` \nstatic method. Here is an example of giving an IAM Role permission to start a backup job:\n\n```ts\nconst importedVault = backup.BackupVault.fromBackupVaultName(this, 'Vault', 'myVaultName');\n\nconst role = new iam.Role(this, 'Access Role', { assumedBy: new iam.ServicePrincipal('lambda.amazonaws.com') });\n\nimportedVault.grant(role, 'backup:StartBackupJob');\n```\n"
|
|
464
|
-
},
|
|
465
342
|
"targets": {
|
|
466
343
|
"dotnet": {
|
|
467
344
|
"namespace": "Amazon.CDK.AWS.Backup"
|
|
@@ -475,10 +352,6 @@
|
|
|
475
352
|
}
|
|
476
353
|
},
|
|
477
354
|
"aws-cdk-lib.aws_batch": {
|
|
478
|
-
"locationInModule": {
|
|
479
|
-
"filename": "lib/index.ts",
|
|
480
|
-
"line": 25
|
|
481
|
-
},
|
|
482
355
|
"targets": {
|
|
483
356
|
"dotnet": {
|
|
484
357
|
"namespace": "Amazon.CDK.AWS.Batch"
|
|
@@ -492,10 +365,6 @@
|
|
|
492
365
|
}
|
|
493
366
|
},
|
|
494
367
|
"aws-cdk-lib.aws_budgets": {
|
|
495
|
-
"locationInModule": {
|
|
496
|
-
"filename": "lib/index.ts",
|
|
497
|
-
"line": 26
|
|
498
|
-
},
|
|
499
368
|
"targets": {
|
|
500
369
|
"dotnet": {
|
|
501
370
|
"namespace": "Amazon.CDK.AWS.Budgets"
|
|
@@ -509,10 +378,6 @@
|
|
|
509
378
|
}
|
|
510
379
|
},
|
|
511
380
|
"aws-cdk-lib.aws_cassandra": {
|
|
512
|
-
"locationInModule": {
|
|
513
|
-
"filename": "lib/index.ts",
|
|
514
|
-
"line": 27
|
|
515
|
-
},
|
|
516
381
|
"targets": {
|
|
517
382
|
"dotnet": {
|
|
518
383
|
"namespace": "Amazon.CDK.AWS.Cassandra"
|
|
@@ -526,10 +391,6 @@
|
|
|
526
391
|
}
|
|
527
392
|
},
|
|
528
393
|
"aws-cdk-lib.aws_ce": {
|
|
529
|
-
"locationInModule": {
|
|
530
|
-
"filename": "lib/index.ts",
|
|
531
|
-
"line": 28
|
|
532
|
-
},
|
|
533
394
|
"targets": {
|
|
534
395
|
"dotnet": {
|
|
535
396
|
"namespace": "Amazon.CDK.AWS.CE"
|
|
@@ -543,13 +404,6 @@
|
|
|
543
404
|
}
|
|
544
405
|
},
|
|
545
406
|
"aws-cdk-lib.aws_certificatemanager": {
|
|
546
|
-
"locationInModule": {
|
|
547
|
-
"filename": "lib/index.ts",
|
|
548
|
-
"line": 29
|
|
549
|
-
},
|
|
550
|
-
"readme": {
|
|
551
|
-
"markdown": "# AWS Certificate Manager Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\n\nAWS Certificate Manager (ACM) handles the complexity of creating, storing, and renewing public and private SSL/TLS X.509 certificates and keys that\nprotect your AWS websites and applications. ACM certificates can secure singular domain names, multiple specific domain names, wildcard domains, or\ncombinations of these. ACM wildcard certificates can protect an unlimited number of subdomains.\n\nThis package provides Constructs for provisioning and referencing ACM certificates which can be used with CloudFront and ELB.\n\nAfter requesting a certificate, you will need to prove that you own the\ndomain in question before the certificate will be granted. The CloudFormation\ndeployment will wait until this verification process has been completed.\n\nBecause of this wait time, when using manual validation methods, it's better\nto provision your certificates either in a separate stack from your main\nservice, or provision them manually and import them into your CDK application.\n\n**Note:** There is a limit on total number of ACM certificates that can be requested on an account and region within a year.\nThe default limit is 2000, but this limit may be (much) lower on new AWS accounts.\nSee https://docs.aws.amazon.com/acm/latest/userguide/acm-limits.html for more information.\n\n## DNS validation\n\nDNS validation is the preferred method to validate domain ownership, as it has a number of advantages over email validation.\nSee also [Validate with DNS](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-validate-dns.html)\nin the AWS Certificate Manager User Guide.\n\nIf Amazon Route 53 is your DNS provider for the requested domain, the DNS record can be\ncreated automatically:\n\n```ts\nimport { aws_certificatemanager as acm } from 'aws-cdk-lib';\nimport { aws_route53 as route53 } from 'aws-cdk-lib';\n\nconst myHostedZone = 
new route53.HostedZone(this, 'HostedZone', {\n zoneName: 'example.com',\n});\nnew acm.Certificate(this, 'Certificate', {\n domainName: 'hello.example.com',\n validation: acm.CertificateValidation.fromDns(myHostedZone),\n});\n```\n\nIf Route 53 is not your DNS provider, the DNS records must be added manually and the stack will not complete\ncreating until the records are added.\n\n```ts\nnew acm.Certificate(this, 'Certificate', {\n domainName: 'hello.example.com',\n validation: acm.CertificateValidation.fromDns(), // Records must be added manually\n});\n```\n\nWhen working with multiple domains, use the `CertificateValidation.fromDnsMultiZone()`:\n\n```ts\nconst exampleCom = new route53.HostedZone(this, 'ExampleCom', {\n zoneName: 'example.com',\n});\nconst exampleNet = new route53.HostedZone(this, 'ExampleNet', {\n zoneName: 'example.net',\n});\n\nconst cert = new acm.Certificate(this, 'Certificate', {\n domainName: 'test.example.com',\n subjectAlternativeNames: ['cool.example.com', 'test.example.net'],\n validation: acm.CertificateValidation.fromDnsMultiZone({\n 'test.example.com': exampleCom,\n 'cool.example.com': exampleCom,\n 'test.example.net': exampleNet,\n }),\n});\n```\n\n## Email validation\n\nEmail-validated certificates (the default) are validated by receiving an\nemail on one of a number of predefined domains and following the instructions\nin the email.\n\nSee [Validate with Email](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-validate-email.html)\nin the AWS Certificate Manager User Guide.\n\n```ts\nnew acm.Certificate(this, 'Certificate', {\n domainName: 'hello.example.com',\n validation: acm.CertificateValidation.fromEmail(), // Optional, this is the default\n});\n```\n\n## Cross-region Certificates\n\nACM certificates that are used with CloudFront -- or higher-level constructs which rely on CloudFront -- must be in the `us-east-1` region.\nThe `DnsValidatedCertificate` construct exists to facilitate creating these certificates 
cross-region. This resource can only be used with\nRoute53-based DNS validation.\n\n```ts\nnew acm.DnsValidatedCertificate(this, 'CrossRegionCertificate', {\n domainName: 'hello.example.com',\n hostedZone: myHostedZone,\n region: 'us-east-1',\n});\n```\n\n## Importing\n\nIf you want to import an existing certificate, you can do so from its ARN:\n\n```ts\nconst arn = 'arn:aws:...';\nconst certificate = Certificate.fromCertificateArn(this, 'Certificate', arn);\n```\n\n## Sharing between Stacks\n\nTo share the certificate between stacks in the same CDK application, simply\npass the `Certificate` object between the stacks.\n\n## Metrics\n\nThe `DaysToExpiry` metric is available via the `metricDaysToExpiry` method for\nall certificates. This metric is emitted by AWS Certificates Manager once per\nday until the certificate has effectively expired.\n\nAn alarm can be created to determine whether a certificate is soon due for\nrenewal ussing the following code:\n\n```ts\nconst certificate = new Certificate(this, 'Certificate', { /* ... */ });\ncertificate.metricDaysToExpiry().createAlarm({\n comparisonOperator: cloudwatch.ComparisonOperator.LESS_THAN_THRESHOLD,\n evaluationPeriods: 1,\n threshold: 45, // Automatic rotation happens between 60 and 45 days before expiry\n});\n```\n"
|
|
552
|
-
},
|
|
553
407
|
"targets": {
|
|
554
408
|
"dotnet": {
|
|
555
409
|
"namespace": "Amazon.CDK.AWS.CertificateManager"
|
|
@@ -563,13 +417,6 @@
|
|
|
563
417
|
}
|
|
564
418
|
},
|
|
565
419
|
"aws-cdk-lib.aws_chatbot": {
|
|
566
|
-
"locationInModule": {
|
|
567
|
-
"filename": "lib/index.ts",
|
|
568
|
-
"line": 30
|
|
569
|
-
},
|
|
570
|
-
"readme": {
|
|
571
|
-
"markdown": "# AWS::Chatbot Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nAWS Chatbot is an AWS service that enables DevOps and software development teams to use Slack chat rooms to monitor and respond to operational events in their AWS Cloud. AWS Chatbot processes AWS service notifications from Amazon Simple Notification Service (Amazon SNS), and forwards them to Slack chat rooms so teams can analyze and act on them immediately, regardless of location.\n\nThis module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk) project.\n\n```ts\nimport { aws_chatbot as chatbot } from 'aws-cdk-lib';\n\nconst slackChannel = new chatbot.SlackChannelConfiguration(this, 'MySlackChannel', {\n slackChannelConfigurationName: 'YOUR_CHANNEL_NAME',\n slackWorkspaceId: 'YOUR_SLACK_WORKSPACE_ID',\n slackChannelId: 'YOUR_SLACK_CHANNEL_ID',\n});\n\nslackChannel.addToRolePolicy(new iam.PolicyStatement({\n effect: iam.Effect.ALLOW,\n actions: [\n 's3:GetObject',\n ],\n resources: ['arn:aws:s3:::abc/xyz/123.txt'],\n}));\n```\n\n## Log Group\n\nSlack channel configuration automatically create a log group with the name `/aws/chatbot/<configuration-name>` in `us-east-1` upon first execution with\nlog data set to never expire.\n\nThe `logRetention` property can be used to set a different expiration period. A log group will be created if not already exists.\nIf the log group already exists, it's expiration will be configured to the value specified in this construct (never expire, by default).\n\nBy default, CDK uses the AWS SDK retry options when interacting with the log group. 
The `logRetentionRetryOptions` property\nallows you to customize the maximum number of retries and base backoff duration.\n\n*Note* that, if `logRetention` is set, a [CloudFormation custom\nresource](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cfn-customresource.html) is added\nto the stack that pre-creates the log group as part of the stack deployment, if it already doesn't exist, and sets the\ncorrect log retention period (never expire, by default).\n"
|
|
572
|
-
},
|
|
573
420
|
"targets": {
|
|
574
421
|
"dotnet": {
|
|
575
422
|
"namespace": "Amazon.CDK.AWS.Chatbot"
|
|
@@ -583,10 +430,6 @@
|
|
|
583
430
|
}
|
|
584
431
|
},
|
|
585
432
|
"aws-cdk-lib.aws_cloud9": {
|
|
586
|
-
"locationInModule": {
|
|
587
|
-
"filename": "lib/index.ts",
|
|
588
|
-
"line": 31
|
|
589
|
-
},
|
|
590
433
|
"targets": {
|
|
591
434
|
"dotnet": {
|
|
592
435
|
"namespace": "Amazon.CDK.AWS.Cloud9"
|
|
@@ -600,13 +443,6 @@
|
|
|
600
443
|
}
|
|
601
444
|
},
|
|
602
445
|
"aws-cdk-lib.aws_cloudformation": {
|
|
603
|
-
"locationInModule": {
|
|
604
|
-
"filename": "lib/index.ts",
|
|
605
|
-
"line": 32
|
|
606
|
-
},
|
|
607
|
-
"readme": {
|
|
608
|
-
"markdown": "# AWS CloudFormation Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n> This API may emit warnings. Backward compatibility is not guaranteed.\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk) project.\n"
|
|
609
|
-
},
|
|
610
446
|
"targets": {
|
|
611
447
|
"dotnet": {
|
|
612
448
|
"namespace": "Amazon.CDK.AWS.CloudFormation"
|
|
@@ -620,13 +456,6 @@
|
|
|
620
456
|
}
|
|
621
457
|
},
|
|
622
458
|
"aws-cdk-lib.aws_cloudfront": {
|
|
623
|
-
"locationInModule": {
|
|
624
|
-
"filename": "lib/index.ts",
|
|
625
|
-
"line": 33
|
|
626
|
-
},
|
|
627
|
-
"readme": {
|
|
628
|
-
"markdown": "# Amazon CloudFront Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nAmazon CloudFront is a web service that speeds up distribution of your static and dynamic web content, such as .html, .css, .js, and image files, to\nyour users. CloudFront delivers your content through a worldwide network of data centers called edge locations. When a user requests content that\nyou're serving with CloudFront, the user is routed to the edge location that provides the lowest latency, so that content is delivered with the best\npossible performance.\n\n## Distribution API\n\nThe `Distribution` API is currently being built to replace the existing `CloudFrontWebDistribution` API. The `Distribution` API is optimized for the\nmost common use cases of CloudFront distributions (e.g., single origin and behavior, few customizations) while still providing the ability for more\nadvanced use cases. The API focuses on simplicity for the common use cases, and convenience methods for creating the behaviors and origins necessary\nfor more complex use cases.\n\n### Creating a distribution\n\nCloudFront distributions deliver your content from one or more origins; an origin is the location where you store the original version of your\ncontent. Origins can be created from S3 buckets or a custom origin (HTTP server). Constructs to define origins are in the `@aws-cdk/aws-cloudfront-origins` module.\n\nEach distribution has a default behavior which applies to all requests to that distribution, and routes requests to a primary origin.\nAdditional behaviors may be specified for an origin with a given URL path pattern. Behaviors allow routing with multiple origins,\ncontrolling which HTTP methods to support, whether to require users to use HTTPS, and what query strings or cookies to forward to your origin,\namong other settings.\n\n#### From an S3 Bucket\n\nAn S3 bucket can be added as an origin. 
If the bucket is configured as a website endpoint, the distribution can use S3 redirects and S3 custom error\ndocuments.\n\n```ts\nimport { aws_cloudfront as cloudfront } from 'aws-cdk-lib';\nimport { aws_cloudfront_origins as origins } from 'aws-cdk-lib';\n\n// Creates a distribution for a S3 bucket.\nconst myBucket = new s3.Bucket(this, 'myBucket');\nnew cloudfront.Distribution(this, 'myDist', {\n defaultBehavior: { origin: new origins.S3Origin(myBucket) },\n});\n```\n\nThe above will treat the bucket differently based on if `IBucket.isWebsite` is set or not. If the bucket is configured as a website, the bucket is\ntreated as an HTTP origin, and the built-in S3 redirects and error pages can be used. Otherwise, the bucket is handled as a bucket origin and\nCloudFront's redirect and error handling will be used. In the latter case, the Origin will create an origin access identity and grant it access to the\nunderlying bucket. This can be used in conjunction with a bucket that is not public to require that your users access your content using CloudFront\nURLs and not S3 URLs directly.\n\n#### ELBv2 Load Balancer\n\nAn Elastic Load Balancing (ELB) v2 load balancer may be used as an origin. In order for a load balancer to serve as an origin, it must be publicly\naccessible (`internetFacing` is true). Both Application and Network load balancers are supported.\n\n```ts\nimport { aws_ec2 as ec2 } from 'aws-cdk-lib';\nimport { aws_elasticloadbalancingv2 as elbv2 } from 'aws-cdk-lib';\n\nconst vpc = new ec2.Vpc(...);\n// Create an application load balancer in a VPC. 
'internetFacing' must be 'true'\n// for CloudFront to access the load balancer and use it as an origin.\nconst lb = new elbv2.ApplicationLoadBalancer(this, 'LB', {\n vpc,\n internetFacing: true\n});\nnew cloudfront.Distribution(this, 'myDist', {\n defaultBehavior: { origin: new origins.LoadBalancerV2Origin(lb) },\n});\n```\n\n#### From an HTTP endpoint\n\nOrigins can also be created from any other HTTP endpoint, given the domain name, and optionally, other origin properties.\n\n```ts\nnew cloudfront.Distribution(this, 'myDist', {\n defaultBehavior: { origin: new origins.HttpOrigin('www.example.com') },\n});\n```\n\n### Domain Names and Certificates\n\nWhen you create a distribution, CloudFront assigns a domain name for the distribution, for example: `d111111abcdef8.cloudfront.net`; this value can\nbe retrieved from `distribution.distributionDomainName`. CloudFront distributions use a default certificate (`*.cloudfront.net`) to support HTTPS by\ndefault. If you want to use your own domain name, such as `www.example.com`, you must associate a certificate with your distribution that contains\nyour domain name, and provide one (or more) domain names from the certificate for the distribution.\n\nThe certificate must be present in the AWS Certificate Manager (ACM) service in the US East (N. Virginia) region; the certificate\nmay either be created by ACM, or created elsewhere and imported into ACM. When a certificate is used, the distribution will support HTTPS connections\nfrom SNI only and a minimum protocol version of TLSv1.2_2021 if the '@aws-cdk/aws-cloudfront:defaultSecurityPolicyTLSv1.2_2021' feature flag is set, and TLSv1.2_2019 otherwise. 
\n\n```ts\nconst myCertificate = new acm.DnsValidatedCertificate(this, 'mySiteCert', {\n domainName: 'www.example.com',\n hostedZone,\n});\nnew cloudfront.Distribution(this, 'myDist', {\n defaultBehavior: { origin: new origins.S3Origin(myBucket) },\n domainNames: ['www.example.com'],\n certificate: myCertificate,\n});\n```\n\nHowever, you can customize the minimum protocol version for the certificate while creating the distribution using `minimumProtocolVersion` property.\n\n```ts\nnew cloudfront.Distribution(this, 'myDist', {\n defaultBehavior: { origin: new origins.S3Origin(myBucket) },\n domainNames: ['www.example.com'],\n minimumProtocolVersion: SecurityPolicyProtocol.TLS_V1_2016\n});\n```\n\n### Multiple Behaviors & Origins\n\nEach distribution has a default behavior which applies to all requests to that distribution; additional behaviors may be specified for a\ngiven URL path pattern. Behaviors allow routing with multiple origins, controlling which HTTP methods to support, whether to require users to\nuse HTTPS, and what query strings or cookies to forward to your origin, among others.\n\nThe properties of the default behavior can be adjusted as part of the distribution creation. The following example shows configuring the HTTP\nmethods and viewer protocol policy of the cache.\n\n```ts\nconst myWebDistribution = new cloudfront.Distribution(this, 'myDist', {\n defaultBehavior: {\n origin: new origins.S3Origin(myBucket),\n allowedMethods: AllowedMethods.ALLOW_ALL,\n viewerProtocolPolicy: ViewerProtocolPolicy.REDIRECT_TO_HTTPS,\n }\n});\n```\n\nAdditional behaviors can be specified at creation, or added after the initial creation. Each additional behavior is associated with an origin,\nand enable customization for a specific set of resources based on a URL path pattern. 
For example, we can add a behavior to `myWebDistribution` to\noverride the default viewer protocol policy for all of the images.\n\n```ts\nmyWebDistribution.addBehavior('/images/*.jpg', new origins.S3Origin(myBucket), {\n viewerProtocolPolicy: ViewerProtocolPolicy.REDIRECT_TO_HTTPS,\n});\n```\n\nThese behaviors can also be specified at distribution creation time.\n\n```ts\nconst bucketOrigin = new origins.S3Origin(myBucket);\nnew cloudfront.Distribution(this, 'myDist', {\n defaultBehavior: {\n origin: bucketOrigin,\n allowedMethods: AllowedMethods.ALLOW_ALL,\n viewerProtocolPolicy: ViewerProtocolPolicy.REDIRECT_TO_HTTPS,\n },\n additionalBehaviors: {\n '/images/*.jpg': {\n origin: bucketOrigin,\n viewerProtocolPolicy: ViewerProtocolPolicy.REDIRECT_TO_HTTPS,\n },\n },\n});\n```\n\n### Customizing Cache Keys and TTLs with Cache Policies\n\nYou can use a cache policy to improve your cache hit ratio by controlling the values (URL query strings, HTTP headers, and cookies)\nthat are included in the cache key, and/or adjusting how long items remain in the cache via the time-to-live (TTL) settings.\nCloudFront provides some predefined cache policies, known as managed policies, for common use cases. 
You can use these managed policies,\nor you can create your own cache policy that’s specific to your needs.\nSee https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html for more details.\n\n```ts\n// Using an existing cache policy\nnew cloudfront.Distribution(this, 'myDistManagedPolicy', {\n defaultBehavior: {\n origin: bucketOrigin,\n cachePolicy: cloudfront.CachePolicy.CACHING_OPTIMIZED,\n },\n});\n\n// Creating a custom cache policy -- all parameters optional\nconst myCachePolicy = new cloudfront.CachePolicy(this, 'myCachePolicy', {\n cachePolicyName: 'MyPolicy',\n comment: 'A default policy',\n defaultTtl: Duration.days(2),\n minTtl: Duration.minutes(1),\n maxTtl: Duration.days(10),\n cookieBehavior: cloudfront.CacheCookieBehavior.all(),\n headerBehavior: cloudfront.CacheHeaderBehavior.allowList('X-CustomHeader'),\n queryStringBehavior: cloudfront.CacheQueryStringBehavior.denyList('username'),\n enableAcceptEncodingGzip: true,\n enableAcceptEncodingBrotli: true,\n});\nnew cloudfront.Distribution(this, 'myDistCustomPolicy', {\n defaultBehavior: {\n origin: bucketOrigin,\n cachePolicy: myCachePolicy,\n },\n});\n```\n\n### Customizing Origin Requests with Origin Request Policies\n\nWhen CloudFront makes a request to an origin, the URL path, request body (if present), and a few standard headers are included.\nOther information from the viewer request, such as URL query strings, HTTP headers, and cookies, is not included in the origin request by default.\nYou can use an origin request policy to control the information that’s included in an origin request.\nCloudFront provides some predefined origin request policies, known as managed policies, for common use cases. 
You can use these managed policies,\nor you can create your own origin request policy that’s specific to your needs.\nSee https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html for more details.\n\n```ts\n// Using an existing origin request policy\nnew cloudfront.Distribution(this, 'myDistManagedPolicy', {\n defaultBehavior: {\n origin: bucketOrigin,\n originRequestPolicy: cloudfront.OriginRequestPolicy.CORS_S3_ORIGIN,\n },\n});\n// Creating a custom origin request policy -- all parameters optional\nconst myOriginRequestPolicy = new cloudfront.OriginRequestPolicy(stack, 'OriginRequestPolicy', {\n originRequestPolicyName: 'MyPolicy',\n comment: 'A default policy',\n cookieBehavior: cloudfront.OriginRequestCookieBehavior.none(),\n headerBehavior: cloudfront.OriginRequestHeaderBehavior.all('CloudFront-Is-Android-Viewer'),\n queryStringBehavior: cloudfront.OriginRequestQueryStringBehavior.allowList('username'),\n});\nnew cloudfront.Distribution(this, 'myDistCustomPolicy', {\n defaultBehavior: {\n origin: bucketOrigin,\n cachePolicy: myCachePolicy,\n originRequestPolicy: myOriginRequestPolicy,\n },\n});\n```\n\n### Validating signed URLs or signed cookies with Trusted Key Groups\n\nCloudFront Distribution now supports validating signed URLs or signed cookies using key groups. 
When a cache behavior contains trusted key groups, CloudFront requires signed URLs or signed cookies for all requests that match the cache behavior.\n\nExample:\n\n```ts\n// public key in PEM format\nconst pubKey = new PublicKey(stack, 'MyPubKey', {\n encodedKey: publicKey,\n});\n\nconst keyGroup = new KeyGroup(stack, 'MyKeyGroup', {\n items: [\n pubKey,\n ],\n});\n\nnew cloudfront.Distribution(stack, 'Dist', {\n defaultBehavior: {\n origin: new origins.HttpOrigin('www.example.com'),\n trustedKeyGroups: [\n keyGroup,\n ],\n },\n});\n```\n\n### Lambda@Edge\n\nLambda@Edge is an extension of AWS Lambda, a compute service that lets you execute functions that customize the content that CloudFront delivers.\nYou can author Node.js or Python functions in the US East (N. Virginia) region,\nand then execute them in AWS locations globally that are closer to the viewer,\nwithout provisioning or managing servers.\nLambda@Edge functions are associated with a specific behavior and event type.\nLambda@Edge can be used to rewrite URLs,\nalter responses based on headers or cookies,\nor authorize requests based on headers or authorization tokens.\n\nThe following shows a Lambda@Edge function added to the default behavior and triggered on every request:\n\n```ts\nconst myFunc = new cloudfront.experimental.EdgeFunction(this, 'MyFunction', {\n runtime: lambda.Runtime.NODEJS_12_X,\n handler: 'index.handler',\n code: lambda.Code.fromAsset(path.join(__dirname, 'lambda-handler')),\n});\nnew cloudfront.Distribution(this, 'myDist', {\n defaultBehavior: {\n origin: new origins.S3Origin(myBucket),\n edgeLambdas: [\n {\n functionVersion: myFunc.currentVersion,\n eventType: cloudfront.LambdaEdgeEventType.VIEWER_REQUEST,\n }\n ],\n },\n});\n```\n\n> **Note:** Lambda@Edge functions must be created in the `us-east-1` region, regardless of the region of the CloudFront distribution and stack.\n> To make it easier to request functions for Lambda@Edge, the `EdgeFunction` construct can be used.\n> The 
`EdgeFunction` construct will automatically request a function in `us-east-1`, regardless of the region of the current stack.\n> `EdgeFunction` has the same interface as `Function` and can be created and used interchangeably.\n> Please note that using `EdgeFunction` requires that the `us-east-1` region has been bootstrapped.\n> See https://docs.aws.amazon.com/cdk/latest/guide/bootstrapping.html for more about bootstrapping regions.\n\nIf the stack is in `us-east-1`, a \"normal\" `lambda.Function` can be used instead of an `EdgeFunction`.\n\n```ts\nconst myFunc = new lambda.Function(this, 'MyFunction', {\n runtime: lambda.Runtime.NODEJS_12_X,\n handler: 'index.handler',\n code: lambda.Code.fromAsset(path.join(__dirname, 'lambda-handler')),\n});\n```\n\nIf the stack is not in `us-east-1`, and you need references from different applications on the same account,\nyou can also set a specific stack ID for each Lambda@Edge.\n\n```ts\nconst myFunc1 = new cloudfront.experimental.EdgeFunction(this, 'MyFunction1', {\n runtime: lambda.Runtime.NODEJS_12_X,\n handler: 'index.handler',\n code: lambda.Code.fromAsset(path.join(__dirname, 'lambda-handler1')),\n stackId: 'edge-lambda-stack-id-1'\n});\n\nconst myFunc2 = new cloudfront.experimental.EdgeFunction(this, 'MyFunction2', {\n runtime: lambda.Runtime.NODEJS_12_X,\n handler: 'index.handler',\n code: lambda.Code.fromAsset(path.join(__dirname, 'lambda-handler2')),\n stackId: 'edge-lambda-stack-id-2'\n});\n```\n\nLambda@Edge functions can also be associated with additional behaviors,\neither at or after Distribution creation time.\n\n```ts\n// assigning at Distribution creation\nconst myOrigin = new origins.S3Origin(myBucket);\nnew cloudfront.Distribution(this, 'myDist', {\n defaultBehavior: { origin: myOrigin },\n additionalBehaviors: {\n 'images/*': {\n origin: myOrigin,\n edgeLambdas: [\n {\n functionVersion: myFunc.currentVersion,\n eventType: cloudfront.LambdaEdgeEventType.ORIGIN_REQUEST,\n includeBody: true, // Optional - 
defaults to false\n },\n ],\n },\n },\n});\n\n// assigning after creation\nmyDistribution.addBehavior('images/*', myOrigin, {\n edgeLambdas: [\n {\n functionVersion: myFunc.currentVersion,\n eventType: cloudfront.LambdaEdgeEventType.VIEWER_RESPONSE,\n },\n ],\n});\n```\n\nAdding an existing Lambda@Edge function created in a different stack to a CloudFront distribution.\n\n```ts\nconst functionVersion = lambda.Version.fromVersionArn(this, 'Version', 'arn:aws:lambda:us-east-1:123456789012:function:functionName:1');\n\nnew cloudfront.Distribution(this, 'distro', {\n defaultBehavior: {\n origin: new origins.S3Origin(s3Bucket),\n edgeLambdas: [\n {\n functionVersion,\n eventType: cloudfront.LambdaEdgeEventType.VIEWER_REQUEST\n },\n ],\n },\n});\n```\n\n### CloudFront Function\n\nYou can also deploy CloudFront functions and add them to a CloudFront distribution.\n\n```ts\nconst cfFunction = new cloudfront.Function(stack, 'Function', {\n code: cloudfront.FunctionCode.fromInline('function handler(event) { return event.request }'),\n});\n\nnew cloudfront.Distribution(stack, 'distro', {\n defaultBehavior: {\n origin: new origins.S3Origin(s3Bucket),\n functionAssociations: [{\n function: cfFunction,\n eventType: cloudfront.FunctionEventType.VIEWER_REQUEST,\n }],\n },\n});\n```\n\nIt will auto-generate the name of the function and deploy it to the `live` stage.\n\nAdditionally, you can load the function's code from a file using the `FunctionCode.fromFile()` method.\n\n### Logging\n\nYou can configure CloudFront to create log files that contain detailed information about every user request that CloudFront receives.\nThe logs can go to either an existing bucket, or a bucket will be created for you.\n\n```ts\n// Simplest form - creates a new bucket and logs to it.\nnew cloudfront.Distribution(this, 'myDist', {\n defaultBehavior: { origin: new origins.HttpOrigin('www.example.com') },\n enableLogging: true,\n});\n\n// You can optionally log to a specific bucket, configure whether 
cookies are logged, and give the log files a prefix.\nnew cloudfront.Distribution(this, 'myDist', {\n defaultBehavior: { origin: new origins.HttpOrigin('www.example.com') },\n enableLogging: true, // Optional, this is implied if logBucket is specified\n logBucket: new s3.Bucket(this, 'LogBucket'),\n logFilePrefix: 'distribution-access-logs/',\n logIncludesCookies: true,\n});\n```\n\n### Importing Distributions\n\nExisting distributions can be imported as well; note that like most imported constructs, an imported distribution cannot be modified.\nHowever, it can be used as a reference for other higher-level constructs.\n\n```ts\nconst distribution = cloudfront.Distribution.fromDistributionAttributes(scope, 'ImportedDist', {\n domainName: 'd111111abcdef8.cloudfront.net',\n distributionId: '012345ABCDEF',\n});\n```\n\n## CloudFrontWebDistribution API\n\n> The `CloudFrontWebDistribution` construct is the original construct written for working with CloudFront distributions.\n> Users are encouraged to use the newer `Distribution` instead, as it has a simpler interface and receives new features faster.\n\nExample usage:\n\n```ts\nconst sourceBucket = new Bucket(this, 'Bucket');\n\nconst distribution = new CloudFrontWebDistribution(this, 'MyDistribution', {\n originConfigs: [\n {\n s3OriginSource: {\n s3BucketSource: sourceBucket\n },\n behaviors : [ {isDefaultBehavior: true}]\n }\n ]\n });\n```\n\n### Viewer certificate\n\nBy default, CloudFront Web Distributions will answer HTTPS requests with CloudFront's default certificate, only containing the distribution `domainName` (e.g. 
d111111abcdef8.cloudfront.net).\nYou can customize the viewer certificate property to provide a custom certificate and/or list of domain name aliases to fit your needs.\n\nSee [Using Alternate Domain Names and HTTPS](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-https-alternate-domain-names.html) in the CloudFront User Guide.\n\n#### Default certificate\n\nYou can customize the default certificate aliases. This is intended to be used in combination with CNAME records in your DNS zone.\n\nExample:\n\n[create a distribution with an default certificate example](test/example.default-cert-alias.lit.ts)\n\n#### ACM certificate\n\nYou can change the default certificate by one stored AWS Certificate Manager, or ACM.\nThose certificate can either be generated by AWS, or purchased by another CA imported into ACM.\n\nFor more information, see [the aws-certificatemanager module documentation](https://docs.aws.amazon.com/cdk/api/latest/docs/aws-certificatemanager-readme.html) or [Importing Certificates into AWS Certificate Manager](https://docs.aws.amazon.com/acm/latest/userguide/import-certificate.html) in the AWS Certificate Manager User Guide.\n\nExample:\n\n[create a distribution with an acm certificate example](test/example.acm-cert-alias.lit.ts)\n\n#### IAM certificate\n\nYou can also import a certificate into the IAM certificate store.\n\nSee [Importing an SSL/TLS Certificate](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cnames-and-https-procedures.html#cnames-and-https-uploading-certificates) in the CloudFront User Guide.\n\nExample:\n\n[create a distribution with an iam certificate example](test/example.iam-cert-alias.lit.ts)\n\n### Trusted Key Groups\n\nCloudFront Web Distributions supports validating signed URLs or signed cookies using key groups. 
When a cache behavior contains trusted key groups, CloudFront requires signed URLs or signed cookies for all requests that match the cache behavior.\n\nExample:\n\n```ts\nconst pubKey = new PublicKey(stack, 'MyPubKey', {\n encodedKey: publicKey,\n});\n\nconst keyGroup = new KeyGroup(stack, 'MyKeyGroup', {\n items: [\n pubKey,\n ],\n});\n\nnew CloudFrontWebDistribution(stack, 'AnAmazingWebsiteProbably', {\n originConfigs: [\n {\n s3OriginSource: {\n s3BucketSource: sourceBucket,\n },\n behaviors: [\n {\n isDefaultBehavior: true,\n trustedKeyGroups: [\n keyGroup,\n ],\n },\n ],\n },\n ],\n});\n```\n\n### Restrictions\n\nCloudFront supports adding restrictions to your distribution.\n\nSee [Restricting the Geographic Distribution of Your Content](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/georestrictions.html) in the CloudFront User Guide.\n\nExample:\n\n```ts\nnew cloudfront.CloudFrontWebDistribution(stack, 'MyDistribution', {\n //...\n geoRestriction: GeoRestriction.whitelist('US', 'UK')\n});\n```\n\n### Connection behaviors between CloudFront and your origin\n\nCloudFront provides you even more control over the connection behaviors between CloudFront and your origin. 
You can now configure the number of connection attempts CloudFront will make to your origin and the origin connection timeout for each attempt.\n\nSee [Origin Connection Attempts](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#origin-connection-attempts)\n\nSee [Origin Connection Timeout](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#origin-connection-timeout)\n\nExample usage:\n\n```ts\nconst distribution = new CloudFrontWebDistribution(this, 'MyDistribution', {\n originConfigs: [\n {\n ...,\n connectionAttempts: 3,\n connectionTimeout: cdk.Duration.seconds(10),\n }\n ]\n});\n```\n\n#### Origin Fallback\n\nIn case the origin source is not available and answers with one of the\nspecified status code the failover origin source will be used.\n\n```ts\nnew CloudFrontWebDistribution(stack, 'ADistribution', {\n originConfigs: [\n {\n s3OriginSource: {\n s3BucketSource: s3.Bucket.fromBucketName(stack, 'aBucket', 'myoriginbucket'),\n originPath: '/',\n originHeaders: {\n 'myHeader': '42',\n },\n originShieldRegion: 'us-west-2'\n },\n failoverS3OriginSource: {\n s3BucketSource: s3.Bucket.fromBucketName(stack, 'aBucketFallback', 'myoriginbucketfallback'),\n originPath: '/somewhere',\n originHeaders: {\n 'myHeader2': '21',\n },\n originShieldRegion: 'us-east-1'\n },\n failoverCriteriaStatusCodes: [FailoverStatusCode.INTERNAL_SERVER_ERROR],\n behaviors: [\n {\n isDefaultBehavior: true,\n },\n ],\n },\n ],\n});\n```\n\n## KeyGroup & PublicKey API\n\nNow you can create a key group to use with CloudFront signed URLs and signed cookies. 
You can add public keys to use with CloudFront features such as signed URLs, signed cookies, and field-level encryption.\n\nThe following example command uses OpenSSL to generate an RSA key pair with a length of 2048 bits and save to the file named `private_key.pem`.\n\n```bash\nopenssl genrsa -out private_key.pem 2048\n```\n\nThe resulting file contains both the public and the private key. The following example command extracts the public key from the file named `private_key.pem` and stores it in `public_key.pem`. \n\n```bash\nopenssl rsa -pubout -in private_key.pem -out public_key.pem\n```\n\nNote: Don't forget to copy/paste the contents of `public_key.pem` file including `-----BEGIN PUBLIC KEY-----` and `-----END PUBLIC KEY-----` lines into `encodedKey` parameter when creating a `PublicKey`.\n\nExample:\n\n```ts\n new cloudfront.KeyGroup(stack, 'MyKeyGroup', {\n items: [\n new cloudfront.PublicKey(stack, 'MyPublicKey', {\n encodedKey: '...', // contents of public_key.pem file\n // comment: 'Key is expiring on ...',\n }),\n ],\n // comment: 'Key group containing public keys ...',\n });\n```\n\nSee:\n\n* https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html\n* https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-trusted-signers.html \n"
|
|
629
|
-
},
|
|
630
459
|
"targets": {
|
|
631
460
|
"dotnet": {
|
|
632
461
|
"namespace": "Amazon.CDK.AWS.CloudFront"
|
|
@@ -639,20 +468,8 @@
|
|
|
639
468
|
}
|
|
640
469
|
}
|
|
641
470
|
},
|
|
642
|
-
"aws-cdk-lib.aws_cloudfront.experimental": {
|
|
643
|
-
"locationInModule": {
|
|
644
|
-
"filename": "lib/aws-cloudfront/lib/index.ts",
|
|
645
|
-
"line": 11
|
|
646
|
-
}
|
|
647
|
-
},
|
|
471
|
+
"aws-cdk-lib.aws_cloudfront.experimental": {},
|
|
648
472
|
"aws-cdk-lib.aws_cloudfront_origins": {
|
|
649
|
-
"locationInModule": {
|
|
650
|
-
"filename": "lib/index.ts",
|
|
651
|
-
"line": 34
|
|
652
|
-
},
|
|
653
|
-
"readme": {
|
|
654
|
-
"markdown": "# CloudFront Origins for the CDK CloudFront Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis library contains convenience methods for defining origins for a CloudFront distribution. You can use this library to create origins from\nS3 buckets, Elastic Load Balancing v2 load balancers, or any other domain name.\n\n## S3 Bucket\n\nAn S3 bucket can be added as an origin. If the bucket is configured as a website endpoint, the distribution can use S3 redirects and S3 custom error\ndocuments.\n\n```ts\nimport { aws_cloudfront as cloudfront } from 'aws-cdk-lib';\nimport { aws_cloudfront_origins as origins } from 'aws-cdk-lib';\n\nconst myBucket = new s3.Bucket(this, 'myBucket');\nnew cloudfront.Distribution(this, 'myDist', {\n defaultBehavior: { origin: new origins.S3Origin(myBucket) },\n});\n```\n\nThe above will treat the bucket differently based on if `IBucket.isWebsite` is set or not. If the bucket is configured as a website, the bucket is\ntreated as an HTTP origin, and the built-in S3 redirects and error pages can be used. Otherwise, the bucket is handled as a bucket origin and\nCloudFront's redirect and error handling will be used. In the latter case, the Origin will create an origin access identity and grant it access to the\nunderlying bucket. This can be used in conjunction with a bucket that is not public to require that your users access your content using CloudFront\nURLs and not S3 URLs directly. Alternatively, a custom origin access identity can be passed to the S3 origin in the properties.\n\n### Adding Custom Headers\n\nYou can configure CloudFront to add custom headers to the requests that it sends to your origin. These custom headers enable you to send and gather information from your origin that you don’t get with typical viewer requests. These headers can even be customized for each origin. 
CloudFront supports custom headers for both for custom and Amazon S3 origins.\n\n```ts\nimport { aws_cloudfront as cloudfront } from 'aws-cdk-lib';\nimport { aws_cloudfront_origins as origins } from 'aws-cdk-lib';\n\nconst myBucket = new s3.Bucket(this, 'myBucket');\nnew cloudfront.Distribution(this, 'myDist', {\n defaultBehavior: { origin: new origins.S3Origin(myBucket, {\n customHeaders: {\n Foo: 'bar',\n },\n })},\n});\n```\n\n## ELBv2 Load Balancer\n\nAn Elastic Load Balancing (ELB) v2 load balancer may be used as an origin. In order for a load balancer to serve as an origin, it must be publicly\naccessible (`internetFacing` is true). Both Application and Network load balancers are supported.\n\n```ts\nimport { aws_ec2 as ec2 } from 'aws-cdk-lib';\nimport { aws_elasticloadbalancingv2 as elbv2 } from 'aws-cdk-lib';\n\nconst vpc = new ec2.Vpc(...);\n// Create an application load balancer in a VPC. 'internetFacing' must be 'true'\n// for CloudFront to access the load balancer and use it as an origin.\nconst lb = new elbv2.ApplicationLoadBalancer(this, 'LB', {\n vpc,\n internetFacing: true\n});\nnew cloudfront.Distribution(this, 'myDist', {\n defaultBehavior: { origin: new origins.LoadBalancerV2Origin(lb) },\n});\n```\n\nThe origin can also be customized to respond on different ports, have different connection properties, etc.\n\n```ts\nconst origin = new origins.LoadBalancerV2Origin(loadBalancer, {\n connectionAttempts: 3,\n connectionTimeout: Duration.seconds(5),\n protocolPolicy: cloudfront.OriginProtocolPolicy.MATCH_VIEWER,\n});\n```\n\n## From an HTTP endpoint\n\nOrigins can also be created from any other HTTP endpoint, given the domain name, and optionally, other origin properties.\n\n```ts\nnew cloudfront.Distribution(this, 'myDist', {\n defaultBehavior: { origin: new origins.HttpOrigin('www.example.com') },\n});\n```\n\nSee the documentation of `@aws-cdk/aws-cloudfront` for more information.\n\n## Failover Origins (Origin Groups)\n\nYou can set up 
CloudFront with origin failover for scenarios that require high availability.\nTo get started, you create an origin group with two origins: a primary and a secondary.\nIf the primary origin is unavailable, or returns specific HTTP response status codes that indicate a failure,\nCloudFront automatically switches to the secondary origin.\nYou achieve that behavior in the CDK using the `OriginGroup` class:\n\n```ts\nnew cloudfront.Distribution(this, 'myDist', {\n defaultBehavior: {\n origin: new origins.OriginGroup({\n primaryOrigin: new origins.S3Origin(myBucket),\n fallbackOrigin: new origins.HttpOrigin('www.example.com'),\n // optional, defaults to: 500, 502, 503 and 504\n fallbackStatusCodes: [404],\n }),\n },\n});\n```\n"
|
|
655
|
-
},
|
|
656
473
|
"targets": {
|
|
657
474
|
"dotnet": {
|
|
658
475
|
"namespace": "Amazon.CDK.AWS.CloudFront.Origins"
|
|
@@ -666,13 +483,6 @@
|
|
|
666
483
|
}
|
|
667
484
|
},
|
|
668
485
|
"aws-cdk-lib.aws_cloudtrail": {
|
|
669
|
-
"locationInModule": {
|
|
670
|
-
"filename": "lib/index.ts",
|
|
671
|
-
"line": 35
|
|
672
|
-
},
|
|
673
|
-
"readme": {
|
|
674
|
-
"markdown": "# AWS CloudTrail Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\n## Trail\n\nAWS CloudTrail enables governance, compliance, and operational and risk auditing of your AWS account. Actions taken by\na user, role, or an AWS service are recorded as events in CloudTrail. Learn more at the [CloudTrail\ndocumentation](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-user-guide.html).\n\nThe `Trail` construct enables ongoing delivery of events as log files to an Amazon S3 bucket. Learn more about [Creating\na Trail for Your AWS Account](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-create-and-update-a-trail.html).\nThe following code creates a simple CloudTrail for your account -\n\n```ts\nconst trail = new cloudtrail.Trail(this, 'CloudTrail');\n```\n\nBy default, this will create a new S3 Bucket that CloudTrail will write to, and choose a few other reasonable defaults\nsuch as turning on multi-region and global service events. \nThe defaults for each property and how to override them are all documented on the `TrailProps` interface.\n\n## Log File Validation\n\nIn order to validate that the CloudTrail log file was not modified after CloudTrail delivered it, CloudTrail provides a\ndigital signature for each file. 
Learn more at [Validating CloudTrail Log File\nIntegrity](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-log-file-validation-intro.html).\n\nThis is enabled on the `Trail` construct by default, but can be turned off by setting `enableFileValidation` to `false`.\n\n```ts\nconst trail = new cloudtrail.Trail(this, 'CloudTrail', {\n enableFileValidation: false,\n});\n```\n\n## Notifications\n\nAmazon SNS notifications can be configured upon new log files containing Trail events are delivered to S3.\nLearn more at [Configuring Amazon SNS Notifications for\nCloudTrail](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/configure-sns-notifications-for-cloudtrail.html).\nThe following code configures an SNS topic to be notified -\n\n```ts\nconst topic = new sns.Topic(this, 'TrailTopic');\nconst trail = new cloudtrail.Trail(this, 'CloudTrail', {\n snsTopic: topic,\n});\n```\n\n## Service Integrations\n\nBesides sending trail events to S3, they can also be configured to notify other AWS services -\n\n### Amazon CloudWatch Logs\n\nCloudTrail events can be delivered to a CloudWatch Logs LogGroup. By default, a new LogGroup is created with a\ndefault retention setting. The following code enables sending CloudWatch logs but specifies a particular retention\nperiod for the created Log Group.\n\n```ts\nconst trail = new cloudtrail.Trail(this, 'CloudTrail', {\n sendToCloudWatchLogs: true,\n cloudWatchLogsRetention: logs.RetentionDays.FOUR_MONTHS, \n});\n```\n\nIf you would like to use a specific log group instead, this can be configured via `cloudwatchLogGroup`.\n\n### Amazon EventBridge\n\nAmazon EventBridge rules can be configured to be triggered when CloudTrail events occur using the `Trail.onEvent()` API.\nUsing APIs available in `aws-events`, these events can be filtered to match to those that are of interest, either from\na specific service, account or time range. 
See [Events delivered via\nCloudTrail](https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/EventTypes.html#events-for-services-not-listed)\nto learn more about the event structure for events from CloudTrail.\n\nThe following code filters events for S3 from a specific AWS account and triggers a lambda function.\n\n```ts\nconst myFunctionHandler = new lambda.Function(this, 'MyFunction', {\n code: lambda.Code.fromAsset('resource/myfunction');\n runtime: lambda.Runtime.NODEJS_12_X,\n handler: 'index.handler',\n});\n\nconst eventRule = Trail.onEvent(this, 'MyCloudWatchEvent', {\n target: new eventTargets.LambdaFunction(myFunctionHandler),\n});\n\neventRule.addEventPattern({\n account: '123456789012',\n source: 'aws.s3',\n});\n```\n\n## Multi-Region & Global Service Events\n\nBy default, a `Trail` is configured to deliver log files from multiple regions to a single S3 bucket for a given\naccount. This creates shadow trails (replication of the trails) in all of the other regions. Learn more about [How\nCloudTrail Behaves Regionally](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-concepts.html#cloudtrail-concepts-regional-and-global-services)\nand about the [`IsMultiRegion`\nproperty](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudtrail-trail.html#cfn-cloudtrail-trail-ismultiregiontrail).\n\nFor most services, events are recorded in the region where the action occurred. For global services such as AWS IAM,\nAWS STS, Amazon CloudFront, Route 53, etc., events are delivered to any trail that includes global services. 
Learn more\n[About Global Service Events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-concepts.html#cloudtrail-concepts-global-service-events).\n\nEvents for global services are turned on by default for `Trail` constructs in the CDK.\n\nThe following code disables multi-region trail delivery and trail delivery for global services for a specific `Trail` -\n\n```ts\nconst trail = new cloudtrail.Trail(this, 'CloudTrail', {\n // ...\n isMultiRegionTrail: false,\n includeGlobalServiceEvents: false,\n});\n```\n\n## Events Types\n\n**Management events** provide information about management operations that are performed on resources in your AWS\naccount. These are also known as control plane operations. Learn more about [Management\nEvents](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-concepts.html#cloudtrail-concepts-events).\n\nBy default, a `Trail` logs all management events. However, they can be configured to either be turned off, or to only\nlog 'Read' or 'Write' events. \n\nThe following code configures the `Trail` to only track management events that are of type 'Read'.\n\n```ts\nconst trail = new cloudtrail.Trail(this, 'CloudTrail', {\n // ...\n managementEvents: ReadWriteType.READ_ONLY,\n});\n```\n\n**Data events** provide information about the resource operations performed on or in a resource. These are also known\nas data plane operations. Learn more about [Data\nEvents](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-concepts.html#cloudtrail-concepts-events).\nBy default, no data events are logged for a `Trail`.\n\nAWS CloudTrail supports data event logging for Amazon S3 objects and AWS Lambda functions.\n\nThe `logAllS3DataEvents()` API configures the trail to log all S3 data events while the `addS3EventSelector()` API can\nbe used to configure logging of S3 data events for specific buckets and specific object prefix. 
The following code\nconfigures logging of S3 data events for `fooBucket` and with object prefix `bar/`.\n\n```ts\nimport { aws_cloudtrail as cloudtrail } from 'aws-cdk-lib';\n\nconst trail = new cloudtrail.Trail(this, 'MyAmazingCloudTrail');\n\n// Adds an event selector to the bucket foo\ntrail.addS3EventSelector([{\n bucket: fooBucket, // 'fooBucket' is of type s3.IBucket\n objectPrefix: 'bar/',\n}]);\n```\n\nSimilarly, the `logAllLambdaDataEvents()` configures the trail to log all Lambda data events while the\n`addLambdaEventSelector()` API can be used to configure logging for specific Lambda functions. The following code\nconfigures logging of Lambda data events for a specific Function.\n\n```ts\nconst trail = new cloudtrail.Trail(this, 'MyAmazingCloudTrail');\nconst amazingFunction = new lambda.Function(stack, 'AnAmazingFunction', {\n runtime: lambda.Runtime.NODEJS_12_X,\n handler: \"hello.handler\",\n code: lambda.Code.fromAsset(\"lambda\"),\n});\n\n// Add an event selector to log data events for the provided Lambda functions.\ntrail.addLambdaEventSelector([ lambdaFunction ]);\n```\n"
|
|
675
|
-
},
|
|
676
486
|
"targets": {
|
|
677
487
|
"dotnet": {
|
|
678
488
|
"namespace": "Amazon.CDK.AWS.CloudTrail"
|
|
@@ -686,13 +496,6 @@
|
|
|
686
496
|
}
|
|
687
497
|
},
|
|
688
498
|
"aws-cdk-lib.aws_cloudwatch": {
|
|
689
|
-
"locationInModule": {
|
|
690
|
-
"filename": "lib/index.ts",
|
|
691
|
-
"line": 36
|
|
692
|
-
},
|
|
693
|
-
"readme": {
|
|
694
|
-
"markdown": "# Amazon CloudWatch Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\n## Metric objects\n\nMetric objects represent a metric that is emitted by AWS services or your own\napplication, such as `CPUUsage`, `FailureCount` or `Bandwidth`.\n\nMetric objects can be constructed directly or are exposed by resources as\nattributes. Resources that expose metrics will have functions that look\nlike `metricXxx()` which will return a Metric object, initialized with defaults\nthat make sense.\n\nFor example, `lambda.Function` objects have the `fn.metricErrors()` method, which\nrepresents the amount of errors reported by that Lambda function:\n\n```ts\nconst errors = fn.metricErrors();\n```\n\n`Metric` objects can be account and region aware. You can specify `account` and `region` as properties of the metric, or use the `metric.attachTo(Construct)` method. `metric.attachTo()` will automatically copy the `region` and `account` fields of the `Construct`, which can come from anywhere in the Construct tree.\n\nYou can also instantiate `Metric` objects to reference any\n[published metric](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/aws-services-cloudwatch-metrics.html)\nthat's not exposed using a convenience method on the CDK construct.\nFor example:\n\n```ts\nconst hostedZone = new route53.HostedZone(this, 'MyHostedZone', { zoneName: \"example.org\" });\nconst metric = new Metric({\n namespace: 'AWS/Route53',\n metricName: 'DNSQueries',\n dimensionsMap: {\n HostedZoneId: hostedZone.hostedZoneId\n }\n})\n```\n\n### Instantiating a new Metric object\n\nIf you want to reference a metric that is not yet exposed by an existing construct,\nyou can instantiate a `Metric` object to represent it. 
For example:\n\n```ts\nconst metric = new Metric({\n namespace: 'MyNamespace',\n metricName: 'MyMetric',\n dimensionsMap: {\n ProcessingStep: 'Download'\n }\n});\n```\n\n### Metric Math\n\nMath expressions are supported by instantiating the `MathExpression` class.\nFor example, a math expression that sums two other metrics looks like this:\n\n```ts\nconst allProblems = new MathExpression({\n expression: \"errors + faults\",\n usingMetrics: {\n errors: myConstruct.metricErrors(),\n faults: myConstruct.metricFaults(),\n }\n})\n```\n\nYou can use `MathExpression` objects like any other metric, including using\nthem in other math expressions:\n\n```ts\nconst problemPercentage = new MathExpression({\n expression: \"(problems / invocations) * 100\",\n usingMetrics: {\n problems: allProblems,\n invocations: myConstruct.metricInvocations()\n }\n})\n```\n\n### Aggregation\n\nTo graph or alarm on metrics you must aggregate them first, using a function\nlike `Average` or a percentile function like `P99`. By default, most Metric objects\nreturned by CDK libraries will be configured as `Average` over `300 seconds` (5 minutes).\nThe exception is if the metric represents a count of discrete events, such as\nfailures. In that case, the Metric object will be configured as `Sum` over `300\nseconds`, i.e. 
it represents the number of times that event occurred over the\ntime period.\n\nIf you want to change the default aggregation of the Metric object (for example,\nthe function or the period), you can do so by passing additional parameters\nto the metric function call:\n\n```ts\nconst minuteErrorRate = fn.metricErrors({\n statistic: 'avg',\n period: Duration.minutes(1),\n label: 'Lambda failure rate'\n});\n```\n\nThis function also allows changing the metric label or color (which will be\nuseful when embedding them in graphs, see below).\n\n> Rates versus Sums\n>\n> The reason for using `Sum` to count discrete events is that *some* events are\n> emitted as either `0` or `1` (for example `Errors` for a Lambda) and some are\n> only emitted as `1` (for example `NumberOfMessagesPublished` for an SNS\n> topic).\n>\n> In case `0`-metrics are emitted, it makes sense to take the `Average` of this\n> metric: the result will be the fraction of errors over all executions.\n>\n> If `0`-metrics are not emitted, the `Average` will always be equal to `1`,\n> and not be very useful.\n>\n> In order to simplify the mental model of `Metric` objects, we default to\n> aggregating using `Sum`, which will be the same for both metrics types. If you\n> happen to know the Metric you want to alarm on makes sense as a rate\n> (`Average`) you can always choose to change the statistic.\n\n## Alarms\n\nAlarms can be created on metrics in one of two ways. 
Either create an `Alarm`\nobject, passing the `Metric` object to set the alarm on:\n\n\n```ts\nnew Alarm(this, 'Alarm', {\n metric: fn.metricErrors(),\n threshold: 100,\n evaluationPeriods: 2,\n});\n```\n\nAlternatively, you can call `metric.createAlarm()`:\n\n```ts\nfn.metricErrors().createAlarm(this, 'Alarm', {\n threshold: 100,\n evaluationPeriods: 2,\n});\n```\n\nThe most important properties to set while creating an Alarms are:\n\n- `threshold`: the value to compare the metric against.\n- `comparisonOperator`: the comparison operation to use, defaults to `metric >= threshold`.\n- `evaluationPeriods`: how many consecutive periods the metric has to be\n breaching the the threshold for the alarm to trigger.\n\nTo create a cross-account alarm, make sure you have enabled [cross-account functionality](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Cross-Account-Cross-Region.html) in CloudWatch. Then, set the `account` property in the `Metric` object either manually or via the `metric.attachTo()` method.\n\n### Alarm Actions\n\nTo add actions to an alarm, use the integration classes from the\n`@aws-cdk/aws-cloudwatch-actions` package. For example, to post a message to\nan SNS topic when an alarm breaches, do the following:\n\n```ts\nimport { aws_cloudwatch_actions as cw_actions } from 'aws-cdk-lib';\n\n// ...\nconst topic = new sns.Topic(stack, 'Topic');\nconst alarm = new cloudwatch.Alarm(stack, 'Alarm', { /* ... 
*/ });\n\nalarm.addAlarmAction(new cw_actions.SnsAction(topic));\n```\n\n### Composite Alarms\n\n[Composite Alarms](https://aws.amazon.com/about-aws/whats-new/2020/03/amazon-cloudwatch-now-allows-you-to-combine-multiple-alarms/) \ncan be created from existing Alarm resources.\n\n```ts\nconst alarmRule = AlarmRule.anyOf(\n AlarmRule.allOf(\n AlarmRule.anyOf(\n alarm1,\n AlarmRule.fromAlarm(alarm2, AlarmState.OK),\n alarm3,\n ),\n AlarmRule.not(AlarmRule.fromAlarm(alarm4, AlarmState.INSUFFICIENT_DATA)),\n ),\n AlarmRule.fromBoolean(false),\n);\n\nnew CompositeAlarm(this, 'MyAwesomeCompositeAlarm', {\n alarmRule,\n});\n```\n\n### A note on units\n\nIn CloudWatch, Metrics datums are emitted with units, such as `seconds` or\n`bytes`. When `Metric` objects are given a `unit` attribute, it will be used to\n*filter* the stream of metric datums for datums emitted using the same `unit`\nattribute.\n\nIn particular, the `unit` field is *not* used to rescale datums or alarm threshold\nvalues (for example, it cannot be used to specify an alarm threshold in\n*Megabytes* if the metric stream is being emitted as *bytes*).\n\nYou almost certainly don't want to specify the `unit` property when creating\n`Metric` objects (which will retrieve all datums regardless of their unit),\nunless you have very specific requirements. Note that in any case, CloudWatch\nonly supports filtering by `unit` for Alarms, not in Dashboard graphs.\n\nPlease see the following GitHub issue for a discussion on real unit\ncalculations in CDK: https://github.com/aws/aws-cdk/issues/5595\n\n## Dashboards\n\nDashboards are set of Widgets stored server-side which can be accessed quickly\nfrom the AWS console. 
Available widgets are graphs of a metric over time, the\ncurrent value of a metric, or a static piece of Markdown which explains what the\ngraphs mean.\n\nThe following widgets are available:\n\n- `GraphWidget` -- shows any number of metrics on both the left and right\n vertical axes.\n- `AlarmWidget` -- shows the graph and alarm line for a single alarm.\n- `SingleValueWidget` -- shows the current value of a set of metrics.\n- `TextWidget` -- shows some static Markdown.\n- `AlarmStatusWidget` -- shows the status of your alarms in a grid view.\n\n### Graph widget\n\nA graph widget can display any number of metrics on either the `left` or\n`right` vertical axis:\n\n```ts\ndashboard.addWidgets(new GraphWidget({\n title: \"Executions vs error rate\",\n\n left: [executionCountMetric],\n\n right: [errorCountMetric.with({\n statistic: \"average\",\n label: \"Error rate\",\n color: Color.GREEN\n })]\n}));\n```\n\nUsing the methods `addLeftMetric()` and `addRightMetric()` you can add metrics to a graph widget later on.\n\nGraph widgets can also display annotations attached to the left or the right y-axis.\n\n```ts\ndashboard.addWidgets(new GraphWidget({\n // ...\n // ...\n\n leftAnnotations: [\n { value: 1800, label: Duration.minutes(30).toHumanString(), color: Color.RED, },\n { value: 3600, label: '1 hour', color: '#2ca02c', }\n ],\n}));\n```\n\nThe graph legend can be adjusted from the default position at bottom of the widget.\n\n```ts\ndashboard.addWidgets(new GraphWidget({\n // ...\n // ...\n\n legendPosition: LegendPosition.RIGHT,\n}));\n```\n\nThe graph can publish live data within the last minute that has not been fully aggregated.\n\n```ts\ndashboard.addWidgets(new GraphWidget({\n // ...\n // ...\n\n liveData: true,\n}));\n```\n\nThe graph view can be changed from default 'timeSeries' to 'bar' or 'pie'.\n\n```ts\ndashboard.addWidgets(new GraphWidget({\n // ...\n // ...\n\n view: GraphWidgetView.BAR,\n}));\n```\n\n### Alarm widget\n\nAn alarm widget shows the graph 
and the alarm line of a single alarm:\n\n```ts\ndashboard.addWidgets(new AlarmWidget({\n title: \"Errors\",\n alarm: errorAlarm,\n}));\n```\n\n### Single value widget\n\nA single-value widget shows the latest value of a set of metrics (as opposed\nto a graph of the value over time):\n\n```ts\ndashboard.addWidgets(new SingleValueWidget({\n metrics: [visitorCount, purchaseCount],\n}));\n```\n\nShow as many digits as can fit, before rounding.\n\n```ts\ndashboard.addWidgets(new SingleValueWidget({\n // ..\n // ..\n\n fullPrecision: true,\n}));\n```\n\n### Text widget\n\nA text widget shows an arbitrary piece of MarkDown. Use this to add explanations\nto your dashboard:\n\n```ts\ndashboard.addWidgets(new TextWidget({\n markdown: '# Key Performance Indicators'\n}));\n```\n\n### Alarm Status widget\n\nAn alarm status widget displays instantly the status of any type of alarms and gives the\nability to aggregate one or more alarms together in a small surface.\n\n```ts\ndashboard.addWidgets(\n new AlarmStatusWidget({\n alarms: [errorAlarm],\n })\n);\n```\n\n### Query results widget\n\nA `LogQueryWidget` shows the results of a query from Logs Insights:\n\n```ts\ndashboard.addWidgets(new LogQueryWidget({\n logGroupNames: ['my-log-group'],\n view: LogQueryVisualizationType.TABLE,\n // The lines will be automatically combined using '\\n|'.\n queryLines: [\n 'fields @message',\n 'filter @message like /Error/',\n ]\n}));\n```\n\n### Dashboard Layout\n\nThe widgets on a dashboard are visually laid out in a grid that is 24 columns\nwide. 
Normally you specify X and Y coordinates for the widgets on a Dashboard,\nbut because this is inconvenient to do manually, the library contains a simple\nlayout system to help you lay out your dashboards the way you want them to.\n\nWidgets have a `width` and `height` property, and they will be automatically\nlaid out either horizontally or vertically stacked to fill out the available\nspace.\n\nWidgets are added to a Dashboard by calling `add(widget1, widget2, ...)`.\nWidgets given in the same call will be laid out horizontally. Widgets given\nin different calls will be laid out vertically. To make more complex layouts,\nyou can use the following widgets to pack widgets together in different ways:\n\n- `Column`: stack two or more widgets vertically.\n- `Row`: lay out two or more widgets horizontally.\n- `Spacer`: take up empty space\n"
|
|
695
|
-
},
|
|
696
499
|
"targets": {
|
|
697
500
|
"dotnet": {
|
|
698
501
|
"namespace": "Amazon.CDK.AWS.CloudWatch"
|
|
@@ -706,13 +509,6 @@
|
|
|
706
509
|
}
|
|
707
510
|
},
|
|
708
511
|
"aws-cdk-lib.aws_cloudwatch_actions": {
|
|
709
|
-
"locationInModule": {
|
|
710
|
-
"filename": "lib/index.ts",
|
|
711
|
-
"line": 37
|
|
712
|
-
},
|
|
713
|
-
"readme": {
|
|
714
|
-
"markdown": "# CloudWatch Alarm Actions library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis library contains a set of classes which can be used as CloudWatch Alarm actions.\n\nThe currently implemented actions are: EC2 Actions, SNS Actions, Autoscaling Actions and Aplication Autoscaling Actions\n\n\n## EC2 Action Example\n\n```ts\nimport { aws_cloudwatch as cw } from 'aws-cdk-lib';\n// Alarm must be configured with an EC2 per-instance metric\nlet alarm: cw.Alarm;\n// Attach a reboot when alarm triggers\nalarm.addAlarmAction(\n new Ec2Action(Ec2InstanceActions.REBOOT)\n);\n```\n\nSee `@aws-cdk/aws-cloudwatch` for more information.\n"
|
|
715
|
-
},
|
|
716
512
|
"targets": {
|
|
717
513
|
"dotnet": {
|
|
718
514
|
"namespace": "Amazon.CDK.AWS.CloudWatch.Actions"
|
|
@@ -726,10 +522,6 @@
|
|
|
726
522
|
}
|
|
727
523
|
},
|
|
728
524
|
"aws-cdk-lib.aws_codeartifact": {
|
|
729
|
-
"locationInModule": {
|
|
730
|
-
"filename": "lib/index.ts",
|
|
731
|
-
"line": 38
|
|
732
|
-
},
|
|
733
525
|
"targets": {
|
|
734
526
|
"dotnet": {
|
|
735
527
|
"namespace": "Amazon.CDK.AWS.CodeArtifact"
|
|
@@ -743,13 +535,6 @@
|
|
|
743
535
|
}
|
|
744
536
|
},
|
|
745
537
|
"aws-cdk-lib.aws_codebuild": {
|
|
746
|
-
"locationInModule": {
|
|
747
|
-
"filename": "lib/index.ts",
|
|
748
|
-
"line": 39
|
|
749
|
-
},
|
|
750
|
-
"readme": {
|
|
751
|
-
"markdown": "# AWS CodeBuild Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nAWS CodeBuild is a fully managed continuous integration service that compiles\nsource code, runs tests, and produces software packages that are ready to\ndeploy. With CodeBuild, you don’t need to provision, manage, and scale your own\nbuild servers. CodeBuild scales continuously and processes multiple builds\nconcurrently, so your builds are not left waiting in a queue. You can get\nstarted quickly by using prepackaged build environments, or you can create\ncustom build environments that use your own build tools. With CodeBuild, you are\ncharged by the minute for the compute resources you use.\n\n## Installation\n\nInstall the module:\n\n```console\n$ npm i @aws-cdk/aws-codebuild\n```\n\nImport it into your code:\n\n```ts\nimport { aws_codebuild as codebuild } from 'aws-cdk-lib';\n```\n\nThe `codebuild.Project` construct represents a build project resource. 
See the\nreference documentation for a comprehensive list of initialization properties,\nmethods and attributes.\n\n## Source\n\nBuild projects are usually associated with a _source_, which is specified via\nthe `source` property which accepts a class that extends the `Source`\nabstract base class.\nThe default is to have no source associated with the build project;\nthe `buildSpec` option is required in that case.\n\nHere's a CodeBuild project with no source which simply prints `Hello,\nCodeBuild!`:\n\n[Minimal Example](./test/integ.defaults.lit.ts)\n\n### `CodeCommitSource`\n\nUse an AWS CodeCommit repository as the source of this build:\n\n```ts\nimport { aws_codebuild as codebuild } from 'aws-cdk-lib';\nimport { aws_codecommit as codecommit } from 'aws-cdk-lib';\n\nconst repository = new codecommit.Repository(this, 'MyRepo', { repositoryName: 'foo' });\nnew codebuild.Project(this, 'MyFirstCodeCommitProject', {\n source: codebuild.Source.codeCommit({ repository }),\n});\n```\n\n### `S3Source`\n\nCreate a CodeBuild project with an S3 bucket as the source:\n\n```ts\nimport { aws_codebuild as codebuild } from 'aws-cdk-lib';\nimport { aws_s3 as s3 } from 'aws-cdk-lib';\n\nconst bucket = new s3.Bucket(this, 'MyBucket');\nnew codebuild.Project(this, 'MyProject', {\n source: codebuild.Source.s3({\n bucket: bucket,\n path: 'path/to/file.zip',\n }),\n});\n```\n\nThe CodeBuild role will be granted to read just the given path from the given `bucket`.\n\n### `GitHubSource` and `GitHubEnterpriseSource`\n\nThese source types can be used to build code from a GitHub repository.\nExample:\n\n```ts\nconst gitHubSource = codebuild.Source.gitHub({\n owner: 'awslabs',\n repo: 'aws-cdk',\n webhook: true, // optional, default: true if `webhookFilters` were provided, false otherwise\n webhookTriggersBatchBuild: true, // optional, default is false\n webhookFilters: [\n codebuild.FilterGroup\n .inEventOf(codebuild.EventAction.PUSH)\n .andBranchIs('master')\n .andCommitMessageIs('the 
commit message'),\n ], // optional, by default all pushes and Pull Requests will trigger a build\n});\n```\n\nTo provide GitHub credentials, please either go to AWS CodeBuild Console to connect\nor call `ImportSourceCredentials` to persist your personal access token.\nExample:\n\n```console\naws codebuild import-source-credentials --server-type GITHUB --auth-type PERSONAL_ACCESS_TOKEN --token <token_value>\n```\n\n### `BitBucketSource`\n\nThis source type can be used to build code from a BitBucket repository.\n\n```ts\nconst bbSource = codebuild.Source.bitBucket({\n owner: 'owner',\n repo: 'repo',\n});\n```\n\n### For all Git sources\n\nFor all Git sources, you can fetch submodules while cloing git repo.\n\n```ts\nconst gitHubSource = codebuild.Source.gitHub({\n owner: 'awslabs',\n repo: 'aws-cdk',\n fetchSubmodules: true,\n});\n```\n\n## Artifacts\n\nCodeBuild Projects can produce Artifacts and upload them to S3. For example:\n\n```ts\nconst project = codebuild.Project(stack, 'MyProject', {\n buildSpec: codebuild.BuildSpec.fromObject({\n version: '0.2',\n }),\n artifacts: codebuild.Artifacts.s3({\n bucket,\n includeBuildId: false,\n packageZip: true,\n path: 'another/path',\n identifier: 'AddArtifact1',\n }),\n});\n```\n\nIf you'd prefer your buildspec to be rendered as YAML in the template,\nuse the `fromObjectToYaml()` method instead of `fromObject()`.\n\nBecause we've not set the `name` property, this example will set the\n`overrideArtifactName` parameter, and produce an artifact named as defined in\nthe Buildspec file, uploaded to an S3 bucket (`bucket`). 
The path will be\n`another/path` and the artifact will be a zipfile.\n\n## CodePipeline\n\nTo add a CodeBuild Project as an Action to CodePipeline,\nuse the `PipelineProject` class instead of `Project`.\nIt's a simple class that doesn't allow you to specify `sources`,\n`secondarySources`, `artifacts` or `secondaryArtifacts`,\nas these are handled by setting input and output CodePipeline `Artifact` instances on the Action,\ninstead of setting them on the Project.\n\n```ts\nconst project = new codebuild.PipelineProject(this, 'Project', {\n // properties as above...\n})\n```\n\nFor more details, see the readme of the `@aws-cdk/@aws-codepipeline-actions` package.\n\n## Caching\n\nYou can save time when your project builds by using a cache. A cache can store reusable pieces of your build environment and use them across multiple builds. Your build project can use one of two types of caching: Amazon S3 or local. In general, S3 caching is a good option for small and intermediate build artifacts that are more expensive to build than to download. Local caching is a good option for large intermediate build artifacts because the cache is immediately available on the build host.\n\n### S3 Caching\n\nWith S3 caching, the cache is stored in an S3 bucket which is available from multiple hosts.\n\n```ts\nnew codebuild.Project(this, 'Project', {\n source: codebuild.Source.bitBucket({\n owner: 'awslabs',\n repo: 'aws-cdk',\n }),\n cache: codebuild.Cache.bucket(new Bucket(this, 'Bucket'))\n});\n```\n\n### Local Caching\n\nWith local caching, the cache is stored on the codebuild instance itself. This is simple,\ncheap and fast, but CodeBuild cannot guarantee a reuse of instance and hence cannot\nguarantee cache hits. For example, when a build starts and caches files locally, if two subsequent builds start at the same time afterwards only one of those builds would get the cache. 
Three different cache modes are supported, which can be turned on individually.\n\n* `LocalCacheMode.SOURCE` caches Git metadata for primary and secondary sources.\n* `LocalCacheMode.DOCKER_LAYER` caches existing Docker layers.\n* `LocalCacheMode.CUSTOM` caches directories you specify in the buildspec file.\n\n```ts\nnew codebuild.Project(this, 'Project', {\n source: codebuild.Source.gitHubEnterprise({\n httpsCloneUrl: 'https://my-github-enterprise.com/owner/repo',\n }),\n\n // Enable Docker AND custom caching\n cache: codebuild.Cache.local(LocalCacheMode.DOCKER_LAYER, LocalCacheMode.CUSTOM)\n});\n```\n\n## Environment\n\nBy default, projects use a small instance with an Ubuntu 18.04 image. You\ncan use the `environment` property to customize the build environment:\n\n* `buildImage` defines the Docker image used. See [Images](#images) below for\n details on how to define build images.\n* `certificate` defines the location of a PEM encoded certificate to import.\n* `computeType` defines the instance type used for the build.\n* `privileged` can be set to `true` to allow privileged access.\n* `environmentVariables` can be set at this level (and also at the project\n level).\n\n## Images\n\nThe CodeBuild library supports both Linux and Windows images via the\n`LinuxBuildImage` and `WindowsBuildImage` classes, respectively.\n\nYou can specify one of the predefined Windows/Linux images by using one\nof the constants such as `WindowsBuildImage.WIN_SERVER_CORE_2019_BASE`,\n`WindowsBuildImage.WINDOWS_BASE_2_0` or `LinuxBuildImage.STANDARD_2_0`.\n\nAlternatively, you can specify a custom image using one of the static methods on\n`LinuxBuildImage`:\n\n* `LinuxBuildImage.fromDockerRegistry(image[, { secretsManagerCredentials }])` to reference an image in any public or private Docker registry.\n* `LinuxBuildImage.fromEcrRepository(repo[, tag])` to reference an image available in an\n ECR repository.\n* `LinuxBuildImage.fromAsset(parent, id, props)` to use an image created from 
a\n local asset.\n* `LinuxBuildImage.fromCodeBuildImageId(id)` to reference a pre-defined, CodeBuild-provided Docker image.\n\nor one of the corresponding methods on `WindowsBuildImage`:\n\n* `WindowsBuildImage.fromDockerRegistry(image[, { secretsManagerCredentials }, imageType])`\n* `WindowsBuildImage.fromEcrRepository(repo[, tag, imageType])`\n* `WindowsBuildImage.fromAsset(parent, id, props, [, imageType])`\n\nNote that the `WindowsBuildImage` version of the static methods accepts an optional parameter of type `WindowsImageType`,\nwhich can be either `WindowsImageType.STANDARD`, the default, or `WindowsImageType.SERVER_2019`:\n\n```ts\nnew codebuild.Project(this, 'Project', {\n environment: {\n buildImage: codebuild.WindowsBuildImage.fromEcrRepository(ecrRepository, 'v1.0', codebuild.WindowsImageType.SERVER_2019),\n // optional certificate to include in the build image\n certificate: {\n bucket: s3.Bucket.fromBucketName(this, 'Bucket', 'my-bucket'),\n objectKey: 'path/to/cert.pem',\n },\n },\n ...\n})\n```\n\nThe following example shows how to define an image from a Docker asset:\n\n[Docker asset example](./test/integ.docker-asset.lit.ts)\n\nThe following example shows how to define an image from an ECR repository:\n\n[ECR example](./test/integ.ecr.lit.ts)\n\nThe following example shows how to define an image from a private docker registry:\n\n[Docker Registry example](./test/integ.docker-registry.lit.ts)\n\n### GPU images\n\nThe class `LinuxGpuBuildImage` contains constants for working with\n[AWS Deep Learning Container images](https://aws.amazon.com/releasenotes/available-deep-learning-containers-images):\n\n\n```ts\nnew codebuild.Project(this, 'Project', {\n environment: {\n buildImage: codebuild.LinuxGpuBuildImage.DLC_TENSORFLOW_2_1_0_INFERENCE,\n },\n ...\n})\n```\n\nOne complication is that the repositories for the DLC images are in\ndifferent accounts in different AWS regions.\nIn most cases, the CDK will handle providing the correct account for you;\nin 
rare cases (for example, deploying to new regions)\nwhere our information might be out of date,\nyou can always specify the account\n(along with the repository name and tag)\nexplicitly using the `awsDeepLearningContainersImage` method:\n\n```ts\nnew codebuild.Project(this, 'Project', {\n environment: {\n buildImage: codebuild.LinuxGpuBuildImage.awsDeepLearningContainersImage(\n 'tensorflow-inference', '2.1.0-gpu-py36-cu101-ubuntu18.04', '123456789012'),\n },\n ...\n})\n```\n\n## Logs\n\nCodeBuild lets you specify an S3 Bucket, CloudWatch Log Group or both to receive logs from your projects.\n\nBy default, logs will go to cloudwatch.\n\n### CloudWatch Logs Example\n\n```ts\nnew codebuild.Project(this, 'Project', {\n logging: {\n cloudWatch: {\n logGroup: new cloudwatch.LogGroup(this, `MyLogGroup`),\n }\n },\n ...\n})\n```\n\n### S3 Logs Example\n\n```ts\nnew codebuild.Project(this, 'Project', {\n logging: {\n s3: {\n bucket: new s3.Bucket(this, `LogBucket`)\n }\n },\n ...\n})\n```\n\n## Credentials\n\nCodeBuild allows you to store credentials used when communicating with various sources,\nlike GitHub:\n\n```ts\nnew codebuild.GitHubSourceCredentials(this, 'CodeBuildGitHubCreds', {\n accessToken: cdk.SecretValue.secretsManager('my-token'),\n});\n// GitHub Enterprise is almost the same,\n// except the class is called GitHubEnterpriseSourceCredentials\n```\n\nand BitBucket:\n\n```ts\nnew codebuild.BitBucketSourceCredentials(this, 'CodeBuildBitBucketCreds', {\n username: cdk.SecretValue.secretsManager('my-bitbucket-creds', { jsonField: 'username' }),\n password: cdk.SecretValue.secretsManager('my-bitbucket-creds', { jsonField: 'password' }),\n});\n```\n\n**Note**: the credentials are global to a given account in a given region -\nthey are not defined per CodeBuild project.\nCodeBuild only allows storing a single credential of a given type\n(GitHub, GitHub Enterprise or BitBucket)\nin a given account in a given region -\nany attempt to save more than one will result in 
an error.\nYou can use the [`list-source-credentials` AWS CLI operation](https://docs.aws.amazon.com/cli/latest/reference/codebuild/list-source-credentials.html)\nto inspect what credentials are stored in your account.\n\n## Test reports\n\nYou can specify a test report in your buildspec:\n\n```ts\nconst project = new codebuild.Project(this, 'Project', {\n buildSpec: codebuild.BuildSpec.fromObject({\n // ...\n reports: {\n myReport: {\n files: '**/*',\n 'base-directory': 'build/test-results',\n },\n },\n }),\n});\n```\n\nThis will create a new test report group,\nwith the name `<ProjectName>-myReport`.\n\nThe project's role in the CDK will always be granted permissions to create and use report groups\nwith names starting with the project's name;\nif you'd rather not have those permissions added,\nyou can opt out of it when creating the project:\n\n```ts\nconst project = new codebuild.Project(this, 'Project', {\n // ...\n grantReportGroupPermissions: false,\n});\n```\n\nAlternatively, you can specify an ARN of an existing resource group,\ninstead of a simple name, in your buildspec:\n\n```ts\n// create a new ReportGroup\nconst reportGroup = new codebuild.ReportGroup(this, 'ReportGroup');\n\nconst project = new codebuild.Project(this, 'Project', {\n buildSpec: codebuild.BuildSpec.fromObject({\n // ...\n reports: {\n [reportGroup.reportGroupArn]: {\n files: '**/*',\n 'base-directory': 'build/test-results',\n },\n },\n }),\n});\n```\n\nIf you do that, you need to grant the project's role permissions to write reports to that report group:\n\n```ts\nreportGroup.grantWrite(project);\n```\n\nFor more information on the test reports feature,\nsee the [AWS CodeBuild documentation](https://docs.aws.amazon.com/codebuild/latest/userguide/test-reporting.html).\n\n## Events\n\nCodeBuild projects can be used either as a source for events or be triggered\nby events via an event rule.\n\n### Using Project as an event target\n\nThe `@aws-cdk/aws-events-targets.CodeBuildProject` 
allows using an AWS CodeBuild\nproject as a AWS CloudWatch event rule target:\n\n```ts\n// start build when a commit is pushed\nimport { aws_events_targets as targets } from 'aws-cdk-lib';\n\ncodeCommitRepository.onCommit('OnCommit', {\n target: new targets.CodeBuildProject(project),\n});\n```\n\n### Using Project as an event source\n\nTo define Amazon CloudWatch event rules for build projects, use one of the `onXxx`\nmethods:\n\n```ts\nconst rule = project.onStateChange('BuildStateChange', {\n target: new targets.LambdaFunction(fn)\n});\n```\n\n## CodeStar Notifications\n\nTo define CodeStar Notification rules for Projects, use one of the `notifyOnXxx()` methods.\nThey are very similar to `onXxx()` methods for CloudWatch events:\n\n```ts\nconst target = new chatbot.SlackChannelConfiguration(stack, 'MySlackChannel', {\n slackChannelConfigurationName: 'YOUR_CHANNEL_NAME',\n slackWorkspaceId: 'YOUR_SLACK_WORKSPACE_ID',\n slackChannelId: 'YOUR_SLACK_CHANNEL_ID',\n});\n\nconst rule = project.notifyOnBuildSucceeded('NotifyOnBuildSucceeded', target);\n```\n\n## Secondary sources and artifacts\n\nCodeBuild Projects can get their sources from multiple places, and produce\nmultiple outputs. For example:\n\n```ts\nconst project = new codebuild.Project(this, 'MyProject', {\n secondarySources: [\n codebuild.Source.codeCommit({\n identifier: 'source2',\n repository: repo,\n }),\n ],\n secondaryArtifacts: [\n codebuild.Artifacts.s3({\n identifier: 'artifact2',\n bucket: bucket,\n path: 'some/path',\n name: 'file.zip',\n }),\n ],\n // ...\n});\n```\n\nNote that the `identifier` property is required for both secondary sources and\nartifacts.\n\nThe contents of the secondary source is available to the build under the\ndirectory specified by the `CODEBUILD_SRC_DIR_<identifier>` environment variable\n(so, `CODEBUILD_SRC_DIR_source2` in the above case).\n\nThe secondary artifacts have their own section in the buildspec, under the\nregular `artifacts` one. 
Each secondary artifact has its own section, beginning\nwith their identifier.\n\nSo, a buildspec for the above Project could look something like this:\n\n```ts\nconst project = new codebuild.Project(this, 'MyProject', {\n // secondary sources and artifacts as above...\n buildSpec: codebuild.BuildSpec.fromObject({\n version: '0.2',\n phases: {\n build: {\n commands: [\n 'cd $CODEBUILD_SRC_DIR_source2',\n 'touch output2.txt',\n ],\n },\n },\n artifacts: {\n 'secondary-artifacts': {\n 'artifact2': {\n 'base-directory': '$CODEBUILD_SRC_DIR_source2',\n 'files': [\n 'output2.txt',\n ],\n },\n },\n },\n }),\n});\n```\n\n### Definition of VPC configuration in CodeBuild Project\n\nTypically, resources in an VPC are not accessible by AWS CodeBuild. To enable\naccess, you must provide additional VPC-specific configuration information as\npart of your CodeBuild project configuration. This includes the VPC ID, the\nVPC subnet IDs, and the VPC security group IDs. VPC-enabled builds are then\nable to access resources inside your VPC.\n\nFor further Information see https://docs.aws.amazon.com/codebuild/latest/userguide/vpc-support.html\n\n**Use Cases**\nVPC connectivity from AWS CodeBuild builds makes it possible to:\n\n* Run integration tests from your build against data in an Amazon RDS database that's isolated on a private subnet.\n* Query data in an Amazon ElastiCache cluster directly from tests.\n* Interact with internal web services hosted on Amazon EC2, Amazon ECS, or services that use internal Elastic Load Balancing.\n* Retrieve dependencies from self-hosted, internal artifact repositories, such as PyPI for Python, Maven for Java, and npm for Node.js.\n* Access objects in an Amazon S3 bucket configured to allow access through an Amazon VPC endpoint only.\n* Query external web services that require fixed IP addresses through the Elastic IP address of the NAT gateway or NAT instance associated with your subnet(s).\n\nYour builds can access any resource that's hosted in your 
VPC.\n\n**Enable Amazon VPC Access in your CodeBuild Projects**\n\nPass the VPC when defining your Project, then make sure to\ngive the CodeBuild's security group the right permissions\nto access the resources that it needs by using the\n`connections` object.\n\nFor example:\n\n```ts\nconst vpc = new ec2.Vpc(this, 'MyVPC');\nconst project = new codebuild.Project(this, 'MyProject', {\n vpc: vpc,\n buildSpec: codebuild.BuildSpec.fromObject({\n // ...\n }),\n});\n\nproject.connections.allowTo(loadBalancer, ec2.Port.tcp(443));\n```\n\n## Project File System Location EFS\n\nAdd support for CodeBuild to build on AWS EFS file system mounts using\nthe new ProjectFileSystemLocation.\nThe `fileSystemLocations` property which accepts a list `ProjectFileSystemLocation`\nas represented by the interface `IFileSystemLocations`.\nThe only supported file system type is `EFS`.\n\nFor example:\n\n```ts\nnew codebuild.Project(stack, 'MyProject', {\n buildSpec: codebuild.BuildSpec.fromObject({\n version: '0.2',\n }),\n fileSystemLocations: [\n codebuild.FileSystemLocation.efs({\n identifier: \"myidentifier2\",\n location: \"myclodation.mydnsroot.com:/loc\",\n mountPoint: \"/media\",\n mountOptions: \"opts\"\n })\n ]\n});\n```\n\nHere's a CodeBuild project with a simple example that creates a project mounted on AWS EFS:\n\n[Minimal Example](./test/integ.project-file-system-location.ts)\n\n## Batch builds\n\nTo enable batch builds you should call `enableBatchBuilds()` on the project instance.\n\nIt returns an object containing the batch service role that was created,\nor `undefined` if batch builds could not be enabled, for example if the project was imported.\n\n```ts\nimport { aws_codebuild as codebuild } from 'aws-cdk-lib';\n\nconst project = new codebuild.Project(this, 'MyProject', { ... 
});\n\nif (project.enableBatchBuilds()) {\n console.log('Batch builds were enabled');\n}\n```\n\n## Timeouts\n\nThere are two types of timeouts that can be set when creating your Project.\nThe `timeout` property can be used to set an upper limit on how long your Project is able to run without being marked as completed.\nThe default is 60 minutes.\nAn example of overriding the default follows.\n\n```ts\nimport { aws_codebuild as codebuild } from 'aws-cdk-lib';\n\nnew codebuild.Project(stack, 'MyProject', {\n timeout: Duration.minutes(90)\n});\n```\n\nThe `queuedTimeout` property can be used to set an upper limit on how your Project remains queued to run.\nThere is no default value for this property.\nAs an example, to allow your Project to queue for up to thirty (30) minutes before the build fails,\nuse the following code.\n\n```ts\nimport { aws_codebuild as codebuild } from 'aws-cdk-lib';\n\nnew codebuild.Project(stack, 'MyProject', {\n queuedTimeout: Duration.minutes(30)\n});\n```\n\n## Limiting concurrency\n\nBy default if a new build is triggered it will be run even if there is a previous build already in progress.\nIt is possible to limit the maximum concurrent builds to value between 1 and the account specific maximum limit.\nBy default there is no explicit limit.\n\n```ts\nimport { aws_codebuild as codebuild } from 'aws-cdk-lib';\n\nnew codebuild.Project(stack, 'MyProject', {\n concurrentBuildLimit: 1\n});\n```\n"
|
|
752
|
-
},
|
|
753
538
|
"targets": {
|
|
754
539
|
"dotnet": {
|
|
755
540
|
"namespace": "Amazon.CDK.AWS.CodeBuild"
|
|
@@ -763,13 +548,6 @@
|
|
|
763
548
|
}
|
|
764
549
|
},
|
|
765
550
|
"aws-cdk-lib.aws_codecommit": {
|
|
766
|
-
"locationInModule": {
|
|
767
|
-
"filename": "lib/index.ts",
|
|
768
|
-
"line": 40
|
|
769
|
-
},
|
|
770
|
-
"readme": {
|
|
771
|
-
"markdown": "# AWS CodeCommit Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nAWS CodeCommit is a version control service that enables you to privately store and manage Git repositories in the AWS cloud.\n\nFor further information on CodeCommit,\nsee the [AWS CodeCommit documentation](https://docs.aws.amazon.com/codecommit).\n\nTo add a CodeCommit Repository to your stack:\n\n```ts\nimport { aws_codecommit as codecommit } from 'aws-cdk-lib';\n\nconst repo = new codecommit.Repository(this, 'Repository' ,{\n repositoryName: 'MyRepositoryName',\n description: 'Some description.', // optional property\n});\n```\n\nUse the `repositoryCloneUrlHttp`, `repositoryCloneUrlSsh` or `repositoryCloneUrlGrc`\nproperty to clone your repository.\n\nTo add an Amazon SNS trigger to your repository:\n\n```ts\n// trigger is established for all repository actions on all branches by default.\nrepo.notify('arn:aws:sns:*:123456789012:my_topic');\n```\n\n## Events\n\nCodeCommit repositories emit Amazon CloudWatch events for certain activities.\nUse the `repo.onXxx` methods to define rules that trigger on these events\nand invoke targets as a result:\n\n```ts\n// starts a CodeBuild project when a commit is pushed to the \"master\" branch of the repo\nrepo.onCommit('CommitToMaster', {\n target: new targets.CodeBuildProject(project),\n branches: ['master'],\n});\n\n// publishes a message to an Amazon SNS topic when a comment is made on a pull request\nconst rule = repo.onCommentOnPullRequest('CommentOnPullRequest', {\n target: new targets.SnsTopic(myTopic),\n});\n```\n\n## CodeStar Notifications\n\nTo define CodeStar Notification rules for Repositories, use one of the `notifyOnXxx()` methods.\nThey are very similar to `onXxx()` methods for CloudWatch events:\n\n```ts\nconst target = new chatbot.SlackChannelConfiguration(stack, 'MySlackChannel', {\n slackChannelConfigurationName: 'YOUR_CHANNEL_NAME',\n slackWorkspaceId: 
'YOUR_SLACK_WORKSPACE_ID',\n slackChannelId: 'YOUR_SLACK_CHANNEL_ID',\n});\nconst rule = repository.notifyOnPullRequestCreated('NotifyOnPullRequestCreated', target);\n"
|
|
772
|
-
},
|
|
773
551
|
"targets": {
|
|
774
552
|
"dotnet": {
|
|
775
553
|
"namespace": "Amazon.CDK.AWS.CodeCommit"
|
|
@@ -783,13 +561,6 @@
|
|
|
783
561
|
}
|
|
784
562
|
},
|
|
785
563
|
"aws-cdk-lib.aws_codedeploy": {
|
|
786
|
-
"locationInModule": {
|
|
787
|
-
"filename": "lib/index.ts",
|
|
788
|
-
"line": 41
|
|
789
|
-
},
|
|
790
|
-
"readme": {
|
|
791
|
-
"markdown": "# AWS CodeDeploy Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nAWS CodeDeploy is a deployment service that automates application deployments to\nAmazon EC2 instances, on-premises instances, serverless Lambda functions, or\nAmazon ECS services.\n\nThe CDK currently supports Amazon EC2, on-premise and AWS Lambda applications.\n\n## EC2/on-premise Applications\n\nTo create a new CodeDeploy Application that deploys to EC2/on-premise instances:\n\n```ts\nimport { aws_codedeploy as codedeploy } from 'aws-cdk-lib';\n\nconst application = new codedeploy.ServerApplication(this, 'CodeDeployApplication', {\n applicationName: 'MyApplication', // optional property\n});\n```\n\nTo import an already existing Application:\n\n```ts\nconst application = codedeploy.ServerApplication.fromServerApplicationName(\n this, 'ExistingCodeDeployApplication', 'MyExistingApplication'\n);\n```\n\n## EC2/on-premise Deployment Groups\n\nTo create a new CodeDeploy Deployment Group that deploys to EC2/on-premise instances:\n\n```ts\nconst deploymentGroup = new codedeploy.ServerDeploymentGroup(this, 'CodeDeployDeploymentGroup', {\n application,\n deploymentGroupName: 'MyDeploymentGroup',\n autoScalingGroups: [asg1, asg2],\n // adds User Data that installs the CodeDeploy agent on your auto-scaling groups hosts\n // default: true\n installAgent: true,\n // adds EC2 instances matching tags\n ec2InstanceTags: new codedeploy.InstanceTagSet(\n {\n // any instance with tags satisfying\n // key1=v1 or key1=v2 or key2 (any value) or value v3 (any key)\n // will match this group\n 'key1': ['v1', 'v2'],\n 'key2': [],\n '': ['v3'],\n },\n ),\n // adds on-premise instances matching tags\n onPremiseInstanceTags: new codedeploy.InstanceTagSet(\n // only instances with tags (key1=v1 or key1=v2) AND key2=v3 will match this set\n {\n 'key1': ['v1', 'v2'],\n },\n {\n 'key2': ['v3'],\n },\n ),\n // CloudWatch alarms\n alarms: [\n new 
cloudwatch.Alarm(/* ... */),\n ],\n // whether to ignore failure to fetch the status of alarms from CloudWatch\n // default: false\n ignorePollAlarmsFailure: false,\n // auto-rollback configuration\n autoRollback: {\n failedDeployment: true, // default: true\n stoppedDeployment: true, // default: false\n deploymentInAlarm: true, // default: true if you provided any alarms, false otherwise\n },\n});\n```\n\nAll properties are optional - if you don't provide an Application,\none will be automatically created.\n\nTo import an already existing Deployment Group:\n\n```ts\nconst deploymentGroup = codedeploy.ServerDeploymentGroup.fromLambdaDeploymentGroupAttributes(this, 'ExistingCodeDeployDeploymentGroup', {\n application,\n deploymentGroupName: 'MyExistingDeploymentGroup',\n});\n```\n\n### Load balancers\n\nYou can [specify a load balancer](https://docs.aws.amazon.com/codedeploy/latest/userguide/integrations-aws-elastic-load-balancing.html)\nwith the `loadBalancer` property when creating a Deployment Group.\n\n`LoadBalancer` is an abstract class with static factory methods that allow you to create instances of it from various sources.\n\nWith Classic Elastic Load Balancer, you provide it directly:\n\n```ts\nimport { aws_elasticloadbalancing as lb } from 'aws-cdk-lib';\n\nconst elb = new lb.LoadBalancer(this, 'ELB', {\n // ...\n});\nelb.addTarget(/* ... 
*/);\nelb.addListener({\n // ...\n});\n\nconst deploymentGroup = new codedeploy.ServerDeploymentGroup(this, 'DeploymentGroup', {\n loadBalancer: codedeploy.LoadBalancer.classic(elb),\n});\n```\n\nWith Application Load Balancer or Network Load Balancer,\nyou provide a Target Group as the load balancer:\n\n```ts\nimport { aws_elasticloadbalancingv2 as lbv2 } from 'aws-cdk-lib';\n\nconst alb = new lbv2.ApplicationLoadBalancer(this, 'ALB', {\n // ...\n});\nconst listener = alb.addListener('Listener', {\n // ...\n});\nconst targetGroup = listener.addTargets('Fleet', {\n // ...\n});\n\nconst deploymentGroup = new codedeploy.ServerDeploymentGroup(this, 'DeploymentGroup', {\n loadBalancer: codedeploy.LoadBalancer.application(targetGroup),\n});\n```\n\n## Deployment Configurations\n\nYou can also pass a Deployment Configuration when creating the Deployment Group:\n\n```ts\nconst deploymentGroup = new codedeploy.ServerDeploymentGroup(this, 'CodeDeployDeploymentGroup', {\n deploymentConfig: codedeploy.ServerDeploymentConfig.ALL_AT_ONCE,\n});\n```\n\nThe default Deployment Configuration is `ServerDeploymentConfig.ONE_AT_A_TIME`.\n\nYou can also create a custom Deployment Configuration:\n\n```ts\nconst deploymentConfig = new codedeploy.ServerDeploymentConfig(this, 'DeploymentConfiguration', {\n deploymentConfigName: 'MyDeploymentConfiguration', // optional property\n // one of these is required, but both cannot be specified at the same time\n minHealthyHostCount: 2,\n minHealthyHostPercentage: 75,\n});\n```\n\nOr import an existing one:\n\n```ts\nconst deploymentConfig = codedeploy.ServerDeploymentConfig.fromServerDeploymentConfigName(\n this, 'ExistingDeploymentConfiguration', 'MyExistingDeploymentConfiguration'\n);\n```\n\n## Lambda Applications\n\nTo create a new CodeDeploy Application that deploys to a Lambda function:\n\n```ts\nimport { aws_codedeploy as codedeploy } from 'aws-cdk-lib';\n\nconst application = new codedeploy.LambdaApplication(this, 'CodeDeployApplication', 
{\n applicationName: 'MyApplication', // optional property\n});\n```\n\nTo import an already existing Application:\n\n```ts\nconst application = codedeploy.LambdaApplication.fromLambdaApplicationName(\n this, 'ExistingCodeDeployApplication', 'MyExistingApplication'\n);\n```\n\n## Lambda Deployment Groups\n\nTo enable traffic shifting deployments for Lambda functions, CodeDeploy uses Lambda Aliases, which can balance incoming traffic between two different versions of your function.\nBefore deployment, the alias sends 100% of invokes to the version used in production.\nWhen you publish a new version of the function to your stack, CodeDeploy will send a small percentage of traffic to the new version, monitor, and validate before shifting 100% of traffic to the new version.\n\nTo create a new CodeDeploy Deployment Group that deploys to a Lambda function:\n\n```ts\nimport { aws_codedeploy as codedeploy } from 'aws-cdk-lib';\nimport { aws_lambda as lambda } from 'aws-cdk-lib';\n\nconst myApplication = new codedeploy.LambdaApplication(..);\nconst func = new lambda.Function(..);\nconst version = func.addVersion('1');\nconst version1Alias = new lambda.Alias(this, 'alias', {\n aliasName: 'prod',\n version\n});\n\nconst deploymentGroup = new codedeploy.LambdaDeploymentGroup(stack, 'BlueGreenDeployment', {\n application: myApplication, // optional property: one will be created for you if not provided\n alias: version1Alias,\n deploymentConfig: codedeploy.LambdaDeploymentConfig.LINEAR_10PERCENT_EVERY_1MINUTE,\n});\n```\n\nIn order to deploy a new version of this function:\n\n1. Increment the version, e.g. `const version = func.addVersion('2')`.\n2. Re-deploy the stack (this will trigger a deployment).\n3. 
Monitor the CodeDeploy deployment as traffic shifts between the versions.\n\n\n### Create a custom Deployment Config\n\nCodeDeploy for Lambda comes with built-in configurations for traffic shifting.\nIf you want to specify your own strategy,\nyou can do so with the CustomLambdaDeploymentConfig construct,\nletting you specify precisely how fast a new function version is deployed.\n\n```ts\nconst config = new codedeploy.CustomLambdaDeploymentConfig(stack, 'CustomConfig', {\n type: codedeploy.CustomLambdaDeploymentConfigType.CANARY,\n interval: Duration.minutes(1),\n percentage: 5,\n});\nconst deploymentGroup = new codedeploy.LambdaDeploymentGroup(stack, 'BlueGreenDeployment', {\n application,\n alias,\n deploymentConfig: config,\n});\n```\n\nYou can specify a custom name for your deployment config, but if you do you will not be able to update the interval/percentage through CDK.\n\n```ts\nconst config = new codedeploy.CustomLambdaDeploymentConfig(stack, 'CustomConfig', {\n type: codedeploy.CustomLambdaDeploymentConfigType.CANARY,\n interval: Duration.minutes(1),\n percentage: 5,\n deploymentConfigName: 'MyDeploymentConfig',\n});\n```\n\n### Rollbacks and Alarms\n\nCodeDeploy will roll back if the deployment fails. 
You can optionally trigger a rollback when one or more alarms are in a failed state:\n\n```ts\nconst deploymentGroup = new codedeploy.LambdaDeploymentGroup(stack, 'BlueGreenDeployment', {\n alias,\n deploymentConfig: codedeploy.LambdaDeploymentConfig.LINEAR_10PERCENT_EVERY_1MINUTE,\n alarms: [\n // pass some alarms when constructing the deployment group\n new cloudwatch.Alarm(stack, 'Errors', {\n comparisonOperator: cloudwatch.ComparisonOperator.GREATER_THAN_THRESHOLD,\n threshold: 1,\n evaluationPeriods: 1,\n metric: alias.metricErrors()\n })\n ]\n});\n\n// or add alarms to an existing group\ndeploymentGroup.addAlarm(new cloudwatch.Alarm(stack, 'BlueGreenErrors', {\n comparisonOperator: cloudwatch.ComparisonOperator.GREATER_THAN_THRESHOLD,\n threshold: 1,\n evaluationPeriods: 1,\n metric: blueGreenAlias.metricErrors()\n}));\n```\n\n### Pre and Post Hooks\n\nCodeDeploy allows you to run an arbitrary Lambda function before traffic shifting actually starts (PreTraffic Hook) and after it completes (PostTraffic Hook).\nWith either hook, you have the opportunity to run logic that determines whether the deployment must succeed or fail.\nFor example, with PreTraffic hook you could run integration tests against the newly created Lambda version (but not serving traffic). 
With PostTraffic hook, you could run end-to-end validation checks.\n\n```ts\nconst warmUpUserCache = new lambda.Function(..);\nconst endToEndValidation = new lambda.Function(..);\n\n// pass a hook whe creating the deployment group\nconst deploymentGroup = new codedeploy.LambdaDeploymentGroup(stack, 'BlueGreenDeployment', {\n alias: alias,\n deploymentConfig: codedeploy.LambdaDeploymentConfig.LINEAR_10PERCENT_EVERY_1MINUTE,\n preHook: warmUpUserCache,\n});\n\n// or configure one on an existing deployment group\ndeploymentGroup.onPostHook(endToEndValidation);\n```\n\n### Import an existing Deployment Group\n\nTo import an already existing Deployment Group:\n\n```ts\nconst deploymentGroup = codedeploy.LambdaDeploymentGroup.import(this, 'ExistingCodeDeployDeploymentGroup', {\n application,\n deploymentGroupName: 'MyExistingDeploymentGroup',\n});\n```\n"
|
|
792
|
-
},
|
|
793
564
|
"targets": {
|
|
794
565
|
"dotnet": {
|
|
795
566
|
"namespace": "Amazon.CDK.AWS.CodeDeploy"
|
|
@@ -803,13 +574,6 @@
|
|
|
803
574
|
}
|
|
804
575
|
},
|
|
805
576
|
"aws-cdk-lib.aws_codeguruprofiler": {
|
|
806
|
-
"locationInModule": {
|
|
807
|
-
"filename": "lib/index.ts",
|
|
808
|
-
"line": 42
|
|
809
|
-
},
|
|
810
|
-
"readme": {
|
|
811
|
-
"markdown": "# AWS::CodeGuruProfiler Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nAmazon CodeGuru Profiler collects runtime performance data from your live applications, and provides recommendations that can help you fine-tune your application performance.\n\n## Installation\n\nImport to your project:\n\n```ts\nimport { aws_codeguruprofiler as codeguruprofiler } from 'aws-cdk-lib';\n```\n\n## Basic usage\n\nHere's how to setup a profiling group and give your compute role permissions to publish to the profiling group to the profiling agent can publish profiling information:\n\n```ts\n// The execution role of your application that publishes to the ProfilingGroup via CodeGuru Profiler Profiling Agent. (the following is merely an example)\nconst publishAppRole = new Role(stack, 'PublishAppRole', {\n assumedBy: new AccountRootPrincipal(),\n});\n\nconst profilingGroup = new ProfilingGroup(stack, 'MyProfilingGroup');\nprofilingGroup.grantPublish(publishAppRole);\n```\n\n## Compute Platform configuration\n\nCode Guru Profiler supports multiple compute environments.\nThey can be configured when creating a Profiling Group by using the `computePlatform` property:\n\n```ts\nconst profilingGroup = new ProfilingGroup(stack, 'MyProfilingGroup', {\n computePlatform: ComputePlatform.AWS_LAMBDA,\n});\n```\n"
|
|
812
|
-
},
|
|
813
577
|
"targets": {
|
|
814
578
|
"dotnet": {
|
|
815
579
|
"namespace": "Amazon.CDK.AWS.CodeGuruProfiler"
|
|
@@ -823,10 +587,6 @@
|
|
|
823
587
|
}
|
|
824
588
|
},
|
|
825
589
|
"aws-cdk-lib.aws_codegurureviewer": {
|
|
826
|
-
"locationInModule": {
|
|
827
|
-
"filename": "lib/index.ts",
|
|
828
|
-
"line": 43
|
|
829
|
-
},
|
|
830
590
|
"targets": {
|
|
831
591
|
"dotnet": {
|
|
832
592
|
"namespace": "Amazon.CDK.AWS.CodeGuruReviewer"
|
|
@@ -840,13 +600,6 @@
|
|
|
840
600
|
}
|
|
841
601
|
},
|
|
842
602
|
"aws-cdk-lib.aws_codepipeline": {
|
|
843
|
-
"locationInModule": {
|
|
844
|
-
"filename": "lib/index.ts",
|
|
845
|
-
"line": 44
|
|
846
|
-
},
|
|
847
|
-
"readme": {
|
|
848
|
-
"markdown": "# AWS CodePipeline Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\n## Pipeline\n\nTo construct an empty Pipeline:\n\n```ts\nimport { aws_codepipeline as codepipeline } from 'aws-cdk-lib';\n\nconst pipeline = new codepipeline.Pipeline(this, 'MyFirstPipeline');\n```\n\nTo give the Pipeline a nice, human-readable name:\n\n```ts\nconst pipeline = new codepipeline.Pipeline(this, 'MyFirstPipeline', {\n pipelineName: 'MyPipeline',\n});\n```\n\nBe aware that in the default configuration, the `Pipeline` construct creates\nan AWS Key Management Service (AWS KMS) Customer Master Key (CMK) for you to\nencrypt the artifacts in the artifact bucket, which incurs a cost of\n**$1/month**. This default configuration is necessary to allow cross-account\nactions.\n\nIf you do not intend to perform cross-account deployments, you can disable\nthe creation of the Customer Master Keys by passing `crossAccountKeys: false`\nwhen defining the Pipeline:\n\n```ts\nconst pipeline = new codepipeline.Pipeline(this, 'MyFirstPipeline', {\n crossAccountKeys: false,\n});\n```\n\n## Stages\n\nYou can provide Stages when creating the Pipeline:\n\n```ts\nconst pipeline = new codepipeline.Pipeline(this, 'MyFirstPipeline', {\n stages: [\n {\n stageName: 'Source',\n actions: [\n // see below...\n ],\n },\n ],\n});\n```\n\nOr append a Stage to an existing Pipeline:\n\n```ts\nconst sourceStage = pipeline.addStage({\n stageName: 'Source',\n actions: [ // optional property\n // see below...\n ],\n});\n```\n\nYou can insert the new Stage at an arbitrary point in the Pipeline:\n\n```ts\nconst someStage = pipeline.addStage({\n stageName: 'SomeStage',\n placement: {\n // note: you can only specify one of the below properties\n rightBefore: anotherStage,\n justAfter: anotherStage\n }\n});\n```\n\n## Actions\n\nActions live in a separate package, `@aws-cdk/aws-codepipeline-actions`.\n\nTo add an Action to a Stage, you can provide it when creating 
the Stage,\nin the `actions` property,\nor you can use the `IStage.addAction()` method to mutate an existing Stage:\n\n```ts\nsourceStage.addAction(someAction);\n```\n\n## Cross-account CodePipelines\n\n> Cross-account Pipeline actions require that the Pipeline has *not* been\n> created with `crossAccountKeys: false`.\n\nMost pipeline Actions accept an AWS resource object to operate on. For example:\n\n* `S3DeployAction` accepts an `s3.IBucket`.\n* `CodeBuildAction` accepts a `codebuild.IProject`.\n* etc.\n\nThese resources can be either newly defined (`new s3.Bucket(...)`) or imported\n(`s3.Bucket.fromBucketAttributes(...)`) and identify the resource that should\nbe changed.\n\nThese resources can be in different accounts than the pipeline itself. For\nexample, the following action deploys to an imported S3 bucket from a\ndifferent account:\n\n```ts\nstage.addAction(new codepipeline_actions.S3DeployAction({\n bucket: s3.Bucket.fromBucketAttributes(this, 'Bucket', {\n account: '123456789012',\n // ...\n }),\n // ...\n}));\n```\n\nActions that don't accept a resource object accept an explicit `account` parameter:\n\n```ts\nstage.addAction(new codepipeline_actions.CloudFormationCreateUpdateStackAction({\n account: '123456789012',\n // ...\n}));\n```\n\nThe `Pipeline` construct automatically defines an **IAM Role** for you in the\ntarget account which the pipeline will assume to perform that action. This\nRole will be defined in a **support stack** named\n`<PipelineStackName>-support-<account>`, that will automatically be deployed\nbefore the stack containing the pipeline.\n\nIf you do not want to use the generated role, you can also explicitly pass a\n`role` when creating the action. 
In that case, the action will operate in the\naccount the role belongs to:\n\n```ts\nstage.addAction(new codepipeline_actions.CloudFormationCreateUpdateStackAction({\n // ...\n role: iam.Role.fromRoleArn(this, 'ActionRole', '...'),\n}));\n```\n\n## Cross-region CodePipelines\n\nSimilar to how you set up a cross-account Action, the AWS resource object you\npass to actions can also be in different *Regions*. For example, the\nfollowing Action deploys to an imported S3 bucket from a different Region:\n\n```ts\nstage.addAction(new codepipeline_actions.S3DeployAction({\n bucket: s3.Bucket.fromBucketAttributes(this, 'Bucket', {\n region: 'us-west-1',\n // ...\n }),\n // ...\n}));\n```\n\nActions that don't take an AWS resource will accept an explicit `region`\nparameter:\n\n```ts\nstage.addAction(new codepipeline_actions.CloudFormationCreateUpdateStackAction({\n // ...\n region: 'us-west-1',\n}));\n```\n\nThe `Pipeline` construct automatically defines a **replication bucket** for\nyou in the target region, which the pipeline will replicate artifacts to and\nfrom. This Bucket will be defined in a **support stack** named\n`<PipelineStackName>-support-<region>`, that will automatically be deployed\nbefore the stack containing the pipeline.\n\nIf you don't want to use these support stacks, and already have buckets in\nplace to serve as replication buckets, you can supply these at Pipeline definition\ntime using the `crossRegionReplicationBuckets` parameter. 
Example:\n\n```ts\nconst pipeline = new codepipeline.Pipeline(this, 'MyFirstPipeline', {\n // ...\n\n crossRegionReplicationBuckets: {\n // note that a physical name of the replication Bucket must be known at synthesis time\n 'us-west-1': s3.Bucket.fromBucketAttributes(this, 'UsWest1ReplicationBucket', {\n bucketName: 'my-us-west-1-replication-bucket',\n // optional KMS key\n encryptionKey: kms.Key.fromKeyArn(this, 'UsWest1ReplicationKey',\n 'arn:aws:kms:us-west-1:123456789012:key/1234-5678-9012'\n ),\n }),\n },\n});\n```\n\nSee [the AWS docs here](https://docs.aws.amazon.com/codepipeline/latest/userguide/actions-create-cross-region.html)\nfor more information on cross-region CodePipelines.\n\n### Creating an encrypted replication bucket\n\nIf you're passing a replication bucket created in a different stack,\nlike this:\n\n```ts\nconst replicationStack = new Stack(app, 'ReplicationStack', {\n env: {\n region: 'us-west-1',\n },\n});\nconst key = new kms.Key(replicationStack, 'ReplicationKey');\nconst replicationBucket = new s3.Bucket(replicationStack, 'ReplicationBucket', {\n // like was said above - replication buckets need a set physical name\n bucketName: PhysicalName.GENERATE_IF_NEEDED,\n encryptionKey: key, // does not work!\n});\n\n// later...\nnew codepipeline.Pipeline(pipelineStack, 'Pipeline', {\n crossRegionReplicationBuckets: {\n 'us-west-1': replicationBucket,\n },\n});\n```\n\nWhen trying to encrypt it\n(and note that if any of the cross-region actions happen to be cross-account as well,\nthe bucket *has to* be encrypted - otherwise the pipeline will fail at runtime),\nyou cannot use a key directly - KMS keys don't have physical names,\nand so you can't reference them across environments.\n\nIn this case, you need to use an alias in place of the key when creating the bucket:\n\n```ts\nconst key = new kms.Key(replicationStack, 'ReplicationKey');\nconst alias = new kms.Alias(replicationStack, 'ReplicationAlias', {\n // aliasName is required\n aliasName: 
PhysicalName.GENERATE_IF_NEEDED,\n targetKey: key,\n});\nconst replicationBucket = new s3.Bucket(replicationStack, 'ReplicationBucket', {\n bucketName: PhysicalName.GENERATE_IF_NEEDED,\n encryptionKey: alias,\n});\n```\n\n## Variables\n\nThe library supports the CodePipeline Variables feature.\nEach action class that emits variables has a separate variables interface,\naccessed as a property of the action instance called `variables`.\nYou instantiate the action class and assign it to a local variable;\nwhen you want to use a variable in the configuration of a different action,\nyou access the appropriate property of the interface returned from `variables`,\nwhich represents a single variable.\nExample:\n\n```ts\n// MyAction is some action type that produces variables\nconst myAction = new MyAction({\n // ...\n});\nnew OtherAction({\n // ...\n config: myAction.variables.myVariable,\n});\n```\n\nThe namespace name that will be used will be automatically generated by the pipeline construct,\nbased on the stage and action name;\nyou can pass a custom name when creating the action instance:\n\n```ts\nconst myAction = new MyAction({\n // ...\n variablesNamespace: 'MyNamespace',\n});\n```\n\nThere are also global variables available,\nnot tied to any action;\nthese are accessed through static properties of the `GlobalVariables` class:\n\n```ts\nnew OtherAction({\n // ...\n config: codepipeline.GlobalVariables.executionId,\n});\n```\n\nCheck the documentation of the `@aws-cdk/aws-codepipeline-actions`\nfor details on how to use the variables for each action class.\n\nSee the [CodePipeline documentation](https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-variables.html)\nfor more details on how to use the variables feature.\n\n## Events\n\n### Using a pipeline as an event target\n\nA pipeline can be used as a target for a CloudWatch event rule:\n\n```ts\nimport { aws_events_targets as targets } from 'aws-cdk-lib';\nimport { aws_events as events } from 
'aws-cdk-lib';\n\n// kick off the pipeline every day\nconst rule = new events.Rule(this, 'Daily', {\n schedule: events.Schedule.rate(Duration.days(1)),\n});\n\nrule.addTarget(new targets.CodePipeline(pipeline));\n```\n\nWhen a pipeline is used as an event target, the\n\"codepipeline:StartPipelineExecution\" permission is granted to the AWS\nCloudWatch Events service.\n\n### Event sources\n\nPipelines emit CloudWatch events. To define event rules for events emitted by\nthe pipeline, stages or action, use the `onXxx` methods on the respective\nconstruct:\n\n```ts\nmyPipeline.onStateChange('MyPipelineStateChange', target);\nmyStage.onStateChange('MyStageStateChange', target);\nmyAction.onStateChange('MyActionStateChange', target);\n```\n\n## CodeStar Notifications\n\nTo define CodeStar Notification rules for Pipelines, use one of the `notifyOnXxx()` methods.\nThey are very similar to `onXxx()` methods for CloudWatch events:\n\n```ts\nconst target = new chatbot.SlackChannelConfiguration(stack, 'MySlackChannel', {\n slackChannelConfigurationName: 'YOUR_CHANNEL_NAME',\n slackWorkspaceId: 'YOUR_SLACK_WORKSPACE_ID',\n slackChannelId: 'YOUR_SLACK_CHANNEL_ID',\n});\n\nconst rule = pipeline.notifyOnExecutionStateChange('NotifyOnExecutionStateChange', target);\n```\n"
|
|
849
|
-
},
|
|
850
603
|
"targets": {
|
|
851
604
|
"dotnet": {
|
|
852
605
|
"namespace": "Amazon.CDK.AWS.CodePipeline"
|
|
@@ -860,13 +613,6 @@
|
|
|
860
613
|
}
|
|
861
614
|
},
|
|
862
615
|
"aws-cdk-lib.aws_codepipeline_actions": {
|
|
863
|
-
"locationInModule": {
|
|
864
|
-
"filename": "lib/index.ts",
|
|
865
|
-
"line": 45
|
|
866
|
-
},
|
|
867
|
-
"readme": {
|
|
868
|
-
"markdown": "# AWS CodePipeline Actions\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis package contains Actions that can be used in a CodePipeline.\n\n```ts\nimport { aws_codepipeline as codepipeline } from 'aws-cdk-lib';\nimport { aws_codepipeline_actions as codepipeline_actions } from 'aws-cdk-lib';\n```\n\n## Sources\n\n### AWS CodeCommit\n\nTo use a CodeCommit Repository in a CodePipeline:\n\n```ts\nimport { aws_codecommit as codecommit } from 'aws-cdk-lib';\n\nconst repo = new codecommit.Repository(this, 'Repo', {\n // ...\n});\n\nconst pipeline = new codepipeline.Pipeline(this, 'MyPipeline', {\n pipelineName: 'MyPipeline',\n});\nconst sourceOutput = new codepipeline.Artifact();\nconst sourceAction = new codepipeline_actions.CodeCommitSourceAction({\n actionName: 'CodeCommit',\n repository: repo,\n output: sourceOutput,\n});\npipeline.addStage({\n stageName: 'Source',\n actions: [sourceAction],\n});\n```\n\nIf you want to use existing role which can be used by on commit event rule.\nYou can specify the role object in eventRole property.\n\n```ts\nconst eventRole = iam.Role.fromRoleArn(this, 'Event-role', 'roleArn');\nconst sourceAction = new codepipeline_actions.CodeCommitSourceAction({\n actionName: 'CodeCommit',\n repository: repo,\n output: new codepipeline.Artifact(),\n eventRole,\n});\n```\n\nIf you want to clone the entire CodeCommit repository (only available for CodeBuild actions),\nyou can set the `codeBuildCloneOutput` property to `true`:\n\n```ts\nconst sourceOutput = new codepipeline.Artifact();\nconst sourceAction = new codepipeline_actions.CodeCommitSourceAction({\n actionName: 'CodeCommit',\n repository: repo,\n output: sourceOutput,\n codeBuildCloneOutput: true,\n});\n\nconst buildAction = new codepipeline_actions.CodeBuildAction({\n actionName: 'CodeBuild',\n project,\n input: sourceOutput, // The build action must use the CodeCommitSourceAction output as input.\n outputs: [new codepipeline.Artifact()], 
// optional\n});\n```\n\nThe CodeCommit source action emits variables:\n\n```ts\nconst sourceAction = new codepipeline_actions.CodeCommitSourceAction({\n // ...\n variablesNamespace: 'MyNamespace', // optional - by default, a name will be generated for you\n});\n\n// later:\n\nnew codepipeline_actions.CodeBuildAction({\n // ...\n environmentVariables: {\n COMMIT_ID: {\n value: sourceAction.variables.commitId,\n },\n },\n});\n```\n\n### GitHub\n\nIf you want to use a GitHub repository as the source, you must create:\n\n* A [GitHub Access Token](https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line),\n with scopes **repo** and **admin:repo_hook**.\n* A [Secrets Manager Secret](https://docs.aws.amazon.com/secretsmanager/latest/userguide/manage_create-basic-secret.html)\n with the value of the **GitHub Access Token**. Pick whatever name you want (for example `my-github-token`).\n This token can be stored either as Plaintext or as a Secret key/value.\n If you stored the token as Plaintext,\n set `cdk.SecretValue.secretsManager('my-github-token')` as the value of `oauthToken`.\n If you stored it as a Secret key/value,\n you must set `cdk.SecretValue.secretsManager('my-github-token', { jsonField : 'my-github-token' })` as the value of `oauthToken`.\n\nTo use GitHub as the source of a CodePipeline:\n\n```ts\n// Read the secret from Secrets Manager\nconst sourceOutput = new codepipeline.Artifact();\nconst sourceAction = new codepipeline_actions.GitHubSourceAction({\n actionName: 'GitHub_Source',\n owner: 'awslabs',\n repo: 'aws-cdk',\n oauthToken: cdk.SecretValue.secretsManager('my-github-token'),\n output: sourceOutput,\n branch: 'develop', // default: 'master'\n});\npipeline.addStage({\n stageName: 'Source',\n actions: [sourceAction],\n});\n```\n\nThe GitHub source action emits variables:\n\n```ts\nconst sourceAction = new codepipeline_actions.GitHubSourceAction({\n // ...\n variablesNamespace: 'MyNamespace', // 
optional - by default, a name will be generated for you\n});\n\n// later:\n\nnew codepipeline_actions.CodeBuildAction({\n // ...\n environmentVariables: {\n COMMIT_URL: {\n value: sourceAction.variables.commitUrl,\n },\n },\n});\n```\n\n### BitBucket\n\nCodePipeline can use a BitBucket Git repository as a source:\n\n**Note**: you have to manually connect CodePipeline through the AWS Console with your BitBucket account.\nThis is a one-time operation for a given AWS account in a given region.\nThe simplest way to do that is to either start creating a new CodePipeline,\nor edit an existing one, while being logged in to BitBucket.\nChoose BitBucket as the source,\nand grant CodePipeline permissions to your BitBucket account.\nCopy & paste the Connection ARN that you get in the console,\nor use the [`codestar-connections list-connections` AWS CLI operation](https://docs.aws.amazon.com/cli/latest/reference/codestar-connections/list-connections.html)\nto find it.\nAfter that, you can safely abort creating or editing the pipeline -\nthe connection has already been created.\n\n```ts\nconst sourceOutput = new codepipeline.Artifact();\nconst sourceAction = new codepipeline_actions.CodeStarConnectionsSourceAction({\n actionName: 'BitBucket_Source',\n owner: 'aws',\n repo: 'aws-cdk',\n output: sourceOutput,\n connectionArn: 'arn:aws:codestar-connections:us-east-1:123456789012:connection/12345678-abcd-12ab-34cdef5678gh',\n});\n```\n\nYou can also use the `CodeStarConnectionsSourceAction` to connect to GitHub, in the same way\n(you just have to select GitHub as the source when creating the connection in the console).\n\n### AWS S3 Source\n\nTo use an S3 Bucket as a source in CodePipeline:\n\n```ts\nimport { aws_s3 as s3 } from 'aws-cdk-lib';\n\nconst sourceBucket = new s3.Bucket(this, 'MyBucket', {\n versioned: true, // a Bucket used as a source in CodePipeline must be versioned\n});\n\nconst pipeline = new codepipeline.Pipeline(this, 'MyPipeline');\nconst sourceOutput = new 
codepipeline.Artifact();\nconst sourceAction = new codepipeline_actions.S3SourceAction({\n actionName: 'S3Source',\n bucket: sourceBucket,\n bucketKey: 'path/to/file.zip',\n output: sourceOutput,\n});\npipeline.addStage({\n stageName: 'Source',\n actions: [sourceAction],\n});\n```\n\nThe region of the action will be determined by the region the bucket itself is in.\nWhen using a newly created bucket,\nthat region will be taken from the stack the bucket belongs to;\nfor an imported bucket,\nyou can specify the region explicitly:\n\n```ts\nconst sourceBucket = s3.Bucket.fromBucketAttributes(this, 'SourceBucket', {\n bucketName: 'my-bucket',\n region: 'ap-southeast-1',\n});\n```\n\nBy default, the Pipeline will poll the Bucket to detect changes.\nYou can change that behavior to use CloudWatch Events by setting the `trigger`\nproperty to `S3Trigger.EVENTS` (it's `S3Trigger.POLL` by default).\nIf you do that, make sure the source Bucket is part of an AWS CloudTrail Trail -\notherwise, the CloudWatch Events will not be emitted,\nand your Pipeline will not react to changes in the Bucket.\nYou can do it through the CDK:\n\n```ts\nimport { aws_cloudtrail as cloudtrail } from 'aws-cdk-lib';\n\nconst key = 'some/key.zip';\nconst trail = new cloudtrail.Trail(this, 'CloudTrail');\ntrail.addS3EventSelector([{\n bucket: sourceBucket,\n objectPrefix: key,\n}], {\n readWriteType: cloudtrail.ReadWriteType.WRITE_ONLY,\n});\nconst sourceAction = new codepipeline_actions.S3SourceAction({\n actionName: 'S3Source',\n bucketKey: key,\n bucket: sourceBucket,\n output: sourceOutput,\n trigger: codepipeline_actions.S3Trigger.EVENTS, // default: S3Trigger.POLL\n});\n```\n\nThe S3 source action emits variables:\n\n```ts\nconst sourceAction = new codepipeline_actions.S3SourceAction({\n // ...\n variablesNamespace: 'MyNamespace', // optional - by default, a name will be generated for you\n});\n\n// later:\n\nnew codepipeline_actions.CodeBuildAction({\n // ...\n environmentVariables: {\n 
VERSION_ID: {\n value: sourceAction.variables.versionId,\n },\n },\n});\n```\n\n### AWS ECR\n\nTo use an ECR Repository as a source in a Pipeline:\n\n```ts\nimport { aws_ecr as ecr } from 'aws-cdk-lib';\n\nconst pipeline = new codepipeline.Pipeline(this, 'MyPipeline');\nconst sourceOutput = new codepipeline.Artifact();\nconst sourceAction = new codepipeline_actions.EcrSourceAction({\n actionName: 'ECR',\n repository: ecrRepository,\n imageTag: 'some-tag', // optional, default: 'latest'\n output: sourceOutput,\n});\npipeline.addStage({\n stageName: 'Source',\n actions: [sourceAction],\n});\n```\n\nThe ECR source action emits variables:\n\n```ts\nconst sourceAction = new codepipeline_actions.EcrSourceAction({\n // ...\n variablesNamespace: 'MyNamespace', // optional - by default, a name will be generated for you\n});\n\n// later:\n\nnew codepipeline_actions.CodeBuildAction({\n // ...\n environmentVariables: {\n IMAGE_URI: {\n value: sourceAction.variables.imageUri,\n },\n },\n});\n```\n\n## Build & test\n\n### AWS CodeBuild\n\nExample of a CodeBuild Project used in a Pipeline, alongside CodeCommit:\n\n```ts\nimport { aws_codebuild as codebuild } from 'aws-cdk-lib';\nimport { aws_codecommit as codecommit } from 'aws-cdk-lib';\n\nconst repository = new codecommit.Repository(this, 'MyRepository', {\n repositoryName: 'MyRepository',\n});\nconst project = new codebuild.PipelineProject(this, 'MyProject');\n\nconst sourceOutput = new codepipeline.Artifact();\nconst sourceAction = new codepipeline_actions.CodeCommitSourceAction({\n actionName: 'CodeCommit',\n repository,\n output: sourceOutput,\n});\nconst buildAction = new codepipeline_actions.CodeBuildAction({\n actionName: 'CodeBuild',\n project,\n input: sourceOutput,\n outputs: [new codepipeline.Artifact()], // optional\n executeBatchBuild: true, // optional, defaults to false\n combineBatchBuildArtifacts: true, // optional, defaults to false\n});\n\nnew codepipeline.Pipeline(this, 'MyPipeline', {\n stages: [\n {\n 
stageName: 'Source',\n actions: [sourceAction],\n },\n {\n stageName: 'Build',\n actions: [buildAction],\n },\n ],\n});\n```\n\nThe default category of the CodeBuild Action is `Build`;\nif you want a `Test` Action instead,\noverride the `type` property:\n\n```ts\nconst testAction = new codepipeline_actions.CodeBuildAction({\n actionName: 'IntegrationTest',\n project,\n input: sourceOutput,\n type: codepipeline_actions.CodeBuildActionType.TEST, // default is BUILD\n});\n```\n\n#### Multiple inputs and outputs\n\nWhen you want to have multiple inputs and/or outputs for a Project used in a\nPipeline, instead of using the `secondarySources` and `secondaryArtifacts`\nproperties of the `Project` class, you need to use the `extraInputs` and\n`outputs` properties of the CodeBuild CodePipeline\nActions. Example:\n\n```ts\nconst sourceOutput1 = new codepipeline.Artifact();\nconst sourceAction1 = new codepipeline_actions.CodeCommitSourceAction({\n actionName: 'Source1',\n repository: repository1,\n output: sourceOutput1,\n});\nconst sourceOutput2 = new codepipeline.Artifact('source2');\nconst sourceAction2 = new codepipeline_actions.CodeCommitSourceAction({\n actionName: 'Source2',\n repository: repository2,\n output: sourceOutput2,\n});\n\nconst buildAction = new codepipeline_actions.CodeBuildAction({\n actionName: 'Build',\n project,\n input: sourceOutput1,\n extraInputs: [\n sourceOutput2, // this is where 'source2' comes from\n ],\n outputs: [\n new codepipeline.Artifact('artifact1'), // for better buildspec readability - see below\n new codepipeline.Artifact('artifact2'),\n ],\n});\n```\n\n**Note**: when a CodeBuild Action in a Pipeline has more than one output, it\nonly uses the `secondary-artifacts` field of the buildspec, never the\nprimary output specification directly under `artifacts`. 
Because of that, it\npays to explicitly name all output artifacts of that Action, like we did\nabove, so that you know what name to use in the buildspec.\n\nExample buildspec for the above project:\n\n```ts\nconst project = new codebuild.PipelineProject(this, 'MyProject', {\n buildSpec: codebuild.BuildSpec.fromObject({\n version: '0.2',\n phases: {\n build: {\n commands: [\n // By default, you're in a directory with the contents of the repository from sourceAction1.\n // Use the CODEBUILD_SRC_DIR_source2 environment variable\n // to get a path to the directory with the contents of the second input repository.\n ],\n },\n },\n artifacts: {\n 'secondary-artifacts': {\n 'artifact1': {\n // primary Action output artifact,\n // available as buildAction.outputArtifact\n },\n 'artifact2': {\n // additional output artifact,\n // available as buildAction.additionalOutputArtifact('artifact2')\n },\n },\n },\n }),\n // ...\n});\n```\n\n#### Variables\n\nThe CodeBuild action emits variables.\nUnlike many other actions, the variables are not static,\nbut dynamic, defined in the buildspec,\nin the 'exported-variables' subsection of the 'env' section.\nExample:\n\n```ts\nconst buildAction = new codepipeline_actions.CodeBuildAction({\n actionName: 'Build1',\n input: sourceOutput,\n project: new codebuild.PipelineProject(this, 'Project', {\n buildSpec: codebuild.BuildSpec.fromObject({\n version: '0.2',\n env: {\n 'exported-variables': [\n 'MY_VAR',\n ],\n },\n phases: {\n build: {\n commands: 'export MY_VAR=\"some value\"',\n },\n },\n }),\n }),\n variablesNamespace: 'MyNamespace', // optional - by default, a name will be generated for you\n});\n\n// later:\n\nnew codepipeline_actions.CodeBuildAction({\n // ...\n environmentVariables: {\n MyVar: {\n value: buildAction.variable('MY_VAR'),\n },\n },\n});\n```\n\n### Jenkins\n\nIn order to use Jenkins Actions in the Pipeline,\nyou first need to create a `JenkinsProvider`:\n\n```ts\nconst jenkinsProvider = new 
codepipeline_actions.JenkinsProvider(this, 'JenkinsProvider', {\n providerName: 'MyJenkinsProvider',\n serverUrl: 'http://my-jenkins.com:8080',\n version: '2', // optional, default: '1'\n});\n```\n\nIf you've registered a Jenkins provider in a different CDK app,\nor outside the CDK (in the CodePipeline AWS Console, for example),\nyou can import it:\n\n```ts\nconst jenkinsProvider = codepipeline_actions.JenkinsProvider.import(this, 'JenkinsProvider', {\n providerName: 'MyJenkinsProvider',\n serverUrl: 'http://my-jenkins.com:8080',\n version: '2', // optional, default: '1'\n});\n```\n\nNote that a Jenkins provider\n(identified by the provider name-category(build/test)-version tuple)\nmust always be registered in the given account, in the given AWS region,\nbefore it can be used in CodePipeline.\n\nWith a `JenkinsProvider`,\nwe can create a Jenkins Action:\n\n```ts\nconst buildAction = new codepipeline_actions.JenkinsAction({\n actionName: 'JenkinsBuild',\n jenkinsProvider: jenkinsProvider,\n projectName: 'MyProject',\n type: codepipeline_actions.JenkinsActionType.BUILD,\n});\n```\n\n## Deploy\n\n### AWS CloudFormation\n\nThis module contains Actions that allows you to deploy to CloudFormation from AWS CodePipeline.\n\nFor example, the following code fragment defines a pipeline that automatically deploys a CloudFormation template\ndirectly from a CodeCommit repository, with a manual approval step in between to confirm the changes:\n\n[example Pipeline to deploy CloudFormation](test/integ.cfn-template-from-repo.lit.ts)\n\nSee [the AWS documentation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/continuous-delivery-codepipeline.html)\nfor more details about using CloudFormation in CodePipeline.\n\n#### Actions defined by this package\n\nThis package contains the following CloudFormation actions:\n\n* **CloudFormationCreateUpdateStackAction** - Deploy a CloudFormation template directly from the pipeline. 
The indicated stack is created,\n or updated if it already exists. If the stack is in a failure state, deployment will fail (unless `replaceOnFailure`\n is set to `true`, in which case it will be destroyed and recreated).\n* **CloudFormationDeleteStackAction** - Delete the stack with the given name.\n* **CloudFormationCreateReplaceChangeSetAction** - Prepare a change set to be applied later. You will typically use change sets if you want\n to manually verify the changes that are being staged, or if you want to separate the people (or system) preparing the\n changes from the people (or system) applying the changes.\n* **CloudFormationExecuteChangeSetAction** - Execute a change set prepared previously.\n\n#### Lambda deployed through CodePipeline\n\nIf you want to deploy your Lambda through CodePipeline,\nand you don't use assets (for example, because your CDK code and Lambda code are separate),\nyou can use a special Lambda `Code` class, `CfnParametersCode`.\nNote that your Lambda must be in a different Stack than your Pipeline.\nThe Lambda itself will be deployed, alongside the entire Stack it belongs to,\nusing a CloudFormation CodePipeline Action. 
Example:\n\n[Example of deploying a Lambda through CodePipeline](test/integ.lambda-deployed-through-codepipeline.lit.ts)\n\n#### Cross-account actions\n\nIf you want to update stacks in a different account,\npass the `account` property when creating the action:\n\n```ts\nnew codepipeline_actions.CloudFormationCreateUpdateStackAction({\n // ...\n account: '123456789012',\n});\n```\n\nThis will create a new stack, called `<PipelineStackName>-support-123456789012`, in your `App`,\nthat will contain the role that the pipeline will assume in account 123456789012 before executing this action.\nThis support stack will automatically be deployed before the stack containing the pipeline.\n\nYou can also pass a role explicitly when creating the action -\nin that case, the `account` property is ignored,\nand the action will operate in the same account the role belongs to:\n\n```ts\nimport { PhysicalName } from 'aws-cdk-lib';\n\n// in stack for account 123456789012...\nconst actionRole = new iam.Role(otherAccountStack, 'ActionRole', {\n assumedBy: new iam.AccountPrincipal(pipelineAccount),\n // the role has to have a physical name set\n roleName: PhysicalName.GENERATE_IF_NEEDED,\n});\n\n// in the pipeline stack...\nnew codepipeline_actions.CloudFormationCreateUpdateStackAction({\n // ...\n role: actionRole, // this action will be cross-account as well\n});\n```\n\n### AWS CodeDeploy\n\n#### Server deployments\n\nTo use CodeDeploy for EC2/on-premise deployments in a Pipeline:\n\n```ts\nimport { aws_codedeploy as codedeploy } from 'aws-cdk-lib';\n\nconst pipeline = new codepipeline.Pipeline(this, 'MyPipeline', {\n pipelineName: 'MyPipeline',\n});\n\n// add the source and build Stages to the Pipeline...\n\nconst deployAction = new codepipeline_actions.CodeDeployServerDeployAction({\n actionName: 'CodeDeploy',\n input: buildOutput,\n deploymentGroup,\n});\npipeline.addStage({\n stageName: 'Deploy',\n actions: [deployAction],\n});\n```\n\n##### Lambda deployments\n\nTo use 
CodeDeploy for blue-green Lambda deployments in a Pipeline:\n\n```ts\nconst lambdaCode = lambda.Code.fromCfnParameters();\nconst func = new lambda.Function(lambdaStack, 'Lambda', {\n code: lambdaCode,\n handler: 'index.handler',\n runtime: lambda.Runtime.NODEJS_12_X,\n});\n// used to make sure each CDK synthesis produces a different Version\nconst version = func.addVersion('NewVersion');\nconst alias = new lambda.Alias(lambdaStack, 'LambdaAlias', {\n aliasName: 'Prod',\n version,\n});\n\nnew codedeploy.LambdaDeploymentGroup(lambdaStack, 'DeploymentGroup', {\n alias,\n deploymentConfig: codedeploy.LambdaDeploymentConfig.LINEAR_10PERCENT_EVERY_1MINUTE,\n});\n```\n\nThen, you need to create your Pipeline Stack,\nwhere you will define your Pipeline,\nand deploy the `lambdaStack` using a CloudFormation CodePipeline Action\n(see above for a complete example).\n\n### ECS\n\nCodePipeline can deploy an ECS service.\nThe deploy Action receives one input Artifact which contains the [image definition file]:\n\n```ts\nconst deployStage = pipeline.addStage({\n stageName: 'Deploy',\n actions: [\n new codepipeline_actions.EcsDeployAction({\n actionName: 'DeployAction',\n service,\n // if your file is called imagedefinitions.json,\n // use the `input` property,\n // and leave out the `imageFile` property\n input: buildOutput,\n // if your file name is _not_ imagedefinitions.json,\n // use the `imageFile` property,\n // and leave out the `input` property\n imageFile: buildOutput.atPath('imageDef.json'),\n deploymentTimeout: cdk.Duration.minutes(60), // optional, default is 60 minutes\n }),\n ],\n});\n```\n\n[image definition file]: https://docs.aws.amazon.com/codepipeline/latest/userguide/pipelines-create.html#pipelines-create-image-definitions\n\n#### Deploying ECS applications stored in a separate source code repository\n\nThe idiomatic CDK way of deploying an ECS application is to have your Dockerfiles and your CDK code in the same source code repository,\nleveraging [Docker 
Assets](https://docs.aws.amazon.com/cdk/latest/guide/assets.html#assets_types_docker),\nand use the [CDK Pipelines module](https://docs.aws.amazon.com/cdk/api/latest/docs/pipelines-readme.html).\n\nHowever, if you want to deploy a Docker application whose source code is kept in a separate version control repository than the CDK code,\nyou can use the `TagParameterContainerImage` class from the ECS module.\nHere's an example:\n\n[example ECS pipeline for an application in a separate source code repository](test/integ.pipeline-ecs-separate-source.lit.ts)\n\n### AWS S3 Deployment\n\nTo use an S3 Bucket as a deployment target in CodePipeline:\n\n```ts\nconst targetBucket = new s3.Bucket(this, 'MyBucket', {});\n\nconst pipeline = new codepipeline.Pipeline(this, 'MyPipeline');\nconst deployAction = new codepipeline_actions.S3DeployAction({\n actionName: 'S3Deploy',\n stage: deployStage,\n bucket: targetBucket,\n input: sourceOutput,\n});\nconst deployStage = pipeline.addStage({\n stageName: 'Deploy',\n actions: [deployAction],\n});\n```\n\n#### Invalidating the CloudFront cache when deploying to S3\n\nThere is currently no native support in CodePipeline for invalidating a CloudFront cache after deployment.\nOne workaround is to add another build step after the deploy step,\nand use the AWS CLI to invalidate the cache:\n\n```ts\n// Create a Cloudfront Web Distribution\nconst distribution = new cloudfront.Distribution(this, `Distribution`, {\n // ...\n});\n\n// Create the build project that will invalidate the cache\nconst invalidateBuildProject = new codebuild.PipelineProject(this, `InvalidateProject`, {\n buildSpec: codebuild.BuildSpec.fromObject({\n version: '0.2',\n phases: {\n build: {\n commands:[\n 'aws cloudfront create-invalidation --distribution-id ${CLOUDFRONT_ID} --paths \"/*\"',\n // Choose whatever files or paths you'd like, or all files as specified here\n ],\n },\n },\n }),\n environmentVariables: {\n CLOUDFRONT_ID: { value: distribution.distributionId },\n 
},\n});\n\n// Add Cloudfront invalidation permissions to the project\nconst distributionArn = `arn:aws:cloudfront::${this.account}:distribution/${distribution.distributionId}`;\ninvalidateBuildProject.addToRolePolicy(new iam.PolicyStatement({\n resources: [distributionArn],\n actions: [\n 'cloudfront:CreateInvalidation',\n ],\n}));\n\n// Create the pipeline (here only the S3 deploy and Invalidate cache build)\nnew codepipeline.Pipeline(this, 'Pipeline', {\n stages: [\n // ...\n {\n stageName: 'Deploy',\n actions: [\n new codepipelineActions.S3DeployAction({\n actionName: 'S3Deploy',\n bucket: deployBucket,\n input: deployInput,\n runOrder: 1,\n }),\n new codepipelineActions.CodeBuildAction({\n actionName: 'InvalidateCache',\n project: invalidateBuildProject,\n input: deployInput,\n runOrder: 2,\n }),\n ],\n },\n ],\n});\n```\n\n### Alexa Skill\n\nYou can deploy to Alexa using CodePipeline with the following Action:\n\n```ts\n// Read the secrets from ParameterStore\nconst clientId = cdk.SecretValue.secretsManager('AlexaClientId');\nconst clientSecret = cdk.SecretValue.secretsManager('AlexaClientSecret');\nconst refreshToken = cdk.SecretValue.secretsManager('AlexaRefreshToken');\n\n// Add deploy action\nnew codepipeline_actions.AlexaSkillDeployAction({\n actionName: 'DeploySkill',\n runOrder: 1,\n input: sourceOutput,\n clientId: clientId.toString(),\n clientSecret: clientSecret,\n refreshToken: refreshToken,\n skillId: 'amzn1.ask.skill.12345678-1234-1234-1234-123456789012',\n});\n```\n\nIf you need manifest overrides you can specify them as `parameterOverridesArtifact` in the action:\n\n```ts\nimport { aws_cloudformation as cloudformation } from 'aws-cdk-lib';\n\n// Deploy some CFN change set and store output\nconst executeOutput = new codepipeline.Artifact('CloudFormation');\nconst executeChangeSetAction = new codepipeline_actions.CloudFormationExecuteChangeSetAction({\n actionName: 'ExecuteChangesTest',\n runOrder: 2,\n stackName,\n changeSetName,\n 
outputFileName: 'overrides.json',\n output: executeOutput,\n});\n\n// Provide CFN output as manifest overrides\nnew codepipeline_actions.AlexaSkillDeployAction({\n actionName: 'DeploySkill',\n runOrder: 1,\n input: sourceOutput,\n parameterOverridesArtifact: executeOutput,\n clientId: clientId.toString(),\n clientSecret: clientSecret,\n refreshToken: refreshToken,\n skillId: 'amzn1.ask.skill.12345678-1234-1234-1234-123456789012',\n});\n```\n\n### AWS Service Catalog\n\nYou can deploy a CloudFormation template to an existing Service Catalog product with the following Action:\n\n```ts\nconst serviceCatalogDeployAction = new codepipeline_actions.ServiceCatalogDeployActionBeta1({\n actionName: 'ServiceCatalogDeploy',\n templatePath: cdkBuildOutput.atPath(\"Sample.template.json\"),\n productVersionName: \"Version - \" + Date.now.toString,\n productType: \"CLOUD_FORMATION_TEMPLATE\",\n productVersionDescription: \"This is a version from the pipeline with a new description.\",\n productId: \"prod-XXXXXXXX\",\n});\n```\n\n## Approve & invoke\n\n### Manual approval Action\n\nThis package contains an Action that stops the Pipeline until someone manually clicks the approve button:\n\n```ts\nconst manualApprovalAction = new codepipeline_actions.ManualApprovalAction({\n actionName: 'Approve',\n notificationTopic: new sns.Topic(this, 'Topic'), // optional\n notifyEmails: [\n 'some_email@example.com',\n ], // optional\n additionalInformation: 'additional info', // optional\n});\napproveStage.addAction(manualApprovalAction);\n// `manualApprovalAction.notificationTopic` can be used to access the Topic\n// after the Action has been added to a Pipeline\n```\n\nIf the `notificationTopic` has not been provided,\nbut `notifyEmails` were,\na new SNS Topic will be created\n(and accessible through the `notificationTopic` property of the Action).\n\nIf you want to grant a principal permissions to approve the changes,\nyou can invoke the method `grantManualApproval` passing it a 
`IGrantable`:\n\n```ts\nconst manualApprovalAction = new codepipeline_actions.ManualApprovalAction({\n actionName: 'Approve',\n});\napproveStage.addAction(manualApprovalAction);\n\nconst role = iam.Role.fromRoleArn(this, 'Admin', Arn.format({ service: 'iam', resource: 'role', resourceName: 'Admin' }, stack));\nmanualApprovalAction.grantManualApproval(role);\n```\n\n### AWS Lambda\n\nThis module contains an Action that allows you to invoke a Lambda function in a Pipeline:\n\n```ts\nimport { aws_lambda as lambda } from 'aws-cdk-lib';\n\nconst pipeline = new codepipeline.Pipeline(this, 'MyPipeline');\nconst lambdaAction = new codepipeline_actions.LambdaInvokeAction({\n actionName: 'Lambda',\n lambda: fn,\n});\npipeline.addStage({\n stageName: 'Lambda',\n actions: [lambdaAction],\n});\n```\n\nThe Lambda Action can have up to 5 inputs,\nand up to 5 outputs:\n\n```ts\n\nconst lambdaAction = new codepipeline_actions.LambdaInvokeAction({\n actionName: 'Lambda',\n inputs: [\n sourceOutput,\n buildOutput,\n ],\n outputs: [\n new codepipeline.Artifact('Out1'),\n new codepipeline.Artifact('Out2'),\n ],\n lambda: fn\n});\n```\n\nThe Lambda invoke action emits variables.\nUnlike many other actions, the variables are not static,\nbut dynamic, defined by the function calling the `PutJobSuccessResult`\nAPI with the `outputVariables` property filled with the map of variables\nExample:\n\n```ts\nimport { aws_lambda as lambda } from 'aws-cdk-lib';\n\nconst lambdaInvokeAction = new codepipeline_actions.LambdaInvokeAction({\n actionName: 'Lambda',\n lambda: new lambda.Function(this, 'Func', {\n runtime: lambda.Runtime.NODEJS_12_X,\n handler: 'index.handler',\n code: lambda.Code.fromInline(`\n const AWS = require('aws-sdk');\n\n exports.handler = async function(event, context) {\n const codepipeline = new AWS.CodePipeline();\n await codepipeline.putJobSuccessResult({\n jobId: event['CodePipeline.job'].id,\n outputVariables: {\n MY_VAR: \"some value\",\n },\n }).promise();\n }\n `),\n 
}),\n variablesNamespace: 'MyNamespace', // optional - by default, a name will be generated for you\n});\n\n// later:\n\nnew codepipeline_actions.CodeBuildAction({\n // ...\n environmentVariables: {\n MyVar: {\n value: lambdaInvokeAction.variable('MY_VAR'),\n },\n },\n});\n```\n\nSee [the AWS documentation](https://docs.aws.amazon.com/codepipeline/latest/userguide/actions-invoke-lambda-function.html)\non how to write a Lambda function invoked from CodePipeline.\n\n### AWS Step Functions\n\nThis module contains an Action that allows you to invoke a Step Function in a Pipeline:\n\n```ts\nimport { aws_stepfunctions as stepfunction } from 'aws-cdk-lib';\n\nconst pipeline = new codepipeline.Pipeline(this, 'MyPipeline');\nconst startState = new stepfunction.Pass(stack, 'StartState');\nconst simpleStateMachine = new stepfunction.StateMachine(stack, 'SimpleStateMachine', {\n definition: startState,\n});\nconst stepFunctionAction = new codepipeline_actions.StepFunctionsInvokeAction({\n actionName: 'Invoke',\n stateMachine: simpleStateMachine,\n stateMachineInput: codepipeline_actions.StateMachineInput.literal({ IsHelloWorldExample: true }),\n});\npipeline.addStage({\n stageName: 'StepFunctions',\n actions: [stepFunctionAction],\n});\n```\n\nThe `StateMachineInput` can be created with one of 2 static factory methods:\n`literal`, which takes an arbitrary map as its only argument, or `filePath`:\n\n```ts\nimport { aws_stepfunctions as stepfunction } from 'aws-cdk-lib';\n\nconst pipeline = new codepipeline.Pipeline(this, 'MyPipeline');\nconst inputArtifact = new codepipeline.Artifact();\nconst startState = new stepfunction.Pass(stack, 'StartState');\nconst simpleStateMachine = new stepfunction.StateMachine(stack, 'SimpleStateMachine', {\n definition: startState,\n});\nconst stepFunctionAction = new codepipeline_actions.StepFunctionsInvokeAction({\n actionName: 'Invoke',\n stateMachine: simpleStateMachine,\n stateMachineInput: 
codepipeline_actions.StateMachineInput.filePath(inputArtifact.atPath('assets/input.json')),\n});\npipeline.addStage({\n stageName: 'StepFunctions',\n actions: [stepFunctionAction],\n});\n```\n\nSee [the AWS documentation](https://docs.aws.amazon.com/codepipeline/latest/userguide/action-reference-StepFunctions.html)\nfor information on Action structure reference.\n"
|
|
869
|
-
},
|
|
870
616
|
"targets": {
|
|
871
617
|
"dotnet": {
|
|
872
618
|
"namespace": "Amazon.CDK.AWS.CodePipeline.Actions"
|
|
@@ -880,10 +626,6 @@
|
|
|
880
626
|
}
|
|
881
627
|
},
|
|
882
628
|
"aws-cdk-lib.aws_codestar": {
|
|
883
|
-
"locationInModule": {
|
|
884
|
-
"filename": "lib/index.ts",
|
|
885
|
-
"line": 46
|
|
886
|
-
},
|
|
887
629
|
"targets": {
|
|
888
630
|
"dotnet": {
|
|
889
631
|
"namespace": "Amazon.CDK.AWS.Codestar"
|
|
@@ -897,10 +639,6 @@
|
|
|
897
639
|
}
|
|
898
640
|
},
|
|
899
641
|
"aws-cdk-lib.aws_codestarconnections": {
|
|
900
|
-
"locationInModule": {
|
|
901
|
-
"filename": "lib/index.ts",
|
|
902
|
-
"line": 47
|
|
903
|
-
},
|
|
904
642
|
"targets": {
|
|
905
643
|
"dotnet": {
|
|
906
644
|
"namespace": "Amazon.CDK.AWS.CodeStarConnections"
|
|
@@ -914,13 +652,6 @@
|
|
|
914
652
|
}
|
|
915
653
|
},
|
|
916
654
|
"aws-cdk-lib.aws_codestarnotifications": {
|
|
917
|
-
"locationInModule": {
|
|
918
|
-
"filename": "lib/index.ts",
|
|
919
|
-
"line": 48
|
|
920
|
-
},
|
|
921
|
-
"readme": {
|
|
922
|
-
"markdown": "# AWS CodeStarNotifications Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk) project.\n\n## NotificationRule\n\nThe `NotificationRule` construct defines an AWS CodeStarNotifications rule.\nThe rule specifies the events you want notifications about and the targets\n(such as Amazon SNS topics or AWS Chatbot clients configured for Slack)\nwhere you want to receive them.\nNotification targets are objects that implement the `INotificationRuleTarget`\ninterface and notification source is object that implement the `INotificationRuleSource` interface.\n\n## Notification Targets\n\nThis module includes classes that implement the `INotificationRuleTarget` interface for SNS and slack in AWS Chatbot.\n\nThe following targets are supported:\n\n* `SNS`: specify event and notify to SNS topic.\n* `AWS Chatbot`: specify event and notify to slack channel and only support `SlackChannelConfiguration`.\n\n## Examples\n\n```ts\nimport { aws_codestarnotifications as notifications } from 'aws-cdk-lib';\nimport { aws_codebuild as codebuild } from 'aws-cdk-lib';\nimport { aws_sns as sns } from 'aws-cdk-lib';\nimport { aws_chatbot as chatbot } from 'aws-cdk-lib';\n\nconst project = new codebuild.PipelineProject(stack, 'MyProject');\n\nconst topic = new sns.Topic(stack, 'MyTopic1');\n\nconst slack = new chatbot.SlackChannelConfiguration(stack, 'MySlackChannel', {\n slackChannelConfigurationName: 'YOUR_CHANNEL_NAME',\n slackWorkspaceId: 'YOUR_SLACK_WORKSPACE_ID',\n slackChannelId: 'YOUR_SLACK_CHANNEL_ID',\n});\n\nconst rule = new notifications.NotificationRule(stack, 'NotificationRule', {\n source: project,\n events: [\n 'codebuild-project-build-state-succeeded',\n 'codebuild-project-build-state-failed',\n ],\n targets: [topic],\n});\nrule.addTarget(slack);\n```\n\n## Notification Source\n\nThis module includes classes that implement the 
`INotificationRuleSource` interface for AWS CodeBuild,\nAWS CodePipeline and will support AWS CodeCommit, AWS CodeDeploy in future.\n\nThe following sources are supported:\n\n* `AWS CodeBuild`: support codebuild project to trigger notification when event specified.\n* `AWS CodePipeline`: support codepipeline to trigger notification when event specified.\n\n## Events\n\nFor the complete list of supported event types for CodeBuild and CodePipeline, see:\n\n* [Events for notification rules on build projects](https://docs.aws.amazon.com/dtconsole/latest/userguide/concepts.html#events-ref-buildproject).\n* [Events for notification rules on pipelines](https://docs.aws.amazon.com/dtconsole/latest/userguide/concepts.html#events-ref-pipeline).\n"
|
|
923
|
-
},
|
|
924
655
|
"targets": {
|
|
925
656
|
"dotnet": {
|
|
926
657
|
"namespace": "Amazon.CDK.AWS.CodeStarNotifications"
|
|
@@ -934,13 +665,6 @@
|
|
|
934
665
|
}
|
|
935
666
|
},
|
|
936
667
|
"aws-cdk-lib.aws_cognito": {
|
|
937
|
-
"locationInModule": {
|
|
938
|
-
"filename": "lib/index.ts",
|
|
939
|
-
"line": 49
|
|
940
|
-
},
|
|
941
|
-
"readme": {
|
|
942
|
-
"markdown": "# Amazon Cognito Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\nFeatures | Stability\n-------------------------------------------|--------------------------------------------------------\nCFN Resources | \nHigher level constructs for User Pools | \nHigher level constructs for Identity Pools | \n\n> **CFN Resources:** All classes with the `Cfn` prefix in this module ([CFN Resources]) are always\n> stable and safe to use.\n>\n> [CFN Resources]: https://docs.aws.amazon.com/cdk/latest/guide/constructs.html#constructs_lib\n\n<!-- -->\n\n> **Stable:** Higher level constructs in this module that are marked stable will not undergo any\n> breaking changes. They will strictly follow the [Semantic Versioning](https://semver.org/) model.\n\n---\n\n<!--END STABILITY BANNER-->\n\n[Amazon Cognito](https://docs.aws.amazon.com/cognito/latest/developerguide/what-is-amazon-cognito.html) provides\nauthentication, authorization, and user management for your web and mobile apps. Your users can sign in directly with a\nuser name and password, or through a third party such as Facebook, Amazon, Google or Apple.\n\nThe two main components of Amazon Cognito are [user\npools](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools.html) and [identity\npools](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html). User pools are user directories\nthat provide sign-up and sign-in options for your app users. 
Identity pools enable you to grant your users access to\nother AWS services.\n\nThis module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk) project.\n\n## Table of Contents\n\n- [User Pools](#user-pools)\n - [Sign Up](#sign-up)\n - [Sign In](#sign-in)\n - [Attributes](#attributes)\n - [Security](#security)\n - [Multi-factor Authentication](#multi-factor-authentication-mfa)\n - [Account Recovery Settings](#account-recovery-settings)\n - [Emails](#emails)\n - [Device Tracking](#device-tracking)\n - [Lambda Triggers](#lambda-triggers)\n - [Trigger Permissions](#trigger-permissions)\n - [Import](#importing-user-pools)\n - [Identity Providers](#identity-providers)\n - [App Clients](#app-clients)\n - [Resource Servers](#resource-servers)\n - [Domains](#domains)\n\n## User Pools\n\nUser pools allow creating and managing your own directory of users that can sign up and sign in. They enable easy\nintegration with social identity providers such as Facebook, Google, Amazon, Microsoft Active Directory, etc. through\nSAML.\n\nUsing the CDK, a new user pool can be created as part of the stack using the construct's constructor. You may specify\nthe `userPoolName` to give your own identifier to the user pool. If not, CloudFormation will generate a name.\n\n```ts\nnew cognito.UserPool(this, 'myuserpool', {\n userPoolName: 'myawesomeapp-userpool',\n});\n```\n\nThe default set up for the user pool is configured such that only administrators will be allowed\nto create users. Features such as Multi-factor authentication (MFAs) and Lambda Triggers are not\nconfigured by default.\n\n### Sign Up\n\nUsers can either be signed up by the app's administrators or can sign themselves up. Once a user has signed up, their\naccount needs to be confirmed. Cognito provides several ways to sign users up and confirm their accounts. 
Learn more\nabout [user sign up here](https://docs.aws.amazon.com/cognito/latest/developerguide/signing-up-users-in-your-app.html).\n\nWhen a user signs up, email and SMS messages are used to verify their account and contact methods. The following code\nsnippet configures a user pool with properties relevant to these verification messages -\n\n```ts\nnew cognito.UserPool(this, 'myuserpool', {\n // ...\n selfSignUpEnabled: true,\n userVerification: {\n emailSubject: 'Verify your email for our awesome app!',\n emailBody: 'Thanks for signing up to our awesome app! Your verification code is {####}',\n emailStyle: cognito.VerificationEmailStyle.CODE,\n smsMessage: 'Thanks for signing up to our awesome app! Your verification code is {####}',\n }\n});\n```\n\nBy default, self sign up is disabled. Learn more about [email and SMS verification messages\nhere](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-message-customizations.html).\n\nBesides users signing themselves up, an administrator of any user pool can sign users up. The user then receives an\ninvitation to join the user pool. The following code snippet configures a user pool with properties relevant to the\ninvitation messages -\n\n```ts\nnew cognito.UserPool(this, 'myuserpool', {\n // ...\n userInvitation: {\n emailSubject: 'Invite to join our awesome app!',\n emailBody: 'Hello {username}, you have been invited to join our awesome app! Your temporary password is {####}',\n smsMessage: 'Your temporary password for our awesome app is {####}'\n }\n});\n```\n\nAll email subjects, bodies and SMS messages for both invitation and verification support Cognito's message templating.\nLearn more about [message templates\nhere](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-message-templates.html).\n\n### Sign In\n\nUsers registering or signing in into your application can do so with multiple identifiers. 
There are 4 options\navailable:\n\n- `username`: Allow signing in using the one time immutable user name that the user chose at the time of sign up.\n- `email`: Allow signing in using the email address that is associated with the account.\n- `phone`: Allow signing in using the phone number that is associated with the account.\n- `preferredUsername`: Allow signing in with an alternate user name that the user can change at any time. However, this\n is not available if the `username` option is not chosen.\n\nThe following code sets up a user pool so that the user can sign in with either their username or their email address -\n\n```ts\nnew cognito.UserPool(this, 'myuserpool', {\n // ...\n // ...\n signInAliases: {\n username: true,\n email: true\n },\n});\n```\n\nUser pools can either be configured so that user name is primary sign in form, but also allows for the other three to be\nused additionally; or it can be configured so that email and/or phone numbers are the only ways a user can register and\nsign in. Read more about this\n[here](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-attributes.html#user-pool-settings-aliases-settings).\n\n⚠️ The Cognito service prevents changing the `signInAlias` property for an existing user pool.\n\nTo match with 'Option 1' in the above link, with a verified email, `signInAliases` should be set to\n`{ username: true, email: true }`. To match with 'Option 2' in the above link with both a verified\nemail and phone number, this property should be set to `{ email: true, phone: true }`.\n\nCognito recommends that email and phone number be automatically verified, if they are one of the sign in methods for\nthe user pool. Read more about that\n[here](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-attributes.html#user-pool-settings-aliases).\nThe CDK does this by default, when email and/or phone number are specified as part of `signInAliases`. 
This can be\noverridden by specifying the `autoVerify` property.\n\nThe following code snippet sets up only email as a sign in alias, but both email and phone number to be auto-verified.\n\n```ts\nnew cognito.UserPool(this, 'myuserpool', {\n // ...\n // ...\n signInAliases: { username: true, email: true },\n autoVerify: { email: true, phone: true }\n});\n```\n\nA user pool can optionally ignore case when evaluating sign-ins. When `signInCaseSensitive` is false, Cognito will not\ncheck the capitalization of the alias when signing in. Default is true.\n\n### Attributes\n\nAttributes represent the various properties of each user that's collected and stored in the user pool. Cognito\nprovides a set of standard attributes that are available for all user pools. Users are allowed to select any of these\nstandard attributes to be required. Users will not be able to sign up to the user pool without providing the required\nattributes. Besides these, additional attributes can be further defined, and are known as custom attributes.\n\nLearn more on [attributes in Cognito's\ndocumentation](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-attributes.html).\n\nThe following code configures a user pool with two standard attributes (name and address) as required and mutable, and adds\nfour custom attributes.\n\n```ts\nnew cognito.UserPool(this, 'myuserpool', {\n // ...\n standardAttributes: {\n fullname: {\n required: true,\n mutable: false,\n },\n address: {\n required: false,\n mutable: true,\n },\n },\n customAttributes: {\n 'myappid': new cognito.StringAttribute({ minLen: 5, maxLen: 15, mutable: false }),\n 'callingcode': new cognito.NumberAttribute({ min: 1, max: 3, mutable: true }),\n 'isEmployee': new cognito.BooleanAttribute({ mutable: true }),\n 'joinedOn': new cognito.DateTimeAttribute(),\n },\n});\n```\n\nAs shown in the code snippet, there are data types that are available for custom attributes. 
The 'String' and 'Number'\ndata types allow for further constraints on their length and values, respectively.\n\nCustom attributes cannot be marked as required.\n\nAll custom attributes share the property `mutable` that specifies whether the value of the attribute can be changed.\nThe default value is `false`.\n\nUser pools come with two 'built-in' attributes - `email_verified` and `phone_number_verified`. These cannot be\nconfigured (required-ness or mutability) as part of user pool creation. However, user pool administrators can modify\nthem for specific users using the [AdminUpdateUserAttributes API].\n\n[AdminUpdateUserAttributes API]: https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_AdminUpdateUserAttributes.html\n\n### Security\n\nCognito sends various messages to its users via SMS, for different actions, ranging from account verification to\nmarketing. In order to send SMS messages, Cognito needs an IAM role that it can assume, with permissions that allow it\nto send SMS messages.\n\nBy default, the CDK looks at all of the specified properties (and their defaults when not explicitly specified) and\nautomatically creates an SMS role, when needed. For example, if MFA second factor by SMS is enabled, the CDK will\ncreate a new role. The `smsRole` property can be used to specify the user supplied role that should be used instead.\nAdditionally, the property `enableSmsRole` can be used to override the CDK's default behaviour to either enable or\nsuppress automatic role creation.\n\n```ts\nconst poolSmsRole = new iam.Role(this, 'userpoolsmsrole', {\n assumedBy: new iam.ServicePrincipal('foo'),\n});\n\nnew cognito.UserPool(this, 'myuserpool', {\n // ...\n smsRole: poolSmsRole,\n smsRoleExternalId: 'c87467be-4f34-11ea-b77f-2e728ce88125'\n});\n```\n\nWhen the `smsRole` property is specified, the `smsRoleExternalId` may also be specified. 
The value of\n`smsRoleExternalId` will be used as the `sts:ExternalId` when the Cognito service assumes the role. In turn, the role's\nassume role policy should be configured to accept this value as the ExternalId. Learn more about [ExternalId\nhere](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html).\n\n#### Multi-factor Authentication (MFA)\n\nUser pools can be configured to enable multi-factor authentication (MFA). It can either be turned off, set to optional\nor made required. Setting MFA to optional means that individual users can choose to enable it.\nAdditionally, the MFA code can be sent either via SMS text message or via a time-based software token.\nSee the [documentation on MFA](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-mfa.html) to\nlearn more.\n\nThe following code snippet marks MFA for the user pool as required. This means that all users are required to\nconfigure an MFA token and use it for sign in. It also allows for the users to use both SMS based MFA, as well,\n[time-based one time password\n(TOTP)](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-mfa-totp.html).\n\n```ts\nnew cognito.UserPool(this, 'myuserpool', {\n // ...\n mfa: cognito.Mfa.REQUIRED,\n mfaSecondFactor: {\n sms: true,\n otp: true,\n },\n});\n```\n\nUser pools can be configured with policies around a user's password. This includes the password length and the\ncharacter sets that they must contain.\n\nFurther to this, it can also be configured with the validity of the auto-generated temporary password. 
A temporary\npassword is generated by the user pool either when an admin signs up a user or when a password reset is requested.\nThe validity of this password dictates how long to give the user to use this password before expiring it.\n\nThe following code snippet configures these properties -\n\n```ts\nnew cognito.UserPool(this, 'myuserpool', {\n // ...\n passwordPolicy: {\n minLength: 12,\n requireLowercase: true,\n requireUppercase: true,\n requireDigits: true,\n requireSymbols: true,\n tempPasswordValidity: Duration.days(3),\n },\n});\n```\n\nNote that, `tempPasswordValidity` can be specified only in whole days. Specifying fractional days would throw an error.\n\n#### Account Recovery Settings\n\nUser pools can be configured on which method a user should use when recovering the password for their account. This\ncan either be email and/or SMS. Read more at [Recovering User Accounts](https://docs.aws.amazon.com/cognito/latest/developerguide/how-to-recover-a-user-account.html)\n\n```ts\nnew cognito.UserPool(this, 'UserPool', {\n // ...\n accountRecovery: cognito.AccountRecovery.EMAIL_ONLY,\n})\n```\n\nThe default for account recovery is by phone if available and by email otherwise.\nA user will not be allowed to reset their password via phone if they are also using it for MFA.\n\n### Emails\n\nCognito sends emails to users in the user pool, when particular actions take place, such as welcome emails, invitation\nemails, password resets, etc. 
The address from which these emails are sent can be configured on the user pool.\nRead more about [email settings here](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-email.html).\n\n```ts\nnew cognito.UserPool(this, 'myuserpool', {\n // ...\n emailSettings: {\n from: 'noreply@myawesomeapp.com',\n replyTo: 'support@myawesomeapp.com',\n },\n});\n```\n\nBy default, user pools are configured to use Cognito's built-in email capability, but it can also be configured to use\nAmazon SES, however, support for Amazon SES is not available in the CDK yet. If you would like this to be implemented,\ngive [this issue](https://github.com/aws/aws-cdk/issues/6768) a +1. Until then, you can use the [cfn\nlayer](https://docs.aws.amazon.com/cdk/latest/guide/cfn_layer.html) to configure this.\n\nIf an email address contains non-ASCII characters, it will be encoded using the [punycode\nencoding](https://en.wikipedia.org/wiki/Punycode) when generating the template for Cloudformation.\n\n### Device Tracking\n\nUser pools can be configured to track devices that users have logged in to.\nRead more at [Device Tracking](https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-device-tracking.html)\n\n```ts\nnew cognito.UserPool(this, 'myuserpool', {\n // ...\n deviceTracking: {\n challengeRequiredOnNewDevice: true,\n deviceOnlyRememberedOnUserPrompt: true,\n },\n});\n```\n\nThe default is to not track devices.\n\n### Lambda Triggers\n\nUser pools can be configured such that AWS Lambda functions can be triggered when certain user operations or actions\noccur, such as, sign up, user confirmation, sign in, etc. They can also be used to add custom authentication\nchallenges, user migrations and custom verification messages. 
Learn more about triggers at [User Pool Workflows with\nTriggers](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html).\n\nLambda triggers can either be specified as part of the `UserPool` initialization, or it can be added later, via methods\non the construct, as so -\n\n```ts\nconst authChallengeFn = new lambda.Function(this, 'authChallengeFn', {\n runtime: lambda.Runtime.NODEJS_12_X,\n handler: 'index.handler',\n code: lambda.Code.fromAsset(/* path to lambda asset */),\n});\n\nconst userpool = new cognito.UserPool(this, 'myuserpool', {\n // ...\n lambdaTriggers: {\n createAuthChallenge: authChallengeFn,\n // ...\n }\n});\n\nuserpool.addTrigger(cognito.UserPoolOperation.USER_MIGRATION, new lambda.Function(this, 'userMigrationFn', {\n runtime: lambda.Runtime.NODEJS_12_X,\n handler: 'index.handler',\n code: lambda.Code.fromAsset(/* path to lambda asset */),\n}));\n```\n\nThe following table lists the set of triggers available, and their corresponding method to add it to the user pool.\nFor more information on the function of these triggers and how to configure them, read [User Pool Workflows with\nTriggers](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html).\n\n#### Trigger Permissions\n\nThe `function.attachToRolePolicy()` API can be used to add additional IAM permissions to the lambda trigger\nas necessary.\n\n⚠️ Using the `attachToRolePolicy` API to provide permissions to your user pool will result in a circular dependency. 
See [aws/aws-cdk#7016](https://github.com/aws/aws-cdk/issues/7016).\nError message when running `cdk synth` or `cdk deploy`:\n> Circular dependency between resources: [pool056F3F7E, fnPostAuthFnCognitoA630A2B1, ...]\n\nTo work around the circular dependency issue, use the `attachInlinePolicy()` API instead, as shown below.\n\n```ts fixture=with-lambda-trigger\n// provide permissions to describe the user pool scoped to the ARN the user pool\npostAuthFn.role?.attachInlinePolicy(new iam.Policy(this, 'userpool-policy', {\n statements: [new iam.PolicyStatement({\n actions: ['cognito-idp:DescribeUserPool'],\n resources: [userpool.userPoolArn],\n })],\n}));\n```\n\n### Importing User Pools\n\nAny user pool that has been created outside of this stack, can be imported into the CDK app. Importing a user pool\nallows for it to be used in other parts of the CDK app that reference an `IUserPool`. However, imported user pools have\nlimited configurability. As a rule of thumb, none of the properties that are part of the\n[`AWS::Cognito::UserPool`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cognito-userpool.html)\nCloudFormation resource can be configured.\n\nUser pools can be imported either using their id via the `UserPool.fromUserPoolId()`, or by using their ARN, via the\n`UserPool.fromUserPoolArn()` API.\n\n```ts\nconst awesomePool = cognito.UserPool.fromUserPoolId(this, 'awesome-user-pool', 'us-east-1_oiuR12Abd');\n\nconst otherAwesomePool = cognito.UserPool.fromUserPoolArn(this, 'other-awesome-user-pool',\n 'arn:aws:cognito-idp:eu-west-1:123456789012:userpool/us-east-1_mtRyYQ14D');\n```\n\n### Identity Providers\n\nUsers that are part of a user pool can sign in either directly through a user pool, or federate through a third-party\nidentity provider. 
Once configured, the Cognito backend will take care of integrating with the third-party provider.\nRead more about [Adding User Pool Sign-in Through a Third\nParty](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-identity-federation.html).\n\nThe following third-party identity providers are currently supported in the CDK -\n\n- [Login With Amazon](https://developer.amazon.com/apps-and-games/login-with-amazon)\n- [Facebook Login](https://developers.facebook.com/docs/facebook-login/)\n- [Google Login](https://developers.google.com/identity/sign-in/web/sign-in)\n- [Sign In With Apple](https://developer.apple.com/sign-in-with-apple/get-started/)\n\nThe following code configures a user pool to federate with the third party provider, 'Login with Amazon'. The identity\nprovider needs to be configured with a set of credentials that the Cognito backend can use to federate with the\nthird-party identity provider.\n\n```ts\nconst userpool = new cognito.UserPool(this, 'Pool');\n\nconst provider = new cognito.UserPoolIdentityProviderAmazon(this, 'Amazon', {\n clientId: 'amzn-client-id',\n clientSecret: 'amzn-client-secret',\n userPool: userpool,\n});\n```\n\nAttribute mapping allows mapping attributes provided by the third-party identity providers to [standard and custom\nattributes](#Attributes) of the user pool. 
Learn more about [Specifying Identity Provider Attribute Mappings for Your\nUser Pool](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-specifying-attribute-mapping.html).\n\nThe following code shows how different attributes provided by 'Login With Amazon' can be mapped to standard and custom\nuser pool attributes.\n\n```ts\nconst userpool = new cognito.UserPool(this, 'Pool');\n\nnew cognito.UserPoolIdentityProviderAmazon(this, 'Amazon', {\n clientId: 'amzn-client-id',\n clientSecret: 'amzn-client-secret',\n userPool: userpool,\n attributeMapping: {\n email: cognito.ProviderAttribute.AMAZON_EMAIL,\n website: cognito.ProviderAttribute.other('url'), // use other() when an attribute is not pre-defined in the CDK\n custom: {\n // custom user pool attributes go here\n uniqueId: cognito.ProviderAttribute.AMAZON_USER_ID,\n }\n }\n});\n```\n\n### App Clients\n\nAn app is an entity within a user pool that has permission to call unauthenticated APIs (APIs that do not have an\nauthenticated user), such as APIs to register, sign in, and handle forgotten passwords. To call these APIs, you need an\napp client ID and an optional client secret. Read [Configuring a User Pool App\nClient](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-client-apps.html) to learn more.\n\nThe following code creates an app client and retrieves the client id -\n\n```ts\nconst pool = new cognito.UserPool(this, 'pool');\nconst client = pool.addClient('customer-app-client');\nconst clientId = client.userPoolClientId;\n```\n\nExisting app clients can be imported into the CDK app using the `UserPoolClient.fromUserPoolClientId()` API. 
For new\nand imported user pools, clients can also be created via the `UserPoolClient` constructor, as so -\n\n```ts\nconst importedPool = cognito.UserPool.fromUserPoolId(this, 'imported-pool', 'us-east-1_oiuR12Abd');\nnew cognito.UserPoolClient(this, 'customer-app-client', {\n userPool: importedPool\n});\n```\n\nClients can be configured with authentication flows. Authentication flows allow users on a client to be authenticated\nwith a user pool. Cognito user pools provide several different types of authentication, such as, SRP (Secure\nRemote Password) authentication, username-and-password authentication, etc. Learn more about this at [UserPool Authentication\nFlow](https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-authentication-flow.html).\n\nThe following code configures a client to use both SRP and username-and-password authentication -\n\n```ts\nconst pool = new cognito.UserPool(this, 'pool');\npool.addClient('app-client', {\n authFlows: {\n userPassword: true,\n userSrp: true,\n }\n});\n```\n\nCustom authentication protocols can be configured by setting the `custom` property under `authFlow` and defining lambda\nfunctions for the corresponding user pool [triggers](#lambda-triggers). Learn more at [Custom Authentication\nFlow](https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-authentication-flow.html#amazon-cognito-user-pools-custom-authentication-flow).\n\nIn addition to these authentication mechanisms, Cognito user pools also support using OAuth 2.0 framework for\nauthenticating users. User pool clients can be configured with OAuth 2.0 authorization flows and scopes. 
Learn more\nabout the [OAuth 2.0 authorization framework](https://tools.ietf.org/html/rfc6749) and [Cognito user pool's\nimplementation of\nOAuth2.0](https://aws.amazon.com/blogs/mobile/understanding-amazon-cognito-user-pool-oauth-2-0-grants/).\n\nThe following code configures an app client with the authorization code grant flow and registers the the app's welcome\npage as a callback (or redirect) URL. It also configures the access token scope to 'openid'. All of these concepts can\nbe found in the [OAuth 2.0 RFC](https://tools.ietf.org/html/rfc6749).\n\n```ts\nconst pool = new cognito.UserPool(this, 'Pool');\npool.addClient('app-client', {\n oAuth: {\n flows: {\n authorizationCodeGrant: true,\n },\n scopes: [ cognito.OAuthScope.OPENID ],\n callbackUrls: [ 'https://my-app-domain.com/welcome' ],\n logoutUrls: [ 'https://my-app-domain.com/signin' ],\n }\n});\n```\n\nAn app client can be configured to prevent user existence errors. This\ninstructs the Cognito authentication API to return generic authentication\nfailure responses instead of an UserNotFoundException. By default, the flag\nis not set, which means different things for existing and new stacks. See the\n[documentation](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-managing-errors.html)\nfor the full details on the behavior of this flag.\n\n```ts\nconst pool = new cognito.UserPool(this, 'Pool');\npool.addClient('app-client', {\n preventUserExistenceErrors: true,\n});\n```\n\nAll identity providers created in the CDK app are automatically registered into the corresponding user pool. All app\nclients created in the CDK have all of the identity providers enabled by default. 
The 'Cognito' identity provider,\nthat allows users to register and sign in directly with the Cognito user pool, is also enabled by default.\nAlternatively, the list of supported identity providers for a client can be explicitly specified -\n\n```ts\nconst pool = new cognito.UserPool(this, 'Pool');\npool.addClient('app-client', {\n // ...\n supportedIdentityProviders: [\n cognito.UserPoolClientIdentityProvider.AMAZON,\n cognito.UserPoolClientIdentityProvider.COGNITO,\n ]\n});\n```\n\nIf the identity provider and the app client are created in the same stack, specify the dependency between both constructs to make sure that the identity provider already exists when the app client will be created. The app client cannot handle the dependency to the identity provider automatically because the client does not have access to the provider's construct.\n\n```ts\nconst provider = new cognito.UserPoolIdentityProviderAmazon(this, 'Amazon', {\n // ...\n});\nconst client = pool.addClient('app-client', {\n // ...\n supportedIdentityProviders: [\n cognito.UserPoolClientIdentityProvider.AMAZON,\n ],\n}\nclient.node.addDependency(provider);\n```\n\nIn accordance with the OIDC open standard, Cognito user pool clients provide access tokens, ID tokens and refresh tokens.\nMore information is available at [Using Tokens with User Pools](https://docs.aws.amazon.com/en_us/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html).\nThe expiration time for these tokens can be configured as shown below.\n\n```ts\nconst pool = new cognito.UserPool(this, 'Pool');\npool.addClient('app-client', {\n // ...\n accessTokenValidity: Duration.minutes(60),\n idTokenValidity: Duration.minutes(60),\n refreshTokenValidity: Duration.days(30),\n});\n```\n\nClients can (and should) be allowed to read and write relevant user attributes only. 
Usually every client can be allowed to read the `given_name`\nattribute but not every client should be allowed to set the `email_verified` attribute.\nThe same criteria applies for both standard and custom attributes, more info is available at\n[Attribute Permissions and Scopes](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-attributes.html#user-pool-settings-attribute-permissions-and-scopes).\nThe default behaviour is to allow read and write permissions on all attributes. The following code shows how this can be configured for a client.\n\n```ts\nconst pool = new cognito.UserPool(this, 'Pool');\n\nconst clientWriteAttributes = (new ClientAttributes())\n .withStandardAttributes({fullname: true, email: true})\n .withCustomAttributes('favouritePizza', 'favouriteBeverage');\n\nconst clientReadAttributes = clientWriteAttributes\n .withStandardAttributes({emailVerified: true})\n .withCustomAttributes('pointsEarned');\n\npool.addClient('app-client', {\n // ...\n readAttributes: clientReadAttributes,\n writeAttributes: clientWriteAttributes,\n});\n```\n\n[Token revocation](https://docs.aws.amazon.com/cognito/latest/developerguide/token-revocation.html\n) can be configured to be able to revoke refresh tokens in app clients. By default, token revocation is enabled for new user pools. The property can be used to enable the token revocation in existing app clients or to change the default behavior.\n\n```ts\nconst pool = new cognito.UserPool(this, 'Pool');\npool.addClient('app-client', {\n // ...\n enableTokenRevocation: true,\n});\n``` \n\n### Resource Servers\n\nA resource server is a server for access-protected resources. It handles authenticated requests from an app that has an\naccess token. See [Defining Resource\nServers](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-define-resource-servers.html)\nfor more information.\n\nAn application may choose to model custom permissions via OAuth. 
Resource Servers provide this capability via custom scopes\nthat are attached to an app client. The following example sets up a resource server for the 'users' resource for two different\napp clients and configures the clients to use these scopes.\n\n```ts\nconst pool = new cognito.UserPool(this, 'Pool');\n\nconst readOnlyScope = new ResourceServerScope({ scopeName: 'read', scopeDescription: 'Read-only access' });\nconst fullAccessScope = new ResourceServerScope({ scopeName: '*', scopeDescription: 'Full access' });\n\nconst userServer = pool.addResourceServer('ResourceServer', {\n identifier: 'users',\n scopes: [ readOnlyScope, fullAccessScope ],\n});\n\nconst readOnlyClient = pool.addClient('read-only-client', {\n // ...\n oAuth: {\n // ...\n scopes: [ OAuthScope.resourceServer(userServer, readOnlyScope) ],\n },\n});\n\nconst fullAccessClient = pool.addClient('full-access-client', {\n // ...\n oAuth: {\n // ...\n scopes: [ OAuthScope.resourceServer(userServer, fullAccessScope) ],\n },\n});\n```\n\n\n### Domains\n\nAfter setting up an [app client](#app-clients), the address for the user pool's sign-up and sign-in webpages can be\nconfigured using domains. There are two ways to set up a domain - either the Amazon Cognito hosted domain can be chosen\nwith an available domain prefix, or a custom domain name can be chosen. 
The custom domain must be one that is already\nowned, and whose certificate is registered in AWS Certificate Manager.\n\nThe following code sets up a user pool domain in Amazon Cognito hosted domain with the prefix 'my-awesome-app', and another domain with the custom domain 'user.myapp.com' -\n\n```ts\nconst pool = new cognito.UserPool(this, 'Pool');\n\npool.addDomain('CognitoDomain', {\n cognitoDomain: {\n domainPrefix: 'my-awesome-app',\n },\n});\n\nconst certificateArn = 'arn:aws:acm:us-east-1:123456789012:certificate/11-3336f1-44483d-adc7-9cd375c5169d';\n\nconst domainCert = certificatemanager.Certificate.fromCertificateArn(this, 'domainCert', certificateArn);\npool.addDomain('CustomDomain', {\n customDomain: {\n domainName: 'user.myapp.com',\n certificate: domainCert,\n },\n});\n```\n\nRead more about [Using the Amazon Cognito\nDomain](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-assign-domain-prefix.html) and [Using Your Own\nDomain](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-add-custom-domain.html).\n\nThe `signInUrl()` methods returns the fully qualified URL to the login page for the user pool. This page comes from the\nhosted UI configured with Cognito. 
Learn more at [Hosted UI with the Amazon Cognito\nConsole](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-app-integration.html#cognito-user-pools-create-an-app-integration).\n\n```ts\nconst userpool = new cognito.UserPool(this, 'UserPool', {\n // ...\n});\nconst client = userpool.addClient('Client', {\n // ...\n oAuth: {\n flows: {\n implicitCodeGrant: true,\n },\n callbackUrls: [\n 'https://myapp.com/home',\n 'https://myapp.com/users',\n ]\n }\n})\nconst domain = userpool.addDomain('Domain', {\n // ...\n});\nconst signInUrl = domain.signInUrl(client, {\n redirectUri: 'https://myapp.com/home', // must be a URL configured under 'callbackUrls' with the client\n})\n```\n\nExisting domains can be imported into CDK apps using `UserPoolDomain.fromDomainName()` API\n\n```ts\nconst myUserPoolDomain = cognito.UserPoolDomain.fromDomainName(this, 'my-user-pool-domain', 'domain-name');\n```\n"
|
|
943
|
-
},
|
|
944
668
|
"targets": {
|
|
945
669
|
"dotnet": {
|
|
946
670
|
"namespace": "Amazon.CDK.AWS.Cognito"
|
|
@@ -954,13 +678,6 @@
|
|
|
954
678
|
}
|
|
955
679
|
},
|
|
956
680
|
"aws-cdk-lib.aws_config": {
|
|
957
|
-
"locationInModule": {
|
|
958
|
-
"filename": "lib/index.ts",
|
|
959
|
-
"line": 50
|
|
960
|
-
},
|
|
961
|
-
"readme": {
|
|
962
|
-
"markdown": "# AWS Config Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\nFeatures | Stability\n---------------------------------------------------------------------------------------|------------\nCFN Resources | \nHigher level constructs for Config Rules | \nHigher level constructs for initial set-up (delivery channel & configuration recorder) | \n\n> **CFN Resources:** All classes with the `Cfn` prefix in this module ([CFN Resources]) are always\n> stable and safe to use.\n>\n> [CFN Resources]: https://docs.aws.amazon.com/cdk/latest/guide/constructs.html#constructs_lib\n\n<!-- -->\n\n> **Stable:** Higher level constructs in this module that are marked stable will not undergo any\n> breaking changes. They will strictly follow the [Semantic Versioning](https://semver.org/) model.\n\n---\n\n<!--END STABILITY BANNER-->\n\n[AWS Config](https://docs.aws.amazon.com/config/latest/developerguide/WhatIsConfig.html) provides a detailed view of the configuration of AWS resources in your AWS account.\nThis includes how the resources are related to one another and how they were configured in the\npast so that you can see how the configurations and relationships change over time. \n\nThis module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk) project.\n\n## Initial Setup\n\nBefore using the constructs provided in this module, you need to set up AWS Config\nin the region in which it will be used. 
This setup includes the one-time creation of the\nfollowing resources per region:\n\n- `ConfigurationRecorder`: Configure which resources will be recorded for config changes.\n- `DeliveryChannel`: Configure where to store the recorded data.\n\nThe following guides provide the steps for getting started with AWS Config:\n\n- [Using the AWS Console](https://docs.aws.amazon.com/config/latest/developerguide/gs-console.html)\n- [Using the AWS CLI](https://docs.aws.amazon.com/config/latest/developerguide/gs-cli.html)\n\n## Rules\n\nAWS Config can evaluate the configuration settings of your AWS resources by creating AWS Config rules,\nwhich represent your ideal configuration settings.\n\nSee [Evaluating Resources with AWS Config Rules](https://docs.aws.amazon.com/config/latest/developerguide/evaluate-config.html) to learn more about AWS Config rules.\n\n### AWS Managed Rules\n\nAWS Config provides AWS managed rules, which are predefined, customizable rules that AWS Config\nuses to evaluate whether your AWS resources comply with common best practices.\n\nFor example, you could create a managed rule that checks whether active access keys are rotated\nwithin the number of days specified.\n\n```ts\nimport { aws_config as config } from 'aws-cdk-lib';\nimport * as cdk from 'aws-cdk-lib';\n\n// https://docs.aws.amazon.com/config/latest/developerguide/access-keys-rotated.html\nnew config.ManagedRule(this, 'AccessKeysRotated', {\n identifier: config.ManagedRuleIdentifiers.ACCESS_KEYS_ROTATED,\n inputParameters: {\n maxAccessKeyAge: 60 // default is 90 days\n },\n maximumExecutionFrequency: config.MaximumExecutionFrequency.TWELVE_HOURS // default is 24 hours\n});\n```\n\nIdentifiers for AWS managed rules are available through static constants in the `ManagedRuleIdentifiers` class.\nYou can find supported input parameters in the [List of AWS Config Managed Rules](https://docs.aws.amazon.com/config/latest/developerguide/managed-rules-by-aws-config.html).\n\nThe following higher level 
constructs for AWS managed rules are available.\n\n#### Access Key rotation\n\nChecks whether your active access keys are rotated within the number of days specified.\n\n```ts\nimport { aws_config as config } from 'aws-cdk-lib';\nimport { aws_cdk as cdk } from 'aws-cdk-lib';\n\n// compliant if access keys have been rotated within the last 90 days\nnew config.AccessKeysRotated(this, 'AccessKeyRotated');\n```\n\n#### CloudFormation Stack drift detection\n\nChecks whether your CloudFormation stack's actual configuration differs, or has drifted,\nfrom it's expected configuration. \n\n```ts\nimport { aws_config as config } from 'aws-cdk-lib';\nimport { aws_cdk as cdk } from 'aws-cdk-lib';\n\n// compliant if stack's status is 'IN_SYNC'\n// non-compliant if the stack's drift status is 'DRIFTED'\nnew config.CloudFormationStackDriftDetectionCheck(stack, 'Drift', {\n ownStackOnly: true, // checks only the stack containing the rule\n});\n```\n\n#### CloudFormation Stack notifications\n\nChecks whether your CloudFormation stacks are sending event notifications to a SNS topic.\n\n```ts\nimport { aws_config as config } from 'aws-cdk-lib';\nimport { aws_cdk as cdk } from 'aws-cdk-lib';\n\n// topics to which CloudFormation stacks may send event notifications\nconst topic1 = new sns.Topic(stack, 'AllowedTopic1');\nconst topic2 = new sns.Topic(stack, 'AllowedTopic2');\n\n// non-compliant if CloudFormation stack does not send notifications to 'topic1' or 'topic2'\nnew config.CloudFormationStackNotificationCheck(this, 'NotificationCheck', {\n topics: [topic1, topic2],\n})\n```\n\n### Custom rules\n\nYou can develop custom rules and add them to AWS Config. 
You associate each custom rule with an\nAWS Lambda function, which contains the logic that evaluates whether your AWS resources comply\nwith the rule.\n\n### Triggers\n\nAWS Lambda executes functions in response to events that are published by AWS Services.\nThe function for a custom Config rule receives an event that is published by AWS Config,\nand is responsible for evaluating the compliance of the rule.\n\nEvaluations can be triggered by configuration changes, periodically, or both.\nTo create a custom rule, define a `CustomRule` and specify the Lambda Function\nto run and the trigger types.\n\n```ts\nimport { aws_config as config } from 'aws-cdk-lib';\n\nnew config.CustomRule(this, 'CustomRule', {\n lambdaFunction: evalComplianceFn,\n configurationChanges: true,\n periodic: true,\n maximumExecutionFrequency: config.MaximumExecutionFrequency.SIX_HOURS, // default is 24 hours\n});\n```\n\nWhen the trigger for a rule occurs, the Lambda function is invoked by publishing an event.\nSee [example events for AWS Config Rules](https://docs.aws.amazon.com/config/latest/developerguide/evaluate-config_develop-rules_example-events.html) \n\nThe AWS documentation has examples of Lambda functions for evaluations that are\n[triggered by configuration changes](https://docs.aws.amazon.com/config/latest/developerguide/evaluate-config_develop-rules_nodejs-sample.html#event-based-example-rule) and [triggered periodically](https://docs.aws.amazon.com/config/latest/developerguide/evaluate-config_develop-rules_nodejs-sample.html#periodic-example-rule)\n\n\n### Scope\n\nBy default rules are triggered by changes to all [resources](https://docs.aws.amazon.com/config/latest/developerguide/resource-config-reference.html#supported-resources).\n\nUse the `RuleScope` APIs (`fromResource()`, `fromResources()` or `fromTag()`) to restrict\nthe scope of both managed and custom rules:\n\n```ts\nimport { aws_config as config } from 'aws-cdk-lib';\n\nconst sshRule = new config.ManagedRule(this, 
'SSH', {\n identifier: config.ManagedRuleIdentifiers.EC2_SECURITY_GROUPS_INCOMING_SSH_DISABLED,\n ruleScope: config.RuleScope.fromResource(config.ResourceType.EC2_SECURITY_GROUP, 'sg-1234567890abcdefgh'), // restrict to specific security group\n});\n\nconst customRule = new config.CustomRule(this, 'Lambda', {\n lambdaFunction: evalComplianceFn,\n configurationChanges: true\n ruleScope: config.RuleScope.fromResources([config.ResourceType.CLOUDFORMATION_STACK, config.ResourceType.S3_BUCKET]), // restrict to all CloudFormation stacks and S3 buckets\n});\n\nconst tagRule = new config.CustomRule(this, 'CostCenterTagRule', {\n lambdaFunction: evalComplianceFn,\n configurationChanges: true\n ruleScope: config.RuleScope.fromTag('Cost Center', 'MyApp'), // restrict to a specific tag\n});\n```\n\n### Events\n\nYou can define Amazon EventBridge event rules which trigger when a compliance check fails\nor when a rule is re-evaluated.\n\nUse the `onComplianceChange()` APIs to trigger an EventBridge event when a compliance check\nof your AWS Config Rule fails:\n\n```ts\nimport { aws_config as config } from 'aws-cdk-lib';\nimport { aws_sns as sns } from 'aws-cdk-lib';\nimport { aws_events_targets as targets } from 'aws-cdk-lib';\n\n// Topic to which compliance notification events will be published\nconst complianceTopic = new sns.Topic(this, 'ComplianceTopic');\n\nconst rule = new config.CloudFormationStackDriftDetectionCheck(this, 'Drift');\nrule.onComplianceChange('TopicEvent', {\n target: new targets.SnsTopic(complianceTopic),\n});\n```\n\nUse the `onReEvaluationStatus()` status to trigger an EventBridge event when an AWS Config\nrule is re-evaluated.\n\n```ts\nimport { aws_config as config } from 'aws-cdk-lib';\nimport { aws_sns as sns } from 'aws-cdk-lib';\nimport { aws_events_targets as targets } from 'aws-cdk-lib';\n\n// Topic to which re-evaluation notification events will be published\nconst reEvaluationTopic = new sns.Topic(this, 
'ComplianceTopic');\nrule.onReEvaluationStatus('ReEvaluationEvent', {\n target: new targets.SnsTopic(reEvaluationTopic),\n})\n```\n\n### Example\n\nThe following example creates a custom rule that evaluates whether EC2 instances are compliant.\nCompliance events are published to an SNS topic.\n\n```ts\nimport { aws_config as config } from 'aws-cdk-lib';\nimport { aws_lambda as lambda } from 'aws-cdk-lib';\nimport { aws_sns as sns } from 'aws-cdk-lib';\nimport { aws_events_targets as targets } from 'aws-cdk-lib';\n\n// Lambda function containing logic that evaluates compliance with the rule.\nconst evalComplianceFn = new lambda.Function(this, 'CustomFunction', {\n code: lambda.AssetCode.fromInline('exports.handler = (event) => console.log(event);'),\n handler: 'index.handler',\n runtime: lambda.Runtime.NODEJS_12_X,\n});\n\n// A custom rule that runs on configuration changes of EC2 instances\nconst customRule = new config.CustomRule(this, 'Custom', {\n configurationChanges: true,\n lambdaFunction: evalComplianceFn,\n ruleScope: config.RuleScope.fromResource([config.ResourceType.EC2_INSTANCE]),\n});\n\n// A rule to detect stack drifts\nconst driftRule = new config.CloudFormationStackDriftDetectionCheck(this, 'Drift');\n\n// Topic to which compliance notification events will be published\nconst complianceTopic = new sns.Topic(this, 'ComplianceTopic');\n\n// Send notification on compliance change events\ndriftRule.onComplianceChange('ComplianceChange', {\n target: new targets.SnsTopic(complianceTopic),\n});\n```\n"
|
|
963
|
-
},
|
|
964
681
|
"targets": {
|
|
965
682
|
"dotnet": {
|
|
966
683
|
"namespace": "Amazon.CDK.AWS.Config"
|
|
@@ -974,10 +691,6 @@
|
|
|
974
691
|
}
|
|
975
692
|
},
|
|
976
693
|
"aws-cdk-lib.aws_connect": {
|
|
977
|
-
"locationInModule": {
|
|
978
|
-
"filename": "lib/index.ts",
|
|
979
|
-
"line": 51
|
|
980
|
-
},
|
|
981
694
|
"targets": {
|
|
982
695
|
"dotnet": {
|
|
983
696
|
"namespace": "Amazon.CDK.AWS.Connect"
|
|
@@ -991,10 +704,6 @@
|
|
|
991
704
|
}
|
|
992
705
|
},
|
|
993
706
|
"aws-cdk-lib.aws_cur": {
|
|
994
|
-
"locationInModule": {
|
|
995
|
-
"filename": "lib/index.ts",
|
|
996
|
-
"line": 52
|
|
997
|
-
},
|
|
998
707
|
"targets": {
|
|
999
708
|
"dotnet": {
|
|
1000
709
|
"namespace": "Amazon.CDK.AWS.CUR"
|
|
@@ -1008,10 +717,6 @@
|
|
|
1008
717
|
}
|
|
1009
718
|
},
|
|
1010
719
|
"aws-cdk-lib.aws_customerprofiles": {
|
|
1011
|
-
"locationInModule": {
|
|
1012
|
-
"filename": "lib/index.ts",
|
|
1013
|
-
"line": 53
|
|
1014
|
-
},
|
|
1015
720
|
"targets": {
|
|
1016
721
|
"dotnet": {
|
|
1017
722
|
"namespace": "Amazon.CDK.AWS.CustomerProfiles"
|
|
@@ -1025,10 +730,6 @@
|
|
|
1025
730
|
}
|
|
1026
731
|
},
|
|
1027
732
|
"aws-cdk-lib.aws_databrew": {
|
|
1028
|
-
"locationInModule": {
|
|
1029
|
-
"filename": "lib/index.ts",
|
|
1030
|
-
"line": 54
|
|
1031
|
-
},
|
|
1032
733
|
"targets": {
|
|
1033
734
|
"dotnet": {
|
|
1034
735
|
"namespace": "Amazon.CDK.AWS.DataBrew"
|
|
@@ -1042,10 +743,6 @@
|
|
|
1042
743
|
}
|
|
1043
744
|
},
|
|
1044
745
|
"aws-cdk-lib.aws_datapipeline": {
|
|
1045
|
-
"locationInModule": {
|
|
1046
|
-
"filename": "lib/index.ts",
|
|
1047
|
-
"line": 55
|
|
1048
|
-
},
|
|
1049
746
|
"targets": {
|
|
1050
747
|
"dotnet": {
|
|
1051
748
|
"namespace": "Amazon.CDK.AWS.DataPipeline"
|
|
@@ -1059,10 +756,6 @@
|
|
|
1059
756
|
}
|
|
1060
757
|
},
|
|
1061
758
|
"aws-cdk-lib.aws_datasync": {
|
|
1062
|
-
"locationInModule": {
|
|
1063
|
-
"filename": "lib/index.ts",
|
|
1064
|
-
"line": 56
|
|
1065
|
-
},
|
|
1066
759
|
"targets": {
|
|
1067
760
|
"dotnet": {
|
|
1068
761
|
"namespace": "Amazon.CDK.AWS.DataSync"
|
|
@@ -1076,10 +769,6 @@
|
|
|
1076
769
|
}
|
|
1077
770
|
},
|
|
1078
771
|
"aws-cdk-lib.aws_dax": {
|
|
1079
|
-
"locationInModule": {
|
|
1080
|
-
"filename": "lib/index.ts",
|
|
1081
|
-
"line": 57
|
|
1082
|
-
},
|
|
1083
772
|
"targets": {
|
|
1084
773
|
"dotnet": {
|
|
1085
774
|
"namespace": "Amazon.CDK.AWS.DAX"
|
|
@@ -1093,10 +782,6 @@
|
|
|
1093
782
|
}
|
|
1094
783
|
},
|
|
1095
784
|
"aws-cdk-lib.aws_detective": {
|
|
1096
|
-
"locationInModule": {
|
|
1097
|
-
"filename": "lib/index.ts",
|
|
1098
|
-
"line": 58
|
|
1099
|
-
},
|
|
1100
785
|
"targets": {
|
|
1101
786
|
"dotnet": {
|
|
1102
787
|
"namespace": "Amazon.CDK.AWS.Detective"
|
|
@@ -1110,10 +795,6 @@
|
|
|
1110
795
|
}
|
|
1111
796
|
},
|
|
1112
797
|
"aws-cdk-lib.aws_devopsguru": {
|
|
1113
|
-
"locationInModule": {
|
|
1114
|
-
"filename": "lib/index.ts",
|
|
1115
|
-
"line": 59
|
|
1116
|
-
},
|
|
1117
798
|
"targets": {
|
|
1118
799
|
"dotnet": {
|
|
1119
800
|
"namespace": "Amazon.CDK.AWS.DevOpsGuru"
|
|
@@ -1127,10 +808,6 @@
|
|
|
1127
808
|
}
|
|
1128
809
|
},
|
|
1129
810
|
"aws-cdk-lib.aws_directoryservice": {
|
|
1130
|
-
"locationInModule": {
|
|
1131
|
-
"filename": "lib/index.ts",
|
|
1132
|
-
"line": 60
|
|
1133
|
-
},
|
|
1134
811
|
"targets": {
|
|
1135
812
|
"dotnet": {
|
|
1136
813
|
"namespace": "Amazon.CDK.AWS.DirectoryService"
|
|
@@ -1144,10 +821,6 @@
|
|
|
1144
821
|
}
|
|
1145
822
|
},
|
|
1146
823
|
"aws-cdk-lib.aws_dlm": {
|
|
1147
|
-
"locationInModule": {
|
|
1148
|
-
"filename": "lib/index.ts",
|
|
1149
|
-
"line": 61
|
|
1150
|
-
},
|
|
1151
824
|
"targets": {
|
|
1152
825
|
"dotnet": {
|
|
1153
826
|
"namespace": "Amazon.CDK.AWS.DLM"
|
|
@@ -1161,10 +834,6 @@
|
|
|
1161
834
|
}
|
|
1162
835
|
},
|
|
1163
836
|
"aws-cdk-lib.aws_dms": {
|
|
1164
|
-
"locationInModule": {
|
|
1165
|
-
"filename": "lib/index.ts",
|
|
1166
|
-
"line": 62
|
|
1167
|
-
},
|
|
1168
837
|
"targets": {
|
|
1169
838
|
"dotnet": {
|
|
1170
839
|
"namespace": "Amazon.CDK.AWS.DMS"
|
|
@@ -1178,13 +847,6 @@
|
|
|
1178
847
|
}
|
|
1179
848
|
},
|
|
1180
849
|
"aws-cdk-lib.aws_docdb": {
|
|
1181
|
-
"locationInModule": {
|
|
1182
|
-
"filename": "lib/index.ts",
|
|
1183
|
-
"line": 63
|
|
1184
|
-
},
|
|
1185
|
-
"readme": {
|
|
1186
|
-
"markdown": "# Amazon DocumentDB Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\n## Starting a Clustered Database\n\nTo set up a clustered DocumentDB database, define a `DatabaseCluster`. You must\nalways launch a database in a VPC. Use the `vpcSubnets` attribute to control whether\nyour instances will be launched privately or publicly:\n\n```ts\nconst cluster = new DatabaseCluster(this, 'Database', {\n masterUser: {\n username: 'myuser' // NOTE: 'admin' is reserved by DocumentDB\n },\n instanceType: ec2.InstanceType.of(ec2.InstanceClass.R5, ec2.InstanceSize.LARGE),\n vpcSubnets: {\n subnetType: ec2.SubnetType.PUBLIC,\n },\n vpc\n});\n```\n\nBy default, the master password will be generated and stored in AWS Secrets Manager with auto-generated description.\n\nYour cluster will be empty by default.\n\n## Connecting\n\nTo control who can access the cluster, use the `.connections` attribute. DocumentDB databases have a default port, so\nyou don't need to specify the port:\n\n```ts\ncluster.connections.allowDefaultPortFromAnyIpv4('Open to the world');\n```\n\nThe endpoints to access your database cluster will be available as the `.clusterEndpoint` and `.clusterReadEndpoint`\nattributes:\n\n```ts\nconst writeAddress = cluster.clusterEndpoint.socketAddress; // \"HOSTNAME:PORT\"\n```\n\nIf you have existing security groups you would like to add to the cluster, use the `addSecurityGroups` method. 
Security\ngroups added in this way will not be managed by the `Connections` object of the cluster.\n\n```ts\nconst securityGroup = new ec2.SecurityGroup(stack, 'SecurityGroup', {\n vpc,\n});\ncluster.addSecurityGroups(securityGroup);\n```\n\n## Deletion protection\n\nDeletion protection can be enabled on an Amazon DocumentDB cluster to prevent accidental deletion of the cluster:\n\n```ts\nconst cluster = new DatabaseCluster(this, 'Database', {\n masterUser: {\n username: 'myuser'\n },\n instanceType: ec2.InstanceType.of(ec2.InstanceClass.R5, ec2.InstanceSize.LARGE),\n vpcSubnets: {\n subnetType: ec2.SubnetType.PUBLIC,\n },\n vpc,\n deletionProtection: true // Enable deletion protection.\n});\n```\n\n## Rotating credentials\n\nWhen the master password is generated and stored in AWS Secrets Manager, it can be rotated automatically:\n\n```ts\ncluster.addRotationSingleUser(); // Will rotate automatically after 30 days\n```\n\n[example of setting up master password rotation for a cluster](test/integ.cluster-rotation.lit.ts)\n\nThe multi user rotation scheme is also available:\n\n```ts\ncluster.addRotationMultiUser('MyUser', {\n secret: myImportedSecret // This secret must have the `masterarn` key\n});\n```\n\nIt's also possible to create user credentials together with the cluster and add rotation:\n\n```ts\nconst myUserSecret = new docdb.DatabaseSecret(this, 'MyUserSecret', {\n username: 'myuser',\n masterSecret: cluster.secret\n});\nconst myUserSecretAttached = myUserSecret.attach(cluster); // Adds DB connections information in the secret\n\ncluster.addRotationMultiUser('MyUser', { // Add rotation using the multi user scheme\n secret: myUserSecretAttached // This secret must have the `masterarn` key\n});\n```\n\n**Note**: This user must be created manually in the database using the master credentials.\nThe rotation will start as soon as this user exists.\n\nSee also 
[@aws-cdk/aws-secretsmanager](https://github.com/aws/aws-cdk/blob/master/packages/%40aws-cdk/aws-secretsmanager/README.md) for credentials rotation of existing clusters.\n"
|
|
1187
|
-
},
|
|
1188
850
|
"targets": {
|
|
1189
851
|
"dotnet": {
|
|
1190
852
|
"namespace": "Amazon.CDK.AWS.DocDB"
|
|
@@ -1198,13 +860,6 @@
|
|
|
1198
860
|
}
|
|
1199
861
|
},
|
|
1200
862
|
"aws-cdk-lib.aws_dynamodb": {
|
|
1201
|
-
"locationInModule": {
|
|
1202
|
-
"filename": "lib/index.ts",
|
|
1203
|
-
"line": 64
|
|
1204
|
-
},
|
|
1205
|
-
"readme": {
|
|
1206
|
-
"markdown": "# Amazon DynamoDB Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nHere is a minimal deployable DynamoDB table definition:\n\n```ts\nimport { aws_dynamodb as dynamodb } from 'aws-cdk-lib';\n\nconst table = new dynamodb.Table(this, 'Table', {\n partitionKey: { name: 'id', type: dynamodb.AttributeType.STRING }\n});\n```\n\n## Importing existing tables\n\nTo import an existing table into your CDK application, use the `Table.fromTableName`, `Table.fromTableArn` or `Table.fromTableAttributes`\nfactory method. This method accepts table name or table ARN which describes the properties of an already\nexisting table:\n\n```ts\nconst table = Table.fromTableArn(this, 'ImportedTable', 'arn:aws:dynamodb:us-east-1:111111111:table/my-table');\n// now you can just call methods on the table\ntable.grantReadWriteData(user);\n```\n\nIf you intend to use the `tableStreamArn` (including indirectly, for example by creating an\n`@aws-cdk/aws-lambda-event-source.DynamoEventSource` on the imported table), you *must* use the\n`Table.fromTableAttributes` method and the `tableStreamArn` property *must* be populated.\n\n## Keys\n\nWhen a table is defined, you must define it's schema using the `partitionKey`\n(required) and `sortKey` (optional) properties.\n\n## Billing Mode\n\nDynamoDB supports two billing modes:\n\n* PROVISIONED - the default mode where the table and global secondary indexes have configured read and write capacity.\n* PAY_PER_REQUEST - on-demand pricing and scaling. 
You only pay for what you use and there is no read and write capacity for the table or its global secondary indexes.\n\n```ts\nimport { aws_dynamodb as dynamodb } from 'aws-cdk-lib';\n\nconst table = new dynamodb.Table(this, 'Table', {\n partitionKey: { name: 'id', type: dynamodb.AttributeType.STRING },\n billingMode: dynamodb.BillingMode.PAY_PER_REQUEST\n});\n```\n\nFurther reading:\nhttps://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.\n\n## Configure AutoScaling for your table\n\nYou can have DynamoDB automatically raise and lower the read and write capacities\nof your table by setting up autoscaling. You can use this to either keep your\ntables at a desired utilization level, or by scaling up and down at pre-configured\ntimes of the day:\n\nAuto-scaling is only relevant for tables with the billing mode, PROVISIONED.\n\n[Example of configuring autoscaling](test/integ.autoscaling.lit.ts)\n\nFurther reading:\nhttps://docs.aws.amazon.com/amazondynamodb/latest/developerguide/AutoScaling.html\nhttps://aws.amazon.com/blogs/database/how-to-use-aws-cloudformation-to-configure-auto-scaling-for-amazon-dynamodb-tables-and-indexes/\n\n## Amazon DynamoDB Global Tables\n\nYou can create DynamoDB Global Tables by setting the `replicationRegions` property on a `Table`:\n\n```ts\nimport { aws_dynamodb as dynamodb } from 'aws-cdk-lib';\n\nconst globalTable = new dynamodb.Table(this, 'Table', {\n partitionKey: { name: 'id', type: dynamodb.AttributeType.STRING },\n replicationRegions: ['us-east-1', 'us-east-2', 'us-west-2'],\n});\n```\n\nWhen doing so, a CloudFormation Custom Resource will be added to the stack in order to create the replica tables in the\nselected regions.\n\nThe default billing mode for Global Tables is `PAY_PER_REQUEST`.\nIf you want to use `PROVISIONED`,\nyou have to make sure write auto-scaling is enabled for that Table:\n\n```ts\nconst globalTable = new dynamodb.Table(this, 'Table', {\n partitionKey: { name: 'id', 
type: dynamodb.AttributeType.STRING },\n replicationRegions: ['us-east-1', 'us-east-2', 'us-west-2'],\n billingMode: BillingMode.PROVISIONED,\n});\n\nglobalTable.autoScaleWriteCapacity({\n minCapacity: 1,\n maxCapacity: 10,\n}).scaleOnUtilization({ targetUtilizationPercent: 75 });\n```\n\nWhen adding a replica region for a large table, you might want to increase the\ntimeout for the replication operation:\n\n```ts\nconst globalTable = new dynamodb.Table(this, 'Table', {\n partitionKey: { name: 'id', type: dynamodb.AttributeType.STRING },\n replicationRegions: ['us-east-1', 'us-east-2', 'us-west-2'],\n replicationTimeout: Duration.hours(2), // defaults to Duration.minutes(30)\n});\n```\n\n## Encryption\n\nAll user data stored in Amazon DynamoDB is fully encrypted at rest. When creating a new table, you can choose to encrypt using the following customer master keys (CMK) to encrypt your table:\n\n* AWS owned CMK - By default, all tables are encrypted under an AWS owned customer master key (CMK) in the DynamoDB service account (no additional charges apply).\n* AWS managed CMK - AWS KMS keys (one per region) are created in your account, managed, and used on your behalf by AWS DynamoDB (AWS KMS charges apply).\n* Customer managed CMK - You have full control over the KMS key used to encrypt the DynamoDB Table (AWS KMS charges apply).\n\nCreating a Table encrypted with a customer managed CMK:\n\n```ts\nimport { aws_dynamodb as dynamodb } from 'aws-cdk-lib';\n\nconst table = new dynamodb.Table(stack, 'MyTable', {\n partitionKey: { name: 'id', type: dynamodb.AttributeType.STRING },\n encryption: TableEncryption.CUSTOMER_MANAGED,\n});\n\n// You can access the CMK that was added to the stack on your behalf by the Table construct via:\nconst tableEncryptionKey = table.encryptionKey;\n```\n\nYou can also supply your own key:\n\n```ts\nimport { aws_dynamodb as dynamodb } from 'aws-cdk-lib';\nimport { aws_kms as kms } from 'aws-cdk-lib';\n\nconst encryptionKey = new 
kms.Key(stack, 'Key', {\n enableKeyRotation: true\n});\nconst table = new dynamodb.Table(stack, 'MyTable', {\n partitionKey: { name: 'id', type: dynamodb.AttributeType.STRING },\n encryption: TableEncryption.CUSTOMER_MANAGED,\n encryptionKey, // This will be exposed as table.encryptionKey\n});\n```\n\nIn order to use the AWS managed CMK instead, change the code to:\n\n```ts\nimport { aws_dynamodb as dynamodb } from 'aws-cdk-lib';\n\nconst table = new dynamodb.Table(stack, 'MyTable', {\n partitionKey: { name: 'id', type: dynamodb.AttributeType.STRING },\n encryption: TableEncryption.AWS_MANAGED,\n});\n\n// In this case, the CMK _cannot_ be accessed through table.encryptionKey.\n```\n\n## Get schema of table or secondary indexes\n\nTo get the partition key and sort key of the table or indexes you have configured:\n\n```ts\nconst { partitionKey, sortKey } = table.schema();\n\n// In case you want to get schema details for any secondary index\n\nconst { partitionKey, sortKey } = table.schema(INDEX_NAME);\n```\n\n## Kinesis Stream\n\nA Kinesis Data Stream can be configured on the DynamoDB table to capture item-level changes.\n\n```ts\nimport { aws_dynamodb as dynamodb } from 'aws-cdk-lib';\nimport { aws_kinesis as kinesis } from 'aws-cdk-lib';\n\nconst stream = new kinesis.Stream(this, 'Stream');\n\nconst table = new dynamodb.Table(this, 'Table', {\n partitionKey: { name: 'id', type: dynamodb.AttributeType.STRING },\n kinesisStream: stream,\n});\n```\n"
|
|
1207
|
-
},
|
|
1208
863
|
"targets": {
|
|
1209
864
|
"dotnet": {
|
|
1210
865
|
"namespace": "Amazon.CDK.AWS.DynamoDB"
|
|
@@ -1218,13 +873,6 @@
|
|
|
1218
873
|
}
|
|
1219
874
|
},
|
|
1220
875
|
"aws-cdk-lib.aws_ec2": {
|
|
1221
|
-
"locationInModule": {
|
|
1222
|
-
"filename": "lib/index.ts",
|
|
1223
|
-
"line": 65
|
|
1224
|
-
},
|
|
1225
|
-
"readme": {
|
|
1226
|
-
"markdown": "# Amazon EC2 Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\n\nThe `@aws-cdk/aws-ec2` package contains primitives for setting up networking and\ninstances.\n\n```ts nofixture\nimport { aws_ec2 as ec2 } from 'aws-cdk-lib';\n```\n\n## VPC\n\nMost projects need a Virtual Private Cloud to provide security by means of\nnetwork partitioning. This is achieved by creating an instance of\n`Vpc`:\n\n```ts\nconst vpc = new ec2.Vpc(this, 'VPC');\n```\n\nAll default constructs require EC2 instances to be launched inside a VPC, so\nyou should generally start by defining a VPC whenever you need to launch\ninstances for your project.\n\n### Subnet Types\n\nA VPC consists of one or more subnets that instances can be placed into. CDK\ndistinguishes three different subnet types:\n\n* **Public (`SubnetType.PUBLIC`)** - public subnets connect directly to the Internet using an\n Internet Gateway. If you want your instances to have a public IP address\n and be directly reachable from the Internet, you must place them in a\n public subnet.\n* **Private with Internet Access (`SubnetType.PRIVATE_WITH_NAT`)** - instances in private subnets are not directly routable from the\n Internet, and connect out to the Internet via a NAT gateway. By default, a\n NAT gateway is created in every public subnet for maximum availability. Be\n aware that you will be charged for NAT gateways.\n* **Isolated (`SubnetType.PRIVATE_ISOLATED`)** - isolated subnets do not route from or to the Internet, and\n as such do not require NAT gateways. They can only connect to or be\n connected to from other instances in the same VPC. A default VPC configuration\n will not include isolated subnets,\n\nA default VPC configuration will create public and **private** subnets. However, if\n`natGateways:0` **and** `subnetConfiguration` is undefined, default VPC configuration\nwill create public and **isolated** subnets. 
See [*Advanced Subnet Configuration*](#advanced-subnet-configuration)\nbelow for information on how to change the default subnet configuration.\n\nConstructs using the VPC will \"launch instances\" (or more accurately, create\nElastic Network Interfaces) into one or more of the subnets. They all accept\na property called `subnetSelection` (sometimes called `vpcSubnets`) to allow\nyou to select in what subnet to place the ENIs, usually defaulting to\n*private* subnets if the property is omitted.\n\nIf you would like to save on the cost of NAT gateways, you can use\n*isolated* subnets instead of *private* subnets (as described in Advanced\n*Subnet Configuration*). If you need private instances to have\ninternet connectivity, another option is to reduce the number of NAT gateways\ncreated by setting the `natGateways` property to a lower value (the default\nis one NAT gateway per availability zone). Be aware that this may have\navailability implications for your application.\n\n[Read more about\nsubnets](https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html).\n\n### Control over availability zones\n\nBy default, a VPC will spread over at most 3 Availability Zones available to\nit. To change the number of Availability Zones that the VPC will spread over,\nspecify the `maxAzs` property when defining it.\n\nThe number of Availability Zones that are available depends on the *region*\nand *account* of the Stack containing the VPC. If the [region and account are\nspecified](https://docs.aws.amazon.com/cdk/latest/guide/environments.html) on\nthe Stack, the CLI will [look up the existing Availability\nZones](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#using-regions-availability-zones-describe)\nand get an accurate count. 
If region and account are not specified, the stack\ncould be deployed anywhere and it will have to make a safe choice, limiting\nitself to 2 Availability Zones.\n\nTherefore, to get the VPC to spread over 3 or more availability zones, you\nmust specify the environment where the stack will be deployed.\n\nYou can gain full control over the availability zones selection strategy by overriding the Stack's [`get availabilityZones()`](https://github.com/aws/aws-cdk/blob/master/packages/@aws-cdk/core/lib/stack.ts) method:\n\n```ts\nclass MyStack extends Stack {\n\n get availabilityZones(): string[] {\n return ['us-west-2a', 'us-west-2b'];\n }\n\n constructor(scope: Construct, id: string, props?: StackProps) {\n super(scope, id, props);\n ...\n }\n}\n```\n\nNote that overriding the `get availabilityZones()` method will override the default behavior for all constructs defined within the Stack.\n\n### Choosing subnets for resources\n\nWhen creating resources that create Elastic Network Interfaces (such as\ndatabases or instances), there is an option to choose which subnets to place\nthem in. For example, a VPC endpoint by default is placed into a subnet in\nevery availability zone, but you can override which subnets to use. 
The property\nis typically called one of `subnets`, `vpcSubnets` or `subnetSelection`.\n\nThe example below will place the endpoint into two AZs (`us-east-1a` and `us-east-1c`),\nin Isolated subnets:\n\n```ts\nnew InterfaceVpcEndpoint(stack, 'VPC Endpoint', {\n vpc,\n service: new InterfaceVpcEndpointService('com.amazonaws.vpce.us-east-1.vpce-svc-uuddlrlrbastrtsvc', 443),\n subnets: {\n subnetType: SubnetType.ISOLATED,\n availabilityZones: ['us-east-1a', 'us-east-1c']\n }\n});\n```\n\nYou can also specify specific subnet objects for granular control:\n\n```ts\nnew InterfaceVpcEndpoint(stack, 'VPC Endpoint', {\n vpc,\n service: new InterfaceVpcEndpointService('com.amazonaws.vpce.us-east-1.vpce-svc-uuddlrlrbastrtsvc', 443),\n subnets: {\n subnets: [subnet1, subnet2]\n }\n});\n```\n\nWhich subnets are selected is evaluated as follows:\n\n* `subnets`: if specific subnet objects are supplied, these are selected, and no other\n logic is used.\n* `subnetType`/`subnetGroupName`: otherwise, a set of subnets is selected by\n supplying either type or name:\n * `subnetType` will select all subnets of the given type.\n * `subnetGroupName` should be used to distinguish between multiple groups of subnets of\n the same type (for example, you may want to separate your application instances and your\n RDS instances into two distinct groups of Isolated subnets).\n * If neither are given, the first available subnet group of a given type that\n exists in the VPC will be used, in this order: Private, then Isolated, then Public.\n In short: by default ENIs will preferentially be placed in subnets not connected to\n the Internet.\n* `availabilityZones`/`onePerAz`: finally, some availability-zone based filtering may be done.\n This filtering by availability zones will only be possible if the VPC has been created or\n looked up in a non-environment agnostic stack (so account and region have been set and\n availability zones have been looked up).\n * `availabilityZones`: only the specific 
subnets from the selected subnet groups that are\n in the given availability zones will be returned.\n * `onePerAz`: per availability zone, a maximum of one subnet will be returned (Useful for resource\n types that do not allow creating two ENIs in the same availability zone).\n* `subnetFilters`: additional filtering on subnets using any number of user-provided filters which\n extend `SubnetFilter`. The following methods on the `SubnetFilter` class can be used to create\n a filter:\n * `byIds`: chooses subnets from a list of ids\n * `availabilityZones`: chooses subnets in the provided list of availability zones\n * `onePerAz`: chooses at most one subnet per availability zone\n * `containsIpAddresses`: chooses a subnet which contains *any* of the listed ip addresses\n * `byCidrMask`: chooses subnets that have the provided CIDR netmask\n\n### Using NAT instances\n\nBy default, the `Vpc` construct will create NAT *gateways* for you, which\nare managed by AWS. If you would prefer to use your own managed NAT\n*instances* instead, specify a different value for the `natGatewayProvider`\nproperty, as follows:\n\n[using NAT instances](test/integ.nat-instances.lit.ts)\n\nThe construct will automatically search for the most recent NAT gateway AMI.\nIf you prefer to use a custom AMI, use `machineImage:\nMachineImage.genericLinux({ ... })` and configure the right AMI ID for the\nregions you want to deploy to.\n\nBy default, the NAT instances will route all traffic. To control what traffic\ngets routed, pass `allowAllTraffic: false` and access the\n`NatInstanceProvider.connections` member after having passed it to the VPC:\n\n```ts\nconst provider = NatProvider.instance({\n instanceType: /* ... 
*/,\n allowAllTraffic: false,\n});\nnew Vpc(stack, 'TheVPC', {\n natGatewayProvider: provider,\n});\nprovider.connections.allowFrom(Peer.ipv4('1.2.3.4/8'), Port.tcp(80));\n```\n\n### Advanced Subnet Configuration\n\nIf the default VPC configuration (public and private subnets spanning the\nsize of the VPC) doesn't suffice for you, you can configure what subnets to\ncreate by specifying the `subnetConfiguration` property. It allows you\nto configure the number and size of all subnets. Specifying an advanced\nsubnet configuration could look like this:\n\n```ts\nconst vpc = new ec2.Vpc(this, 'TheVPC', {\n  // 'cidr' configures the IP range and size of the entire VPC.\n  // The IP space will be divided over the configured subnets.\n  cidr: '10.0.0.0/21',\n\n  // 'maxAzs' configures the maximum number of availability zones to use\n  maxAzs: 3,\n\n  // 'subnetConfiguration' specifies the \"subnet groups\" to create.\n  // Every subnet group will have a subnet for each AZ, so this\n  // configuration will create `3 groups × 3 AZs = 9` subnets.\n  subnetConfiguration: [\n    {\n      // 'subnetType' controls Internet access, as described above.\n      subnetType: ec2.SubnetType.PUBLIC,\n\n      // 'name' is used to name this particular subnet group. You will have to\n      // use the name for subnet selection if you have more than one subnet\n      // group of the same type.\n      name: 'Ingress',\n\n      // 'cidrMask' specifies the IP addresses in the range of individual\n      // subnets in the group. 
Each of the subnets in this group will contain\n // `2^(32 address bits - 24 subnet bits) - 2 reserved addresses = 254`\n // usable IP addresses.\n //\n // If 'cidrMask' is left out the available address space is evenly\n // divided across the remaining subnet groups.\n cidrMask: 24,\n },\n {\n cidrMask: 24,\n name: 'Application',\n subnetType: ec2.SubnetType.PRIVATE_WITH_NAT,\n },\n {\n cidrMask: 28,\n name: 'Database',\n subnetType: ec2.SubnetType.PRIVATE_ISOLATED,\n\n // 'reserved' can be used to reserve IP address space. No resources will\n // be created for this subnet, but the IP range will be kept available for\n // future creation of this subnet, or even for future subdivision.\n reserved: true\n }\n ],\n});\n```\n\nThe example above is one possible configuration, but the user can use the\nconstructs above to implement many other network configurations.\n\nThe `Vpc` from the above configuration in a Region with three\navailability zones will be the following:\n\nSubnet Name |Type |IP Block |AZ|Features\n------------------|----------|--------------|--|--------\nIngressSubnet1 |`PUBLIC` |`10.0.0.0/24` |#1|NAT Gateway\nIngressSubnet2 |`PUBLIC` |`10.0.1.0/24` |#2|NAT Gateway\nIngressSubnet3 |`PUBLIC` |`10.0.2.0/24` |#3|NAT Gateway\nApplicationSubnet1|`PRIVATE` |`10.0.3.0/24` |#1|Route to NAT in IngressSubnet1\nApplicationSubnet2|`PRIVATE` |`10.0.4.0/24` |#2|Route to NAT in IngressSubnet2\nApplicationSubnet3|`PRIVATE` |`10.0.5.0/24` |#3|Route to NAT in IngressSubnet3\nDatabaseSubnet1 |`ISOLATED`|`10.0.6.0/28` |#1|Only routes within the VPC\nDatabaseSubnet2 |`ISOLATED`|`10.0.6.16/28`|#2|Only routes within the VPC\nDatabaseSubnet3 |`ISOLATED`|`10.0.6.32/28`|#3|Only routes within the VPC\n\n### Accessing the Internet Gateway\n\nIf you need access to the internet gateway, you can get its ID like so:\n\n```ts\nconst igwId = vpc.internetGatewayId;\n```\n\nFor a VPC with only `ISOLATED` subnets, this value will be undefined.\n\nThis is only supported for VPCs created 
in the stack - currently you're\nunable to get the ID for imported VPCs. To do that you'd have to specifically\nlook up the Internet Gateway by name, which would require knowing the name\nbeforehand.\n\nThis can be useful for configuring routing using a combination of gateways:\nfor more information see [Routing](#routing) below.\n\n#### Routing\n\nIt's possible to add routes to any subnets using the `addRoute()` method. If for\nexample you want an isolated subnet to have a static route via the default\nInternet Gateway created for the public subnet - perhaps for routing a VPN\nconnection - you can do so like this:\n\n```ts\nconst vpc = new ec2.Vpc(this, \"VPC\", {\n  subnetConfiguration: [{\n      subnetType: SubnetType.PUBLIC,\n      name: 'Public',\n    },{\n      subnetType: SubnetType.ISOLATED,\n      name: 'Isolated',\n    }]\n});\n(vpc.isolatedSubnets[0] as Subnet).addRoute(\"StaticRoute\", {\n    routerId: vpc.internetGatewayId,\n    routerType: RouterType.GATEWAY,\n    destinationCidrBlock: \"8.8.8.8/32\",\n})\n```\n\n*Note that we cast to `Subnet` here because the list of subnets only returns an\n`ISubnet`.*\n\n### Reserving subnet IP space\n\nThere are situations where the IP space for a subnet or number of subnets\nwill need to be reserved. This is useful in situations where subnets would\nneed to be added after the vpc is originally deployed, without causing IP\nrenumbering for existing subnets. 
The IP space for a subnet may be reserved\nby setting the `reserved` subnetConfiguration property to true, as shown\nbelow:\n\n```ts\nconst vpc = new ec2.Vpc(this, 'TheVPC', {\n  natGateways: 1,\n  subnetConfiguration: [\n    {\n      cidrMask: 26,\n      name: 'Public',\n      subnetType: ec2.SubnetType.PUBLIC,\n    },\n    {\n      cidrMask: 26,\n      name: 'Application1',\n      subnetType: ec2.SubnetType.PRIVATE_WITH_NAT,\n    },\n    {\n      cidrMask: 26,\n      name: 'Application2',\n      subnetType: ec2.SubnetType.PRIVATE_WITH_NAT,\n      reserved: true,   // <---- This subnet group is reserved\n    },\n    {\n      cidrMask: 27,\n      name: 'Database',\n      subnetType: ec2.SubnetType.ISOLATED,\n    }\n  ],\n});\n```\n\nIn the example above, the subnet for Application2 is not actually provisioned\nbut its IP space is still reserved. If in the future this subnet needs to be\nprovisioned, then the `reserved: true` property should be removed. Reserving\nparts of the IP space prevents the other subnets from getting renumbered.\n\n### Sharing VPCs between stacks\n\nIf you are creating multiple `Stack`s inside the same CDK application, you\ncan reuse a VPC defined in one Stack in another by simply passing the VPC\ninstance around:\n\n[sharing VPCs between stacks](test/integ.share-vpcs.lit.ts)\n\n### Importing an existing VPC\n\nIf your VPC is created outside your CDK app, you can use `Vpc.fromLookup()`.\nThe CDK CLI will search for the specified VPC in the stack's region and\naccount, and import the subnet configuration. Looking up can be done by VPC\nID, but more flexibly by searching for a specific tag on the VPC.\n\nSubnet types will be determined from the `aws-cdk:subnet-type` tag on the\nsubnet if it exists, or the presence of a route to an Internet Gateway\notherwise. 
Subnet names will be determined from the `aws-cdk:subnet-name` tag\non the subnet if it exists, or will mirror the subnet type otherwise (i.e.\na public subnet will have the name `\"Public\"`).\n\nThe result of the `Vpc.fromLookup()` operation will be written to a file\ncalled `cdk.context.json`. You must commit this file to source control so\nthat the lookup values are available in non-privileged environments such\nas CI build steps, and to ensure your template builds are repeatable.\n\nHere's how `Vpc.fromLookup()` can be used:\n\n[importing existing VPCs](test/integ.import-default-vpc.lit.ts)\n\n`Vpc.fromLookup` is the recommended way to import VPCs. If for whatever\nreason you do not want to use the context mechanism to look up a VPC at\nsynthesis time, you can also use `Vpc.fromVpcAttributes`. This has the\nfollowing limitations:\n\n* Every subnet group in the VPC must have a subnet in each availability zone\n (for example, each AZ must have both a public and private subnet). Asymmetric\n VPCs are not supported.\n* All VpcId, SubnetId, RouteTableId, ... parameters must either be known at\n synthesis time, or they must come from deploy-time list parameters whose\n deploy-time lengths are known at synthesis time.\n\nUsing `Vpc.fromVpcAttributes()` looks like this:\n\n```ts\nconst vpc = ec2.Vpc.fromVpcAttributes(stack, 'VPC', {\n vpcId: 'vpc-1234',\n availabilityZones: ['us-east-1a', 'us-east-1b'],\n\n // Either pass literals for all IDs\n publicSubnetIds: ['s-12345', 's-67890'],\n\n // OR: import a list of known length\n privateSubnetIds: Fn.importListValue('PrivateSubnetIds', 2),\n\n // OR: split an imported string to a list of known length\n isolatedSubnetIds: Fn.split(',', ssm.StringParameter.valueForStringParameter(stack, `MyParameter`), 2),\n});\n```\n\n## Allowing Connections\n\nIn AWS, all network traffic in and out of **Elastic Network Interfaces** (ENIs)\nis controlled by **Security Groups**. 
You can think of Security Groups as a\nfirewall with a set of rules. By default, Security Groups allow no incoming\n(ingress) traffic and all outgoing (egress) traffic. You can add ingress rules\nto them to allow incoming traffic streams. To exert fine-grained control over\negress traffic, set `allowAllOutbound: false` on the `SecurityGroup`, after\nwhich you can add egress traffic rules.\n\nYou can manipulate Security Groups directly:\n\n```ts fixture=with-vpc\nconst mySecurityGroup = new ec2.SecurityGroup(this, 'SecurityGroup', {\n vpc,\n description: 'Allow ssh access to ec2 instances',\n allowAllOutbound: true // Can be set to false\n});\nmySecurityGroup.addIngressRule(ec2.Peer.anyIpv4(), ec2.Port.tcp(22), 'allow ssh access from the world');\n```\n\nAll constructs that create ENIs on your behalf (typically constructs that create\nEC2 instances or other VPC-connected resources) will all have security groups\nautomatically assigned. Those constructs have an attribute called\n**connections**, which is an object that makes it convenient to update the\nsecurity groups. If you want to allow connections between two constructs that\nhave security groups, you have to add an **Egress** rule to one Security Group,\nand an **Ingress** rule to the other. 
The connections object will automatically\ntake care of this for you:\n\n```ts fixture=conns\n// Allow connections from anywhere\nloadBalancer.connections.allowFromAnyIpv4(ec2.Port.tcp(443), 'Allow inbound HTTPS');\n\n// The same, but an explicit IP address\nloadBalancer.connections.allowFrom(ec2.Peer.ipv4('1.2.3.4/32'), ec2.Port.tcp(443), 'Allow inbound HTTPS');\n\n// Allow connection between AutoScalingGroups\nappFleet.connections.allowTo(dbFleet, ec2.Port.tcp(443), 'App can call database');\n```\n\n### Connection Peers\n\nThere are various classes that implement the connection peer part:\n\n```ts fixture=conns\n// Simple connection peers\nlet peer = ec2.Peer.ipv4('10.0.0.0/16');\npeer = ec2.Peer.anyIpv4();\npeer = ec2.Peer.ipv6('::0/0');\npeer = ec2.Peer.anyIpv6();\npeer = ec2.Peer.prefixList('pl-12345');\nappFleet.connections.allowTo(peer, ec2.Port.tcp(443), 'Allow outbound HTTPS');\n```\n\nAny object that has a security group can itself be used as a connection peer:\n\n```ts fixture=conns\n// These automatically create appropriate ingress and egress rules in both security groups\nfleet1.connections.allowTo(fleet2, ec2.Port.tcp(80), 'Allow between fleets');\n\nappFleet.connections.allowFromAnyIpv4(ec2.Port.tcp(80), 'Allow from load balancer');\n```\n\n### Port Ranges\n\nThe connections that are allowed are specified by port ranges. A number of classes provide\nthe connection specifier:\n\n```ts\nec2.Port.tcp(80)\nec2.Port.tcpRange(60000, 65535)\nec2.Port.allTcp()\nec2.Port.allTraffic()\n```\n\n> NOTE: This set is not complete yet; for example, there is no library support for ICMP at the moment.\n> However, you can write your own classes to implement those.\n\n### Default Ports\n\nSome Constructs have default ports associated with them. 
For example, the\nlistener of a load balancer does (it's the public port), or instances of an\nRDS database (it's the port the database is accepting connections on).\n\nIf the object you're calling the peering method on has a default port associated with it, you can call\n`allowDefaultPortFrom()` and omit the port specifier. If the argument has an associated default port, call\n`allowDefaultPortTo()`.\n\nFor example:\n\n```ts fixture=conns\n// Port implicit in listener\nlistener.connections.allowDefaultPortFromAnyIpv4('Allow public');\n\n// Port implicit in peer\nappFleet.connections.allowDefaultPortTo(rdsDatabase, 'Fleet can access database');\n```\n\n### Security group rules\n\nBy default, security group rules will be added inline to the security group in the output CloudFormation\ntemplate, if applicable.  This includes any static rules by IP address and port range.  This\noptimization helps to minimize the size of the template.\n\nIn some environments this is not desirable, for example if your security group access is controlled\nvia tags. You can disable inline rules per security group or globally via the context key\n`@aws-cdk/aws-ec2.securityGroupDisableInlineRules`.\n\n```ts fixture=with-vpc\nconst mySecurityGroupWithoutInlineRules = new ec2.SecurityGroup(this, 'SecurityGroup', {\n  vpc,\n  description: 'Allow ssh access to ec2 instances',\n  allowAllOutbound: true,\n  disableInlineRules: true\n});\n//This will add the rule as an external cloud formation construct\nmySecurityGroupWithoutInlineRules.addIngressRule(ec2.Peer.anyIpv4(), ec2.Port.tcp(22), 'allow ssh access from the world');\n```\n\n## Machine Images (AMIs)\n\nAMIs control the OS that gets launched when you start your EC2 instance. The EC2\nlibrary contains constructs to select the AMI you want to use.\n\nDepending on the type of AMI, you select it a different way. 
Here are some\nexamples of things you might want to use:\n\n[example of creating images](test/example.images.lit.ts)\n\n> NOTE: The AMIs selected by `MachineImage.lookup()` will be cached in\n> `cdk.context.json`, so that your AutoScalingGroup instances aren't replaced while\n> you are making unrelated changes to your CDK app.\n>\n> To query for the latest AMI again, remove the relevant cache entry from\n> `cdk.context.json`, or use the `cdk context` command. For more information, see\n> [Runtime Context](https://docs.aws.amazon.com/cdk/latest/guide/context.html) in the CDK\n> developer guide.\n>\n> `MachineImage.genericLinux()`, `MachineImage.genericWindows()` will use `CfnMapping` in\n> an agnostic stack.\n\n## Special VPC configurations\n\n### VPN connections to a VPC\n\nCreate your VPC with VPN connections by specifying the `vpnConnections` props (keys are construct `id`s):\n\n```ts\nconst vpc = new ec2.Vpc(this, 'MyVpc', {\n vpnConnections: {\n dynamic: { // Dynamic routing (BGP)\n ip: '1.2.3.4'\n },\n static: { // Static routing\n ip: '4.5.6.7',\n staticRoutes: [\n '192.168.10.0/24',\n '192.168.20.0/24'\n ]\n }\n }\n});\n```\n\nTo create a VPC that can accept VPN connections, set `vpnGateway` to `true`:\n\n```ts\nconst vpc = new ec2.Vpc(this, 'MyVpc', {\n vpnGateway: true\n});\n```\n\nVPN connections can then be added:\n\n```ts fixture=with-vpc\nvpc.addVpnConnection('Dynamic', {\n ip: '1.2.3.4'\n});\n```\n\nBy default, routes will be propagated on the route tables associated with the private subnets. If no\nprivate subnets exists, isolated subnets are used. If no isolated subnets exists, public subnets are\nused. 
Use the `Vpc` property `vpnRoutePropagation` to customize this behavior.\n\nVPN connections expose [metrics (cloudwatch.Metric)](https://github.com/aws/aws-cdk/blob/master/packages/%40aws-cdk/aws-cloudwatch/README.md) across all tunnels in the account/region and per connection:\n\n```ts fixture=with-vpc\n// Across all tunnels in the account/region\nconst allDataOut = ec2.VpnConnection.metricAllTunnelDataOut();\n\n// For a specific vpn connection\nconst vpnConnection = vpc.addVpnConnection('Dynamic', {\n ip: '1.2.3.4'\n});\nconst state = vpnConnection.metricTunnelState();\n```\n\n### VPC endpoints\n\nA VPC endpoint enables you to privately connect your VPC to supported AWS services and VPC endpoint services powered by PrivateLink without requiring an internet gateway, NAT device, VPN connection, or AWS Direct Connect connection. Instances in your VPC do not require public IP addresses to communicate with resources in the service. Traffic between your VPC and the other service does not leave the Amazon network.\n\nEndpoints are virtual devices. They are horizontally scaled, redundant, and highly available VPC components that allow communication between instances in your VPC and services without imposing availability risks or bandwidth constraints on your network traffic.\n\n[example of setting up VPC endpoints](test/integ.vpc-endpoint.lit.ts)\n\nBy default, CDK will place a VPC endpoint in one subnet per AZ. 
If you wish to override the AZs CDK places the VPC endpoint in,\nuse the `subnets` parameter as follows:\n\n```ts\nnew InterfaceVpcEndpoint(stack, 'VPC Endpoint', {\n vpc,\n service: new InterfaceVpcEndpointService('com.amazonaws.vpce.us-east-1.vpce-svc-uuddlrlrbastrtsvc', 443),\n // Choose which availability zones to place the VPC endpoint in, based on\n // available AZs\n subnets: {\n availabilityZones: ['us-east-1a', 'us-east-1c']\n }\n});\n```\n\nPer the [AWS documentation](https://aws.amazon.com/premiumsupport/knowledge-center/interface-endpoint-availability-zone/), not all\nVPC endpoint services are available in all AZs. If you specify the parameter `lookupSupportedAzs`, CDK attempts to discover which\nAZs an endpoint service is available in, and will ensure the VPC endpoint is not placed in a subnet that doesn't match those AZs.\nThese AZs will be stored in cdk.context.json.\n\n```ts\nnew InterfaceVpcEndpoint(stack, 'VPC Endpoint', {\n vpc,\n service: new InterfaceVpcEndpointService('com.amazonaws.vpce.us-east-1.vpce-svc-uuddlrlrbastrtsvc', 443),\n // Choose which availability zones to place the VPC endpoint in, based on\n // available AZs\n lookupSupportedAzs: true\n});\n```\n\nPre-defined AWS services are defined in the [InterfaceVpcEndpointAwsService](lib/vpc-endpoint.ts) class, and can be used to\ncreate VPC endpoints without having to configure name, ports, etc. 
For example, a Keyspaces endpoint can be created for\nuse in your VPC:\n\n``` ts\nnew InterfaceVpcEndpoint(stack, 'VPC Endpoint', { vpc, service: InterfaceVpcEndpointAwsService.KEYSPACES });\n```\n\n#### Security groups for interface VPC endpoints\n\nBy default, interface VPC endpoints create a new security group and traffic is **not**\nautomatically allowed from the VPC CIDR.\n\nUse the `connections` object to allow traffic to flow to the endpoint:\n\n```ts fixture=conns\nmyEndpoint.connections.allowDefaultPortFromAnyIpv4();\n```\n\nAlternatively, existing security groups can be used by specifying the `securityGroups` prop.\n\n### VPC endpoint services\n\nA VPC endpoint service enables you to expose a Network Load Balancer(s) as a provider service to consumers, who connect to your service over a VPC endpoint. You can restrict access to your service via allowed principals (anything that extends ArnPrincipal), and require that new connections be manually accepted.\n\n```ts\nnew VpcEndpointService(this, 'EndpointService', {\n vpcEndpointServiceLoadBalancers: [networkLoadBalancer1, networkLoadBalancer2],\n acceptanceRequired: true,\n allowedPrincipals: [new ArnPrincipal('arn:aws:iam::123456789012:root')]\n});\n```\n\nEndpoint services support private DNS, which makes it easier for clients to connect to your service by automatically setting up DNS in their VPC.\nYou can enable private DNS on an endpoint service like so:\n\n```ts\nimport { VpcEndpointServiceDomainName } from 'aws-cdk-lib/aws-route53';\n\nnew VpcEndpointServiceDomainName(stack, 'EndpointDomain', {\n endpointService: vpces,\n domainName: 'my-stuff.aws-cdk.dev',\n publicHostedZone: zone,\n});\n```\n\nNote: The domain name must be owned (registered through Route53) by the account the endpoint service is in, or delegated to the account.\nThe VpcEndpointServiceDomainName will handle the AWS side of domain verification, the process for which can be 
found\n[here](https://docs.aws.amazon.com/vpc/latest/userguide/endpoint-services-dns-validation.html)\n\n### Client VPN endpoint\n\nAWS Client VPN is a managed client-based VPN service that enables you to securely access your AWS\nresources and resources in your on-premises network. With Client VPN, you can access your resources\nfrom any location using an OpenVPN-based VPN client.\n\nUse the `addClientVpnEndpoint()` method to add a client VPN endpoint to a VPC:\n\n```ts fixture=client-vpn\nvpc.addClientVpnEndpoint('Endpoint', {\n cidr: '10.100.0.0/16',\n serverCertificateArn: 'arn:aws:acm:us-east-1:123456789012:certificate/server-certificate-id',\n // Mutual authentication\n clientCertificateArn: 'arn:aws:acm:us-east-1:123456789012:certificate/client-certificate-id',\n // User-based authentication\n userBasedAuthentication: ec2.ClientVpnUserBasedAuthentication.federated(samlProvider),\n});\n```\n\nThe endpoint must use at least one [authentication method](https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/client-authentication.html):\n\n* Mutual authentication with a client certificate\n* User-based authentication (directory or federated)\n\nIf user-based authentication is used, the [self-service portal URL](https://docs.aws.amazon.com/vpn/latest/clientvpn-user/self-service-portal.html)\nis made available via a CloudFormation output.\n\nBy default, a new security group is created and logging is enabled. 
Moreover, a rule to\nauthorize all users to the VPC CIDR is created.\n\nTo customize authorization rules, set the `authorizeAllUsersToVpcCidr` prop to `false`\nand use `addAuthorizationRule()`:\n\n```ts fixture=client-vpn\nconst endpoint = vpc.addClientVpnEndpoint('Endpoint', {\n  cidr: '10.100.0.0/16',\n  serverCertificateArn: 'arn:aws:acm:us-east-1:123456789012:certificate/server-certificate-id',\n  userBasedAuthentication: ec2.ClientVpnUserBasedAuthentication.federated(samlProvider),\n  authorizeAllUsersToVpcCidr: false,\n});\n\nendpoint.addAuthorizationRule('Rule', {\n  cidr: '10.0.10.0/32',\n  groupId: 'group-id',\n});\n```\n\nUse `addRoute()` to configure network routes:\n\n```ts fixture=client-vpn\nconst endpoint = vpc.addClientVpnEndpoint('Endpoint', {\n  cidr: '10.100.0.0/16',\n  serverCertificateArn: 'arn:aws:acm:us-east-1:123456789012:certificate/server-certificate-id',\n  userBasedAuthentication: ec2.ClientVpnUserBasedAuthentication.federated(samlProvider),\n});\n\n// Client-to-client access\nendpoint.addRoute('Route', {\n  cidr: '10.100.0.0/16',\n  target: ec2.ClientVpnRouteTarget.local(),\n});\n```\n\nUse the `connections` object of the endpoint to allow traffic to other security groups.\n\n## Instances\n\nYou can use the `Instance` class to start up a single EC2 instance. For production setups, we recommend\nyou use an `AutoScalingGroup` from the `aws-autoscaling` module instead, as AutoScalingGroups will take\ncare of restarting your instance if it ever fails.\n\n### Configuring Instances using CloudFormation Init (cfn-init)\n\nCloudFormation Init allows you to configure your instances by writing files to them, installing software\npackages, starting services and running arbitrary commands. 
By default, if any of the instance setup\ncommands throw an error, the deployment will fail and roll back to the previously known good state.\nThe following documentation also applies to `AutoScalingGroup`s.\n\nFor the full set of capabilities of this system, see the documentation for\n[`AWS::CloudFormation::Init`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-init.html).\nHere is an example of applying some configuration to an instance:\n\n```ts\nnew ec2.Instance(this, 'Instance', {\n // Showing the most complex setup, if you have simpler requirements\n // you can use `CloudFormationInit.fromElements()`.\n init: ec2.CloudFormationInit.fromConfigSets({\n configSets: {\n // Applies the configs below in this order\n default: ['yumPreinstall', 'config'],\n },\n configs: {\n yumPreinstall: new ec2.InitConfig([\n // Install an Amazon Linux package using yum\n ec2.InitPackage.yum('git'),\n ]),\n config: new ec2.InitConfig([\n // Create a JSON file from tokens (can also create other files)\n ec2.InitFile.fromObject('/etc/stack.json', {\n stackId: stack.stackId,\n stackName: stack.stackName,\n region: stack.region,\n }),\n\n // Create a group and user\n ec2.InitGroup.fromName('my-group'),\n ec2.InitUser.fromName('my-user'),\n\n // Install an RPM from the internet\n ec2.InitPackage.rpm('http://mirrors.ukfast.co.uk/sites/dl.fedoraproject.org/pub/epel/8/Everything/x86_64/Packages/r/rubygem-git-1.5.0-2.el8.noarch.rpm'),\n ]),\n },\n }),\n initOptions: {\n // Optional, which configsets to activate (['default'] by default)\n configSets: ['default'],\n\n // Optional, how long the installation is expected to take (5 minutes by default)\n timeout: Duration.minutes(30),\n\n // Optional, whether to include the --url argument when running cfn-init and cfn-signal commands (false by default)\n includeUrl: true\n\n // Optional, whether to include the --role argument when running cfn-init and cfn-signal commands (false by default)\n includeRole: true\n 
},\n});\n```\n\nYou can have services restarted after the init process has made changes to the system.\nTo do that, instantiate an `InitServiceRestartHandle` and pass it to the config elements\nthat need to trigger the restart and the service itself. For example, the following\nconfig writes a config file for nginx, extracts an archive to the root directory, and then\nrestarts nginx so that it picks up the new config and files:\n\n```ts\nconst handle = new ec2.InitServiceRestartHandle();\n\nec2.CloudFormationInit.fromElements(\n  ec2.InitFile.fromString('/etc/nginx/nginx.conf', '...', { serviceRestartHandles: [handle] }),\n  ec2.InitSource.fromBucket('/var/www/html', myBucket, 'html.zip', { serviceRestartHandles: [handle] }),\n  ec2.InitService.enable('nginx', {\n    serviceRestartHandle: handle,\n  })\n);\n```\n\n### Bastion Hosts\n\nA bastion host functions as an instance used to access servers and resources in a VPC without opening up the complete VPC on a network level.\nYou can use bastion hosts using a standard SSH connection targeting port 22 on the host. As an alternative, you can use the SSH connection\nfeature of AWS Systems Manager Session Manager, which does not need an opened security group. (https://aws.amazon.com/about-aws/whats-new/2019/07/session-manager-launches-tunneling-support-for-ssh-and-scp/)\n\nA default bastion host for use via SSM can be configured like:\n\n```ts fixture=with-vpc\nconst host = new ec2.BastionHostLinux(this, 'BastionHost', { vpc });\n```\n\nIf you want to connect from the internet using SSH, you need to place the host into a public subnet. 
You can then configure allowed source hosts.\n\n```ts fixture=with-vpc\nconst host = new ec2.BastionHostLinux(this, 'BastionHost', {\n vpc,\n subnetSelection: { subnetType: ec2.SubnetType.PUBLIC },\n});\nhost.allowSshAccessFrom(ec2.Peer.ipv4('1.2.3.4/32'));\n```\n\nAs there are no SSH public keys deployed on this machine, you need to use [EC2 Instance Connect](https://aws.amazon.com/de/blogs/compute/new-using-amazon-ec2-instance-connect-for-ssh-access-to-your-ec2-instances/)\nwith the command `aws ec2-instance-connect send-ssh-public-key` to provide your SSH public key.\n\nEBS volume for the bastion host can be encrypted like:\n\n```ts\n const host = new ec2.BastionHostLinux(stack, 'BastionHost', {\n vpc,\n blockDevices: [{\n deviceName: 'EBSBastionHost',\n volume: BlockDeviceVolume.ebs(10, {\n encrypted: true,\n }),\n }],\n });\n```\n\n### Block Devices\n\nTo add EBS block device mappings, specify the `blockDevices` property. The following example sets the EBS-backed\nroot device (`/dev/sda1`) size to 50 GiB, and adds another EBS-backed device mapped to `/dev/sdm` that is 100 GiB in\nsize:\n\n```ts\nnew ec2.Instance(this, 'Instance', {\n // ...\n blockDevices: [\n {\n deviceName: '/dev/sda1',\n volume: ec2.BlockDeviceVolume.ebs(50),\n },\n {\n deviceName: '/dev/sdm',\n volume: ec2.BlockDeviceVolume.ebs(100),\n },\n ],\n});\n\n```\n\n### Volumes\n\nWhereas a `BlockDeviceVolume` is an EBS volume that is created and destroyed as part of the creation and destruction of a specific instance. A `Volume` is for when you want an EBS volume separate from any particular instance. A `Volume` is an EBS block device that can be attached to, or detached from, any instance at any time. 
Some types of `Volume`s can also be attached to multiple instances at the same time to allow you to have shared storage between those instances.\n\nA notable restriction is that a Volume can only be attached to instances in the same availability zone as the Volume itself.\n\nThe following demonstrates how to create a 500 GiB encrypted Volume in the `us-west-2a` availability zone, and give a role the ability to attach that Volume to a specific instance:\n\n```ts\nconst instance = new ec2.Instance(this, 'Instance', {\n // ...\n});\nconst role = new iam.Role(stack, 'SomeRole', {\n assumedBy: new iam.AccountRootPrincipal(),\n});\nconst volume = new ec2.Volume(this, 'Volume', {\n availabilityZone: 'us-west-2a',\n size: cdk.Size.gibibytes(500),\n encrypted: true,\n});\n\nvolume.grantAttachVolume(role, [instance]);\n```\n\n#### Instances Attaching Volumes to Themselves\n\nIf you need to grant an instance the ability to attach/detach an EBS volume to/from itself, then using `grantAttachVolume` and `grantDetachVolume` as outlined above\nwill lead to an unresolvable circular reference between the instance role and the instance. 
In this case, use `grantAttachVolumeByResourceTag` and `grantDetachVolumeByResourceTag` as follows:\n\n```ts\nconst instance = new ec2.Instance(this, 'Instance', {\n // ...\n});\nconst volume = new ec2.Volume(this, 'Volume', {\n // ...\n});\n\nconst attachGrant = volume.grantAttachVolumeByResourceTag(instance.grantPrincipal, [instance]);\nconst detachGrant = volume.grantDetachVolumeByResourceTag(instance.grantPrincipal, [instance]);\n```\n\n#### Attaching Volumes\n\nThe Amazon EC2 documentation for\n[Linux Instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AmazonEBS.html) and\n[Windows Instances](https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-volumes.html) contains information on how\nto attach and detach your Volumes to/from instances, and how to format them for use.\n\nThe following is a sample skeleton of EC2 UserData that can be used to attach a Volume to the Linux instance that it is running on:\n\n```ts\nconst volume = new ec2.Volume(this, 'Volume', {\n // ...\n});\nconst instance = new ec2.Instance(this, 'Instance', {\n // ...\n});\nvolume.grantAttachVolumeByResourceTag(instance.grantPrincipal, [instance]);\nconst targetDevice = '/dev/xvdz';\ninstance.userData.addCommands(\n // Retrieve token for accessing EC2 instance metadata (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html)\n `TOKEN=$(curl -SsfX PUT \"http://169.254.169.254/latest/api/token\" -H \"X-aws-ec2-metadata-token-ttl-seconds: 21600\")`,\n // Retrieve the instance Id of the current EC2 instance\n `INSTANCE_ID=$(curl -SsfH \"X-aws-ec2-metadata-token: $TOKEN\" http://169.254.169.254/latest/meta-data/instance-id)`,\n // Attach the volume to /dev/xvdz\n `aws --region ${Stack.of(this).region} ec2 attach-volume --volume-id ${volume.volumeId} --instance-id $INSTANCE_ID --device ${targetDevice}`,\n // Wait until the volume has attached\n `while ! test -e ${targetDevice}; do sleep 1; done`\n // The volume will now be mounted. 
You may have to add additional code to format the volume if it has not been prepared.\n);\n```\n\n## VPC Flow Logs\n\nVPC Flow Logs is a feature that enables you to capture information about the IP traffic going to and from network interfaces in your VPC. Flow log data can be published to Amazon CloudWatch Logs and Amazon S3. After you've created a flow log, you can retrieve and view its data in the chosen destination. (<https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html>).\n\nBy default a flow log will be created with CloudWatch Logs as the destination.\n\nYou can create a flow log like this:\n\n```ts\nnew ec2.FlowLog(this, 'FlowLog', {\n resourceType: ec2.FlowLogResourceType.fromVpc(vpc)\n})\n```\n\nOr you can add a Flow Log to a VPC by using the addFlowLog method like this:\n\n```ts\nconst vpc = new ec2.Vpc(this, 'Vpc');\n\nvpc.addFlowLog('FlowLog');\n```\n\nYou can also add multiple flow logs with different destinations.\n\n```ts\nconst vpc = new ec2.Vpc(this, 'Vpc');\n\nvpc.addFlowLog('FlowLogS3', {\n destination: ec2.FlowLogDestination.toS3()\n});\n\nvpc.addFlowLog('FlowLogCloudWatch', {\n trafficType: ec2.FlowLogTrafficType.REJECT\n});\n```\n\nBy default the CDK will create the necessary resources for the destination. For the CloudWatch Logs destination\nit will create a CloudWatch Logs Log Group as well as the IAM role with the necessary permissions to publish to\nthe log group. 
In the case of an S3 destination, it will create the S3 bucket.\n\nIf you want to customize any of the destination resources you can provide your own as part of the `destination`.\n\n*CloudWatch Logs*\n\n```ts\nconst logGroup = new logs.LogGroup(this, 'MyCustomLogGroup');\n\nconst role = new iam.Role(this, 'MyCustomRole', {\n assumedBy: new iam.ServicePrincipal('vpc-flow-logs.amazonaws.com')\n});\n\nnew ec2.FlowLog(this, 'FlowLog', {\n resourceType: ec2.FlowLogResourceType.fromVpc(vpc),\n destination: ec2.FlowLogDestination.toCloudWatchLogs(logGroup, role)\n});\n```\n\n*S3*\n\n```ts\n\nconst bucket = new s3.Bucket(this, 'MyCustomBucket');\n\nnew ec2.FlowLog(this, 'FlowLog', {\n resourceType: ec2.FlowLogResourceType.fromVpc(vpc),\n destination: ec2.FlowLogDestination.toS3(bucket)\n});\n\nnew ec2.FlowLog(this, 'FlowLogWithKeyPrefix', {\n resourceType: ec2.FlowLogResourceType.fromVpc(vpc),\n destination: ec2.FlowLogDestination.toS3(bucket, 'prefix/')\n});\n```\n\n## User Data\n\nUser data enables you to run a script when your instances start up. In order to configure these scripts you can add commands directly to the script\n or you can use the UserData's convenience functions to aid in the creation of your script.\n\nA user data could be configured to run a script found in an asset through the following:\n\n```ts\nconst asset = new Asset(this, 'Asset', {path: path.join(__dirname, 'configure.sh')});\nconst instance = new ec2.Instance(this, 'Instance', {\n // ...\n });\nconst localPath = instance.userData.addS3DownloadCommand({\n bucket:asset.bucket,\n bucketKey:asset.s3ObjectKey,\n});\ninstance.userData.addExecuteFileCommand({\n filePath:localPath,\n arguments: '--verbose -y'\n});\nasset.grantRead( instance.role );\n```\n\n### Multipart user data\n\nIn addition, to above the `MultipartUserData` can be used to change instance startup behavior. Multipart user data are composed\nfrom separate parts forming archive. 
The most common parts are scripts executed during instance set-up. However, there are other\nkinds, too.\n\nThe advantage of multipart archive is in flexibility when it's needed to add additional parts or to use specialized parts to\nfine tune instance startup. Some services (like AWS Batch) supports only `MultipartUserData`.\n\nThe parts can be executed at different moment of instance start-up and can serve a different purposes. This is controlled by `contentType` property.\nFor common scripts, `text/x-shellscript; charset=\"utf-8\"` can be used as content type.\n\nIn order to create archive the `MultipartUserData` has to be instantiated. Than, user can add parts to multipart archive using `addPart`. The `MultipartBody` contains methods supporting creation of body parts.\n\nIf the very custom part is required, it can be created using `MultipartUserData.fromRawBody`, in this case full control over content type,\ntransfer encoding, and body properties is given to the user.\n\nBelow is an example for creating multipart user data with single body part responsible for installing `awscli` and configuring maximum size\nof storage used by Docker containers:\n\n```ts\nconst bootHookConf = ec2.UserData.forLinux();\nbootHookConf.addCommands('cloud-init-per once docker_options echo \\'OPTIONS=\"${OPTIONS} --storage-opt dm.basesize=40G\"\\' >> /etc/sysconfig/docker');\n\nconst setupCommands = ec2.UserData.forLinux();\nsetupCommands.addCommands('sudo yum install awscli && echo Packages installed らと > /var/tmp/setup');\n\nconst multipartUserData = new ec2.MultipartUserData();\n// The docker has to be configured at early stage, so content type is overridden to boothook\nmultipartUserData.addPart(ec2.MultipartBody.fromUserData(bootHookConf, 'text/cloud-boothook; charset=\"us-ascii\"'));\n// Execute the rest of setup\nmultipartUserData.addPart(ec2.MultipartBody.fromUserData(setupCommands));\n\nnew ec2.LaunchTemplate(stack, '', {\n userData: multipartUserData,\n blockDevices: [\n // 
Block device configuration rest\n ]\n});\n```\n\nFor more information see\n[Specifying Multiple User Data Blocks Using a MIME Multi Part Archive](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/bootstrap_container_instance.html#multi-part_user_data)\n\n#### Using add*Command on MultipartUserData\n\nTo use the `add*Command` methods, that are inherited from the `UserData` interface, on `MultipartUserData` you must add a part\nto the `MultipartUserData` and designate it as the reciever for these methods. This is accomplished by using the `addUserDataPart()`\nmethod on `MultipartUserData` with the `makeDefault` argument set to `true`:\n\n```ts\nconst multipartUserData = new ec2.MultipartUserData();\nconst commandsUserData = ec2.UserData.forLinux();\nmultipartUserData.addUserDataPart(commandsUserData, MultipartBody.SHELL_SCRIPT, true);\n\n// Adding commands to the multipartUserData adds them to commandsUserData, and vice-versa.\nmultipartUserData.addCommands('touch /root/multi.txt');\ncommandsUserData.addCommands('touch /root/userdata.txt');\n```\n\nWhen used on an EC2 instance, the above `multipartUserData` will create both `multi.txt` and `userdata.txt` in `/root`.\n\n## Importing existing subnet\n\nTo import an existing Subnet, call `Subnet.fromSubnetAttributes()` or\n`Subnet.fromSubnetId()`. 
Only if you supply the subnet's Availability Zone\nand Route Table Ids when calling `Subnet.fromSubnetAttributes()` will you be\nable to use the CDK features that use these values (such as selecting one\nsubnet per AZ).\n\nImporting an existing subnet looks like this:\n\n```ts\n// Supply all properties\nconst subnet = Subnet.fromSubnetAttributes(this, 'SubnetFromAttributes', {\n subnetId: 's-1234',\n availabilityZone: 'pub-az-4465',\n routeTableId: 'rt-145'\n});\n\n// Supply only subnet id\nconst subnet = Subnet.fromSubnetId(this, 'SubnetFromId', 's-1234');\n```\n\n## Launch Templates\n\nA Launch Template is a standardized template that contains the configuration information to launch an instance.\nThey can be used when launching instances on their own, through Amazon EC2 Auto Scaling, EC2 Fleet, and Spot Fleet.\nLaunch templates enable you to store launch parameters so that you do not have to specify them every time you launch\nan instance. For information on Launch Templates please see the\n[official documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html).\n\nThe following demonstrates how to create a launch template with an Amazon Machine Image, and security group.\n\n```ts\nconst vpc = new ec2.Vpc(...);\n// ...\nconst template = new ec2.LaunchTemplate(this, 'LaunchTemplate', {\n machineImage: new ec2.AmazonMachineImage(),\n securityGroup: new ec2.SecurityGroup(this, 'LaunchTemplateSG', {\n vpc: vpc,\n }),\n});\n```\n"
|
|
1227
|
-
},
|
|
1228
876
|
"targets": {
|
|
1229
877
|
"dotnet": {
|
|
1230
878
|
"namespace": "Amazon.CDK.AWS.EC2"
|
|
@@ -1238,13 +886,6 @@
|
|
|
1238
886
|
}
|
|
1239
887
|
},
|
|
1240
888
|
"aws-cdk-lib.aws_ecr": {
|
|
1241
|
-
"locationInModule": {
|
|
1242
|
-
"filename": "lib/index.ts",
|
|
1243
|
-
"line": 66
|
|
1244
|
-
},
|
|
1245
|
-
"readme": {
|
|
1246
|
-
"markdown": "# Amazon ECR Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis package contains constructs for working with Amazon Elastic Container Registry.\n\n## Repositories\n\nDefine a repository by creating a new instance of `Repository`. A repository\nholds multiple verions of a single container image.\n\n```ts\nconst repository = new ecr.Repository(this, 'Repository');\n```\n\n## Image scanning\n\nAmazon ECR image scanning helps in identifying software vulnerabilities in your container images. You can manually scan container images stored in Amazon ECR, or you can configure your repositories to scan images when you push them to a repository. To create a new repository to scan on push, simply enable `imageScanOnPush` in the properties\n\n```ts\nconst repository = new ecr.Repository(stack, 'Repo', {\n imageScanOnPush: true\n});\n```\n\nTo create an `onImageScanCompleted` event rule and trigger the event target\n\n```ts\nrepository.onImageScanCompleted('ImageScanComplete')\n .addTarget(...)\n```\n\n### Authorization Token\n\nBesides the Amazon ECR APIs, ECR also allows the Docker CLI or a language-specific Docker library to push and pull\nimages from an ECR repository. However, the Docker CLI does not support native IAM authentication methods and\nadditional steps must be taken so that Amazon ECR can authenticate and authorize Docker push and pull requests.\nMore information can be found at at [Registry Authentication](https://docs.aws.amazon.com/AmazonECR/latest/userguide/Registries.html#registry_auth).\n\nA Docker authorization token can be obtained using the `GetAuthorizationToken` ECR API. The following code snippets\ngrants an IAM user access to call this API.\n\n```ts\nimport { aws_iam as iam } from 'aws-cdk-lib';\nimport { aws_ecr as ecr } from 'aws-cdk-lib';\n\nconst user = new iam.User(this, 'User', { ... 
});\necr.AuthorizationToken.grantRead(user);\n```\n\nIf you access images in the [Public ECR Gallery](https://gallery.ecr.aws/) as well, it is recommended you authenticate to the registry to benefit from\nhigher rate and bandwidth limits.\n\n> See `Pricing` in https://aws.amazon.com/blogs/aws/amazon-ecr-public-a-new-public-container-registry/ and [Service quotas](https://docs.aws.amazon.com/AmazonECR/latest/public/public-service-quotas.html).\n\nThe following code snippet grants an IAM user access to retrieve an authorization token for the public gallery.\n\n```ts\nimport { aws_iam as iam } from 'aws-cdk-lib';\nimport { aws_ecr as ecr } from 'aws-cdk-lib';\n\nconst user = new iam.User(this, 'User', { ... });\necr.PublicGalleryAuthorizationToken.grantRead(user);\n```\n\nThis user can then proceed to login to the registry using one of the [authentication methods](https://docs.aws.amazon.com/AmazonECR/latest/public/public-registries.html#public-registry-auth).\n\n### Image tag immutability\n\nYou can set tag immutability on images in our repository using the `imageTagMutability` construct prop.\n\n```ts\nnew ecr.Repository(stack, 'Repo', { imageTagMutability: ecr.TagMutability.IMMUTABLE });\n```\n\n## Automatically clean up repositories\n\nYou can set life cycle rules to automatically clean up old images from your\nrepository. The first life cycle rule that matches an image will be applied\nagainst that image. For example, the following deletes images older than\n30 days, while keeping all images tagged with prod (note that the order\nis important here):\n\n```ts\nrepository.addLifecycleRule({ tagPrefixList: ['prod'], maxImageCount: 9999 });\nrepository.addLifecycleRule({ maxImageAge: cdk.Duration.days(30) });\n```\n"
|
|
1247
|
-
},
|
|
1248
889
|
"targets": {
|
|
1249
890
|
"dotnet": {
|
|
1250
891
|
"namespace": "Amazon.CDK.AWS.ECR"
|
|
@@ -1258,13 +899,6 @@
|
|
|
1258
899
|
}
|
|
1259
900
|
},
|
|
1260
901
|
"aws-cdk-lib.aws_ecr_assets": {
|
|
1261
|
-
"locationInModule": {
|
|
1262
|
-
"filename": "lib/index.ts",
|
|
1263
|
-
"line": 67
|
|
1264
|
-
},
|
|
1265
|
-
"readme": {
|
|
1266
|
-
"markdown": "# AWS CDK Docker Image Assets\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis module allows bundling Docker images as assets.\n\n## Images from Dockerfile\n\nImages are built from a local Docker context directory (with a `Dockerfile`),\nuploaded to ECR by the CDK toolkit and/or your app's CI-CD pipeline, and can be\nnaturally referenced in your CDK app.\n\n```ts\nimport { DockerImageAsset } from 'aws-cdk-lib/aws-ecr-assets';\n\nconst asset = new DockerImageAsset(this, 'MyBuildImage', {\n directory: path.join(__dirname, 'my-image')\n});\n```\n\nThe directory `my-image` must include a `Dockerfile`.\n\nThis will instruct the toolkit to build a Docker image from `my-image`, push it\nto an AWS ECR repository and wire the name of the repository as CloudFormation\nparameters to your stack.\n\nBy default, all files in the given directory will be copied into the docker\n*build context*. If there is a large directory that you know you definitely\ndon't need in the build context you can improve the performance by adding the\nnames of files and directories to ignore to a file called `.dockerignore`, or\npass them via the `exclude` property. If both are available, the patterns\nfound in `exclude` are appended to the patterns found in `.dockerignore`.\n\nThe `ignoreMode` property controls how the set of ignore patterns is\ninterpreted. The recommended setting for Docker image assets is\n`IgnoreMode.DOCKER`. If the context flag\n`@aws-cdk/aws-ecr-assets:dockerIgnoreSupport` is set to `true` in your\n`cdk.json` (this is by default for new projects, but must be set manually for\nold projects) then `IgnoreMode.DOCKER` is the default and you don't need to\nconfigure it on the asset itself.\n\nUse `asset.imageUri` to reference the image. 
It includes both the ECR image URL\nand tag.\n\nYou can optionally pass build args to the `docker build` command by specifying\nthe `buildArgs` property:\n\n```ts\nconst asset = new DockerImageAsset(this, 'MyBuildImage', {\n directory: path.join(__dirname, 'my-image'),\n buildArgs: {\n HTTP_PROXY: 'http://10.20.30.2:1234'\n }\n});\n```\n\nYou can optionally pass a target to the `docker build` command by specifying\nthe `target` property:\n\n```ts\nconst asset = new DockerImageAsset(this, 'MyBuildImage', {\n directory: path.join(__dirname, 'my-image'),\n target: 'a-target'\n})\n```\n\n## Images from Tarball\n\nImages are loaded from a local tarball, uploaded to ECR by the CDK toolkit and/or your app's CI-CD pipeline, and can be\nnaturally referenced in your CDK app.\n\n```ts\nimport { TarballImageAsset } from 'aws-cdk-lib/aws-ecr-assets';\n\nconst asset = new TarballImageAsset(this, 'MyBuildImage', {\n tarballFile: 'local-image.tar'\n});\n```\n\nThis will instruct the toolkit to add the tarball as a file asset. During deployment it will load the container image\nfrom `local-image.tar`, push it to an AWS ECR repository and wire the name of the repository as CloudFormation parameters\nto your stack.\n\n## Publishing images to ECR repositories\n\n`DockerImageAsset` is designed for seamless build & consumption of image assets by CDK code deployed to multiple environments\nthrough the CDK CLI or through CI/CD workflows. 
To that end, the ECR repository behind this construct is controlled by the AWS CDK.\nThe mechanics of where these images are published and how are intentionally kept as an implementation detail, and the construct\ndoes not support customizations such as specifying the ECR repository name or tags.\n\nIf you are looking for a way to _publish_ image assets to an ECR repository in your control, you should consider using\n[cdklabs/cdk-ecr-deployment], which is able to replicate an image asset from the CDK-controlled ECR repository to a repository of\nyour choice.\n\nHere an example from the [cdklabs/cdk-ecr-deployment] project:\n\n```ts\nimport * as ecrdeploy from 'cdk-ecr-deployment';\n\nconst image = new DockerImageAsset(this, 'CDKDockerImage', {\n directory: path.join(__dirname, 'docker'),\n});\n\nnew ecrdeploy.ECRDeployment(this, 'DeployDockerImage', {\n src: new ecrdeploy.DockerImageName(image.imageUri),\n dest: new ecrdeploy.DockerImageName(`${cdk.Aws.ACCOUNT_ID}.dkr.ecr.us-west-2.amazonaws.com/test:nginx`),\n});\n```\n\n⚠️ Please note that this is a 3rd-party construct library and is not officially supported by AWS.\nYou are welcome to +1 [this GitHub issue](https://github.com/aws/aws-cdk/issues/12597) if you would like to see\nnative support for this use-case in the AWS CDK.\n\n[cdklabs/cdk-ecr-deployment]: https://github.com/cdklabs/cdk-ecr-deployment\n\n## Pull Permissions\n\nDepending on the consumer of your image asset, you will need to make sure\nthe principal has permissions to pull the image.\n\nIn most cases, you should use the `asset.repository.grantPull(principal)`\nmethod. This will modify the IAM policy of the principal to allow it to\npull images from this repository.\n\nIf the pulling principal is not in the same account or is an AWS service that\ndoesn't assume a role in your account (e.g. 
AWS CodeBuild), pull permissions\nmust be granted on the __resource policy__ (and not on the principal's policy).\nTo do that, you can use `asset.repository.addToResourcePolicy(statement)` to\ngrant the desired principal the following permissions: \"ecr:GetDownloadUrlForLayer\",\n\"ecr:BatchGetImage\" and \"ecr:BatchCheckLayerAvailability\".\n"
|
|
1267
|
-
},
|
|
1268
902
|
"targets": {
|
|
1269
903
|
"dotnet": {
|
|
1270
904
|
"namespace": "Amazon.CDK.AWS.Ecr.Assets"
|
|
@@ -1278,13 +912,6 @@
|
|
|
1278
912
|
}
|
|
1279
913
|
},
|
|
1280
914
|
"aws-cdk-lib.aws_ecs": {
|
|
1281
|
-
"locationInModule": {
|
|
1282
|
-
"filename": "lib/index.ts",
|
|
1283
|
-
"line": 68
|
|
1284
|
-
},
|
|
1285
|
-
"readme": {
|
|
1286
|
-
"markdown": "# Amazon ECS Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis package contains constructs for working with **Amazon Elastic Container\nService** (Amazon ECS).\n\nAmazon Elastic Container Service (Amazon ECS) is a fully managed container orchestration service.\n\nFor further information on Amazon ECS,\nsee the [Amazon ECS documentation](https://docs.aws.amazon.com/ecs)\n\nThe following example creates an Amazon ECS cluster, adds capacity to it, and\nruns a service on it:\n\n```ts\nimport { aws_ecs as ecs } from 'aws-cdk-lib';\n\n// Create an ECS cluster\nconst cluster = new ecs.Cluster(this, 'Cluster', {\n vpc,\n});\n\n// Add capacity to it\ncluster.addCapacity('DefaultAutoScalingGroupCapacity', {\n instanceType: new ec2.InstanceType(\"t2.xlarge\"),\n desiredCapacity: 3,\n});\n\nconst taskDefinition = new ecs.Ec2TaskDefinition(this, 'TaskDef');\n\ntaskDefinition.addContainer('DefaultContainer', {\n image: ecs.ContainerImage.fromRegistry(\"amazon/amazon-ecs-sample\"),\n memoryLimitMiB: 512,\n});\n\n// Instantiate an Amazon ECS Service\nconst ecsService = new ecs.Ec2Service(this, 'Service', {\n cluster,\n taskDefinition,\n});\n```\n\nFor a set of constructs defining common ECS architectural patterns, see the `@aws-cdk/aws-ecs-patterns` package.\n\n## Launch Types: AWS Fargate vs Amazon EC2\n\nThere are two sets of constructs in this library; one to run tasks on Amazon EC2 and\none to run tasks on AWS Fargate.\n\n- Use the `Ec2TaskDefinition` and `Ec2Service` constructs to run tasks on Amazon EC2 instances running in your account.\n- Use the `FargateTaskDefinition` and `FargateService` constructs to run tasks on\n instances that are managed for you by AWS.\n- Use the `ExternalTaskDefinition` and `ExternalService` constructs to run AWS ECS Anywhere tasks on self-managed infrastructure.\n\nHere are the main differences:\n\n- **Amazon EC2**: instances are under your control. 
Complete control of task to host\n allocation. Required to specify at least a memory reservation or limit for\n every container. Can use Host, Bridge and AwsVpc networking modes. Can attach\n Classic Load Balancer. Can share volumes between container and host.\n- **AWS Fargate**: tasks run on AWS-managed instances, AWS manages task to host\n allocation for you. Requires specification of memory and cpu sizes at the\n taskdefinition level. Only supports AwsVpc networking modes and\n Application/Network Load Balancers. Only the AWS log driver is supported.\n Many host features are not supported such as adding kernel capabilities\n and mounting host devices/volumes inside the container.\n- **AWS ECSAnywhere**: tasks are run and managed by AWS ECS Anywhere on infrastructure owned by the customer. Only Bridge networking mode is supported. Does not support autoscaling, load balancing, cloudmap or attachment of volumes.\n\nFor more information on Amazon EC2 vs AWS Fargate, networking and ECS Anywhere see the AWS Documentation:\n[AWS Fargate](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/AWS_Fargate.html),\n[Task Networking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html),\n[ECS Anywhere](https://aws.amazon.com/ecs/anywhere/)\n\n## Clusters\n\nA `Cluster` defines the infrastructure to run your\ntasks on. You can run many tasks on a single cluster.\n\nThe following code creates a cluster that can run AWS Fargate tasks:\n\n```ts\nconst cluster = new ecs.Cluster(this, 'Cluster', {\n vpc: vpc\n});\n```\n\nTo use tasks with Amazon EC2 launch-type, you have to add capacity to\nthe cluster in order for tasks to be scheduled on your instances. Typically,\nyou add an AutoScalingGroup with instances running the latest\nAmazon ECS-optimized AMI to the cluster. There is a method to build and add such an\nAutoScalingGroup automatically, or you can supply a customized AutoScalingGroup\nthat you construct yourself. 
It's possible to add multiple AutoScalingGroups\nwith various instance types.\n\nThe following example creates an Amazon ECS cluster and adds capacity to it:\n\n```ts\nconst cluster = new ecs.Cluster(this, 'Cluster', {\n vpc: vpc\n});\n\n// Either add default capacity\ncluster.addCapacity('DefaultAutoScalingGroupCapacity', {\n instanceType: new ec2.InstanceType(\"t2.xlarge\"),\n desiredCapacity: 3,\n});\n\n// Or add customized capacity. Be sure to start the Amazon ECS-optimized AMI.\nconst autoScalingGroup = new autoscaling.AutoScalingGroup(this, 'ASG', {\n vpc,\n instanceType: new ec2.InstanceType('t2.xlarge'),\n machineImage: EcsOptimizedImage.amazonLinux(),\n // Or use Amazon ECS-Optimized Amazon Linux 2 AMI\n // machineImage: EcsOptimizedImage.amazonLinux2(),\n desiredCapacity: 3,\n // ... other options here ...\n});\n\ncluster.addAutoScalingGroup(autoScalingGroup);\n```\n\nIf you omit the property `vpc`, the construct will create a new VPC with two AZs.\n\n\n### Bottlerocket\n\n[Bottlerocket](https://aws.amazon.com/bottlerocket/) is a Linux-based open source operating system that is\npurpose-built by AWS for running containers. You can launch Amazon ECS container instances with the Bottlerocket AMI.\n\nThe following example will create a capacity with self-managed Amazon EC2 capacity of 2 `c5.large` Linux instances running with `Bottlerocket` AMI.\n\nThe following example adds Bottlerocket capacity to the cluster:\n\n```ts\ncluster.addCapacity('bottlerocket-asg', {\n minCapacity: 2,\n instanceType: new ec2.InstanceType('c5.large'),\n machineImage: new ecs.BottleRocketImage(),\n});\n```\n\n### ARM64 (Graviton) Instances\n\nTo launch instances with ARM64 hardware, you can use the Amazon ECS-optimized\nAmazon Linux 2 (arm64) AMI. 
Based on Amazon Linux 2, this AMI is recommended\nfor use when launching your EC2 instances that are powered by Arm-based AWS\nGraviton Processors.\n\n```ts\ncluster.addCapacity('graviton-cluster', {\n minCapacity: 2,\n instanceType: new ec2.InstanceType('c6g.large'),\n machineImage: ecs.EcsOptimizedImage.amazonLinux2(ecs.AmiHardwareType.ARM),\n});\n```\n\nBottlerocket is also supported:\n\n```ts\ncluster.addCapacity('graviton-cluster', {\n minCapacity: 2,\n instanceType: new ec2.InstanceType('c6g.large'),\n machineImage: ecs.MachineImageType.BOTTLEROCKET,\n});\n```\n\n### Spot Instances\n\nTo add spot instances into the cluster, you must specify the `spotPrice` in the `ecs.AddCapacityOptions` and optionally enable the `spotInstanceDraining` property.\n\n```ts\n// Add an AutoScalingGroup with spot instances to the existing cluster\ncluster.addCapacity('AsgSpot', {\n maxCapacity: 2,\n minCapacity: 2,\n desiredCapacity: 2,\n instanceType: new ec2.InstanceType('c5.xlarge'),\n spotPrice: '0.0735',\n // Enable the Automated Spot Draining support for Amazon ECS\n spotInstanceDraining: true,\n});\n```\n\n### SNS Topic Encryption\n\nWhen the `ecs.AddCapacityOptions` that you provide has a non-zero `taskDrainTime` (the default) then an SNS topic and Lambda are created to ensure that the\ncluster's instances have been properly drained of tasks before terminating. The SNS Topic is sent the instance-terminating lifecycle event from the AutoScalingGroup,\nand the Lambda acts on that event. 
If you wish to engage [server-side encryption](https://docs.aws.amazon.com/sns/latest/dg/sns-data-encryption.html) for this SNS Topic\nthen you may do so by providing a KMS key for the `topicEncryptionKey` property of `ecs.AddCapacityOptions`.\n\n```ts\n// Given\nconst key = kms.Key(...);\n// Then, use that key to encrypt the lifecycle-event SNS Topic.\ncluster.addCapacity('ASGEncryptedSNS', {\n instanceType: new ec2.InstanceType(\"t2.xlarge\"),\n desiredCapacity: 3,\n topicEncryptionKey: key,\n});\n```\n\n## Task definitions\n\nA task definition describes what a single copy of a **task** should look like.\nA task definition has one or more containers; typically, it has one\nmain container (the *default container* is the first one that's added\nto the task definition, and it is marked *essential*) and optionally\nsome supporting containers which are used to support the main container,\ndoings things like upload logs or metrics to monitoring services.\n\nTo run a task or service with Amazon EC2 launch type, use the `Ec2TaskDefinition`. For AWS Fargate tasks/services, use the\n`FargateTaskDefinition`. For AWS ECS Anywhere use the `ExternalTaskDefinition`. 
These classes\nprovide simplified APIs that only contain properties relevant for each specific launch type.\n\nFor a `FargateTaskDefinition`, specify the task size (`memoryLimitMiB` and `cpu`):\n\n```ts\nconst fargateTaskDefinition = new ecs.FargateTaskDefinition(this, 'TaskDef', {\n memoryLimitMiB: 512,\n cpu: 256\n});\n```\n\nOn Fargate Platform Version 1.4.0 or later, you may specify up to 200GiB of\n[ephemeral storage](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/fargate-task-storage.html#fargate-task-storage-pv14):\n\n```ts\nconst fargateTaskDefinition = new ecs.FargateTaskDefinition(this, 'TaskDef', {\n memoryLimitMiB: 512,\n cpu: 256,\n ephemeralStorageGiB: 100\n});\n```\n\nTo add containers to a task definition, call `addContainer()`:\n\n```ts\nconst container = fargateTaskDefinition.addContainer(\"WebContainer\", {\n // Use an image from DockerHub\n image: ecs.ContainerImage.fromRegistry(\"amazon/amazon-ecs-sample\"),\n // ... other options here ...\n});\n```\n\nFor a `Ec2TaskDefinition`:\n\n```ts\nconst ec2TaskDefinition = new ecs.Ec2TaskDefinition(this, 'TaskDef', {\n networkMode: NetworkMode.BRIDGE\n});\n\nconst container = ec2TaskDefinition.addContainer(\"WebContainer\", {\n // Use an image from DockerHub\n image: ecs.ContainerImage.fromRegistry(\"amazon/amazon-ecs-sample\"),\n memoryLimitMiB: 1024\n // ... other options here ...\n});\n```\n\nFor an `ExternalTaskDefinition`:\n\n```ts\nconst externalTaskDefinition = new ecs.ExternalTaskDefinition(this, 'TaskDef');\n\nconst container = externalTaskDefinition.addContainer(\"WebContainer\", {\n // Use an image from DockerHub\n image: ecs.ContainerImage.fromRegistry(\"amazon/amazon-ecs-sample\"),\n memoryLimitMiB: 1024\n // ... 
other options here ...\n});\n```\n\nYou can specify container properties when you add them to the task definition, or with various methods, e.g.:\n\nTo add a port mapping when adding a container to the task definition, specify the `portMappings` option:\n\n```ts\ntaskDefinition.addContainer(\"WebContainer\", {\n image: ecs.ContainerImage.fromRegistry(\"amazon/amazon-ecs-sample\"),\n memoryLimitMiB: 1024,\n portMappings: [{ containerPort: 3000 }]\n});\n```\n\nTo add port mappings directly to a container definition, call `addPortMappings()`:\n\n```ts\ncontainer.addPortMappings({\n containerPort: 3000\n});\n```\n\nTo add data volumes to a task definition, call `addVolume()`:\n\n```ts\nconst volume = {\n // Use an Elastic FileSystem\n name: \"mydatavolume\",\n efsVolumeConfiguration: ecs.EfsVolumeConfiguration({\n fileSystemId: \"EFS\"\n // ... other options here ...\n })\n};\n\nconst container = fargateTaskDefinition.addVolume(\"mydatavolume\");\n```\n\n> Note: ECS Anywhere doesn't support volume attachments in the task definition.\n\nTo use a TaskDefinition that can be used with either Amazon EC2 or\nAWS Fargate launch types, use the `TaskDefinition` construct.\n\nWhen creating a task definition you have to specify what kind of\ntasks you intend to run: Amazon EC2, AWS Fargate, or both.\nThe following example uses both:\n\n```ts\nconst taskDefinition = new ecs.TaskDefinition(this, 'TaskDef', {\n memoryMiB: '512',\n cpu: '256',\n networkMode: NetworkMode.AWS_VPC,\n compatibility: ecs.Compatibility.EC2_AND_FARGATE,\n});\n```\n\n### Images\n\nImages supply the software that runs inside the container. 
Images can be\nobtained from either DockerHub or from ECR repositories, built directly from a local Dockerfile, or use an existing tarball.\n\n- `ecs.ContainerImage.fromRegistry(imageName)`: use a public image.\n- `ecs.ContainerImage.fromRegistry(imageName, { credentials: mySecret })`: use a private image that requires credentials.\n- `ecs.ContainerImage.fromEcrRepository(repo, tag)`: use the given ECR repository as the image\n to start. If no tag is provided, \"latest\" is assumed.\n- `ecs.ContainerImage.fromAsset('./image')`: build and upload an\n image directly from a `Dockerfile` in your source directory.\n- `ecs.ContainerImage.fromDockerImageAsset(asset)`: uses an existing\n `@aws-cdk/aws-ecr-assets.DockerImageAsset` as a container image.\n- `ecs.ContainerImage.fromTarball(file)`: use an existing tarball.\n- `new ecs.TagParameterContainerImage(repository)`: use the given ECR repository as the image\n but a CloudFormation parameter as the tag.\n\n### Environment variables\n\nTo pass environment variables to the container, you can use the `environment`, `environmentFiles`, and `secrets` props.\n\n```ts\ntaskDefinition.addContainer('container', {\n image: ecs.ContainerImage.fromRegistry(\"amazon/amazon-ecs-sample\"),\n memoryLimitMiB: 1024,\n environment: { // clear text, not for sensitive data\n STAGE: 'prod',\n },\n environmentFiles: [ // list of environment files hosted either on local disk or S3\n ecs.EnvironmentFile.fromAsset('./demo-env-file.env'),\n ecs.EnvironmentFile.fromBucket(s3Bucket, 'assets/demo-env-file.env'),\n ],\n secrets: { // Retrieved from AWS Secrets Manager or AWS Systems Manager Parameter Store at container start-up.\n SECRET: ecs.Secret.fromSecretsManager(secret),\n DB_PASSWORD: ecs.Secret.fromSecretsManager(dbSecret, 'password'), // Reference a specific JSON field, (requires platform version 1.4.0 or later for Fargate tasks)\n PARAMETER: ecs.Secret.fromSsmParameter(parameter),\n }\n});\n```\n\nThe task execution role is automatically 
granted read permissions on the secrets/parameters. Support for environment\nfiles is restricted to the EC2 launch type for files hosted on S3. Further details provided in the AWS documentation\nabout [specifying environment variables](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/taskdef-envfiles.html).\n\n## Service\n\nA `Service` instantiates a `TaskDefinition` on a `Cluster` a given number of\ntimes, optionally associating them with a load balancer.\nIf a task fails,\nAmazon ECS automatically restarts the task.\n\n```ts\nconst taskDefinition;\n\nconst service = new ecs.FargateService(this, 'Service', {\n cluster,\n taskDefinition,\n desiredCount: 5\n});\n```\n\nECS Anywhere service definition looks like:\n\n```ts\nconst taskDefinition;\n\nconst service = new ecs.ExternalService(this, 'Service', {\n cluster,\n taskDefinition,\n desiredCount: 5\n});\n```\n\n`Services` by default will create a security group if not provided.\nIf you'd like to specify which security groups to use you can override the `securityGroups` property.\n\n### Deployment circuit breaker and rollback\n\nAmazon ECS [deployment circuit breaker](https://aws.amazon.com/tw/blogs/containers/announcing-amazon-ecs-deployment-circuit-breaker/)\nautomatically rolls back unhealthy service deployments without the need for manual intervention. Use `circuitBreaker` to enable\ndeployment circuit breaker and optionally enable `rollback` for automatic rollback. 
See [Using the deployment circuit breaker](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-ecs.html)\nfor more details.\n\n```ts\nconst service = new ecs.FargateService(stack, 'Service', {\n cluster,\n taskDefinition,\n circuitBreaker: { rollback: true },\n});\n```\n\n> Note: ECS Anywhere doesn't support deployment circuit breakers and rollback.\n\n### Include an application/network load balancer\n\n`Services` are load balancing targets and can be added to a target group, which will be attached to an application/network load balancers:\n\n```ts\nimport { aws_elasticloadbalancingv2 as elbv2 } from 'aws-cdk-lib';\n\nconst service = new ecs.FargateService(this, 'Service', { /* ... */ });\n\nconst lb = new elbv2.ApplicationLoadBalancer(this, 'LB', { vpc, internetFacing: true });\nconst listener = lb.addListener('Listener', { port: 80 });\nconst targetGroup1 = listener.addTargets('ECS1', {\n port: 80,\n targets: [service]\n});\nconst targetGroup2 = listener.addTargets('ECS2', {\n port: 80,\n targets: [service.loadBalancerTarget({\n containerName: 'MyContainer',\n containerPort: 8080\n })]\n});\n```\n\n> Note: ECS Anywhere doesn't support application/network load balancers.\n\nNote that in the example above, the default `service` only allows you to register the first essential container or the first mapped port on the container as a target and add it to a new target group. To have more control over which container and port to register as targets, you can use `service.loadBalancerTarget()` to return a load balancing target for a specific container and port.\n\nAlternatively, you can also create all load balancer targets to be registered in this service, add them to target groups, and attach target groups to listeners accordingly.\n\n```ts\nimport { aws_elasticloadbalancingv2 as elbv2 } from 'aws-cdk-lib';\n\nconst service = new ecs.FargateService(this, 'Service', { /* ... 
*/ });\n\nconst lb = new elbv2.ApplicationLoadBalancer(this, 'LB', { vpc, internetFacing: true });\nconst listener = lb.addListener('Listener', { port: 80 });\nservice.registerLoadBalancerTargets(\n {\n containerName: 'web',\n containerPort: 80,\n newTargetGroupId: 'ECS',\n listener: ecs.ListenerConfig.applicationListener(listener, {\n protocol: elbv2.ApplicationProtocol.HTTPS\n }),\n },\n);\n```\n\n### Using a Load Balancer from a different Stack\n\nIf you want to put your Load Balancer and the Service it is load balancing to in\ndifferent stacks, you may not be able to use the convenience methods\n`loadBalancer.addListener()` and `listener.addTargets()`.\n\nThe reason is that these methods will create resources in the same Stack as the\nobject they're called on, which may lead to cyclic references between stacks.\nInstead, you will have to create an `ApplicationListener` in the service stack,\nor an empty `TargetGroup` in the load balancer stack that you attach your\nservice to.\n\nSee the [ecs/cross-stack-load-balancer example](https://github.com/aws-samples/aws-cdk-examples/tree/master/typescript/ecs/cross-stack-load-balancer/)\nfor the alternatives.\n\n### Include a classic load balancer\n\n`Services` can also be directly attached to a classic load balancer as targets:\n\n```ts\nimport { aws_elasticloadbalancing as elb } from 'aws-cdk-lib';\n\nconst service = new ecs.Ec2Service(this, 'Service', { /* ... */ });\n\nconst lb = new elb.LoadBalancer(stack, 'LB', { vpc });\nlb.addListener({ externalPort: 80 });\nlb.addTarget(service);\n```\n\nSimilarly, if you want to have more control over load balancer targeting:\n\n```ts\nimport { aws_elasticloadbalancing as elb } from 'aws-cdk-lib';\n\nconst service = new ecs.Ec2Service(this, 'Service', { /* ... 
*/ });\n\nconst lb = new elb.LoadBalancer(stack, 'LB', { vpc });\nlb.addListener({ externalPort: 80 });\nlb.addTarget(service.loadBalancerTarget({\n containerName: 'MyContainer',\n containerPort: 80\n}));\n```\n\nThere are two higher-level constructs available which include a load balancer for you that can be found in the aws-ecs-patterns module:\n\n- `LoadBalancedFargateService`\n- `LoadBalancedEc2Service`\n\n## Task Auto-Scaling\n\nYou can configure the task count of a service to match demand. Task auto-scaling is\nconfigured by calling `autoScaleTaskCount()`:\n\n```ts\nconst scaling = service.autoScaleTaskCount({ maxCapacity: 10 });\nscaling.scaleOnCpuUtilization('CpuScaling', {\n targetUtilizationPercent: 50\n});\n\nscaling.scaleOnRequestCount('RequestScaling', {\n requestsPerTarget: 10000,\n targetGroup: target\n})\n```\n\nTask auto-scaling is powered by *Application Auto-Scaling*.\nSee that section for details.\n\n## Integration with CloudWatch Events\n\nTo start an Amazon ECS task on an Amazon EC2-backed Cluster, instantiate an\n`@aws-cdk/aws-events-targets.EcsTask` instead of an `Ec2Service`:\n\n```ts\nimport { aws_events_targets as targets } from 'aws-cdk-lib';\n\n// Create a Task Definition for the container to start\nconst taskDefinition = new ecs.Ec2TaskDefinition(this, 'TaskDef');\ntaskDefinition.addContainer('TheContainer', {\n image: ecs.ContainerImage.fromAsset(path.resolve(__dirname, '..', 'eventhandler-image')),\n memoryLimitMiB: 256,\n logging: new ecs.AwsLogDriver({ streamPrefix: 'EventDemo', mode: AwsLogDriverMode.NON_BLOCKING })\n});\n\n// An Rule that describes the event trigger (in this case a scheduled run)\nconst rule = new events.Rule(this, 'Rule', {\n schedule: events.Schedule.expression('rate(1 min)')\n});\n\n// Pass an environment variable to the container 'TheContainer' in the task\nrule.addTarget(new targets.EcsTask({\n cluster,\n taskDefinition,\n taskCount: 1,\n containerOverrides: [{\n containerName: 'TheContainer',\n environment: 
[{\n name: 'I_WAS_TRIGGERED',\n value: 'From CloudWatch Events'\n }]\n }]\n}));\n```\n\n## Log Drivers\n\nCurrently Supported Log Drivers:\n\n- awslogs\n- fluentd\n- gelf\n- journald\n- json-file\n- splunk\n- syslog\n- awsfirelens\n\n### awslogs Log Driver\n\n```ts\n// Create a Task Definition for the container to start\nconst taskDefinition = new ecs.Ec2TaskDefinition(this, 'TaskDef');\ntaskDefinition.addContainer('TheContainer', {\n image: ecs.ContainerImage.fromRegistry('example-image'),\n memoryLimitMiB: 256,\n logging: ecs.LogDrivers.awsLogs({ streamPrefix: 'EventDemo' })\n});\n```\n\n### fluentd Log Driver\n\n```ts\n// Create a Task Definition for the container to start\nconst taskDefinition = new ecs.Ec2TaskDefinition(this, 'TaskDef');\ntaskDefinition.addContainer('TheContainer', {\n image: ecs.ContainerImage.fromRegistry('example-image'),\n memoryLimitMiB: 256,\n logging: ecs.LogDrivers.fluentd()\n});\n```\n\n### gelf Log Driver\n\n```ts\n// Create a Task Definition for the container to start\nconst taskDefinition = new ecs.Ec2TaskDefinition(this, 'TaskDef');\ntaskDefinition.addContainer('TheContainer', {\n image: ecs.ContainerImage.fromRegistry('example-image'),\n memoryLimitMiB: 256,\n logging: ecs.LogDrivers.gelf({ address: 'my-gelf-address' })\n});\n```\n\n### journald Log Driver\n\n```ts\n// Create a Task Definition for the container to start\nconst taskDefinition = new ecs.Ec2TaskDefinition(this, 'TaskDef');\ntaskDefinition.addContainer('TheContainer', {\n image: ecs.ContainerImage.fromRegistry('example-image'),\n memoryLimitMiB: 256,\n logging: ecs.LogDrivers.journald()\n});\n```\n\n### json-file Log Driver\n\n```ts\n// Create a Task Definition for the container to start\nconst taskDefinition = new ecs.Ec2TaskDefinition(this, 'TaskDef');\ntaskDefinition.addContainer('TheContainer', {\n image: ecs.ContainerImage.fromRegistry('example-image'),\n memoryLimitMiB: 256,\n logging: ecs.LogDrivers.jsonFile()\n});\n```\n\n### splunk Log Driver\n\n```ts\n// 
Create a Task Definition for the container to start\nconst taskDefinition = new ecs.Ec2TaskDefinition(this, 'TaskDef');\ntaskDefinition.addContainer('TheContainer', {\n image: ecs.ContainerImage.fromRegistry('example-image'),\n memoryLimitMiB: 256,\n logging: ecs.LogDrivers.splunk({\n secretToken: cdk.SecretValue.secretsManager('my-splunk-token'),\n url: 'my-splunk-url'\n })\n});\n```\n\n### syslog Log Driver\n\n```ts\n// Create a Task Definition for the container to start\nconst taskDefinition = new ecs.Ec2TaskDefinition(this, 'TaskDef');\ntaskDefinition.addContainer('TheContainer', {\n image: ecs.ContainerImage.fromRegistry('example-image'),\n memoryLimitMiB: 256,\n logging: ecs.LogDrivers.syslog()\n});\n```\n\n### firelens Log Driver\n\n```ts\n// Create a Task Definition for the container to start\nconst taskDefinition = new ecs.Ec2TaskDefinition(this, 'TaskDef');\ntaskDefinition.addContainer('TheContainer', {\n image: ecs.ContainerImage.fromRegistry('example-image'),\n memoryLimitMiB: 256,\n logging: ecs.LogDrivers.firelens({\n options: {\n Name: 'firehose',\n region: 'us-west-2',\n delivery_stream: 'my-stream',\n }\n })\n});\n```\n\nTo pass secrets to the log configuration, use the `secretOptions` property of the log configuration. The task execution role is automatically granted read permissions on the secrets/parameters.\n\n```ts\nconst taskDefinition = new ecs.Ec2TaskDefinition(this, 'TaskDef');\ntaskDefinition.addContainer('TheContainer', {\n image: ecs.ContainerImage.fromRegistry('example-image'),\n memoryLimitMiB: 256,\n logging: ecs.LogDrivers.firelens({\n options: {\n // ... 
log driver options here ...\n },\n secretOptions: { // Retrieved from AWS Secrets Manager or AWS Systems Manager Parameter Store\n apikey: ecs.Secret.fromSecretsManager(secret),\n host: ecs.Secret.fromSsmParameter(parameter),\n },\n })\n});\n```\n\n### Generic Log Driver\n\nA generic log driver object exists to provide a lower level abstraction of the log driver configuration.\n\n```ts\n// Create a Task Definition for the container to start\nconst taskDefinition = new ecs.Ec2TaskDefinition(this, 'TaskDef');\ntaskDefinition.addContainer('TheContainer', {\n image: ecs.ContainerImage.fromRegistry('example-image'),\n memoryLimitMiB: 256,\n logging: new ecs.GenericLogDriver({\n logDriver: 'fluentd',\n options: {\n tag: 'example-tag'\n }\n })\n});\n```\n\n## CloudMap Service Discovery\n\nTo register your ECS service with a CloudMap Service Registry, you may add the\n`cloudMapOptions` property to your service:\n\n```ts\nconst service = new ecs.Ec2Service(stack, 'Service', {\n cluster,\n taskDefinition,\n cloudMapOptions: {\n // Create A records - useful for AWSVPC network mode.\n dnsRecordType: cloudmap.DnsRecordType.A,\n },\n});\n```\n\nWith `bridge` or `host` network modes, only `SRV` DNS record types are supported.\nBy default, `SRV` DNS record types will target the default container and default\nport. 
However, you may target a different container and port on the same ECS task:\n\n```ts\n// Add a container to the task definition\nconst specificContainer = taskDefinition.addContainer(...);\n\n// Add a port mapping\nspecificContainer.addPortMappings({\n containerPort: 7600,\n protocol: ecs.Protocol.TCP,\n});\n\nnew ecs.Ec2Service(stack, 'Service', {\n cluster,\n taskDefinition,\n cloudMapOptions: {\n // Create SRV records - useful for bridge networking\n dnsRecordType: cloudmap.DnsRecordType.SRV,\n // Targets port TCP port 7600 `specificContainer`\n container: specificContainer,\n containerPort: 7600,\n },\n});\n```\n\n### Associate With a Specific CloudMap Service\n\nYou may associate an ECS service with a specific CloudMap service. To do\nthis, use the service's `associateCloudMapService` method:\n\n```ts\nconst cloudMapService = new cloudmap.Service(...);\nconst ecsService = new ecs.FargateService(...);\n\necsService.associateCloudMapService({\n service: cloudMapService,\n});\n```\n\n## Capacity Providers\n\nThere are two major families of Capacity Providers: [AWS\nFargate](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/fargate-capacity-providers.html)\n(including Fargate Spot) and EC2 [Auto Scaling\nGroup](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/asg-capacity-providers.html)\nCapacity Providers. Both are supported.\n\n### Fargate Capacity Providers\n\nTo enable Fargate capacity providers, you can either set\n`enableFargateCapacityProviders` to `true` when creating your cluster, or by\ninvoking the `enableFargateCapacityProviders()` method after creating your\ncluster. 
This will add both `FARGATE` and `FARGATE_SPOT` as available capacity\nproviders on your cluster.\n\n```ts\nconst cluster = new ecs.Cluster(stack, 'FargateCPCluster', {\n vpc,\n enableFargateCapacityProviders: true,\n});\n\nconst taskDefinition = new ecs.FargateTaskDefinition(stack, 'TaskDef');\n\ntaskDefinition.addContainer('web', {\n image: ecs.ContainerImage.fromRegistry('amazon/amazon-ecs-sample'),\n});\n\nnew ecs.FargateService(stack, 'FargateService', {\n cluster,\n taskDefinition,\n capacityProviderStrategies: [\n {\n capacityProvider: 'FARGATE_SPOT',\n weight: 2,\n },\n {\n capacityProvider: 'FARGATE',\n weight: 1,\n }\n ],\n});\n```\n\n### Auto Scaling Group Capacity Providers\n\nTo add an Auto Scaling Group Capacity Provider, first create an EC2 Auto Scaling\nGroup. Then, create an `AsgCapacityProvider` and pass the Auto Scaling Group to\nit in the constructor. Then add the Capacity Provider to the cluster. Finally,\nyou can refer to the Provider by its name in your service's or task's Capacity\nProvider strategy.\n\nBy default, an Auto Scaling Group Capacity Provider will manage the Auto Scaling\nGroup's size for you. It will also enable managed termination protection, in\norder to prevent EC2 Auto Scaling from terminating EC2 instances that have tasks\nrunning on them. 
If you want to disable this behavior, set both\n`enableManagedScaling` to and `enableManagedTerminationProtection` to `false`.\n\n```ts\nconst cluster = new ecs.Cluster(stack, 'Cluster', {\n vpc,\n});\n\nconst autoScalingGroup = new autoscaling.AutoScalingGroup(stack, 'ASG', {\n vpc,\n instanceType: new ec2.InstanceType('t2.micro'),\n machineImage: ecs.EcsOptimizedImage.amazonLinux2(),\n minCapacity: 0,\n maxCapacity: 100,\n});\n\nconst capacityProvider = new ecs.AsgCapacityProvider(stack, 'AsgCapacityProvider', {\n autoScalingGroup,\n});\ncluster.addAsgCapacityProvider(capacityProvider);\n\nconst taskDefinition = new ecs.Ec2TaskDefinition(stack, 'TaskDef');\n\ntaskDefinition.addContainer('web', {\n image: ecs.ContainerImage.fromRegistry('amazon/amazon-ecs-sample'),\n memoryReservationMiB: 256,\n});\n\nnew ecs.Ec2Service(stack, 'EC2Service', {\n cluster,\n taskDefinition,\n capacityProviderStrategies: [\n {\n capacityProvider: capacityProvider.capacityProviderName,\n weight: 1,\n }\n ],\n});\n```\n\n## Elastic Inference Accelerators\n\nCurrently, this feature is only supported for services with EC2 launch types.\n\nTo add elastic inference accelerators to your EC2 instance, first add\n`inferenceAccelerators` field to the Ec2TaskDefinition and set the `deviceName`\nand `deviceType` properties.\n\n```ts\nconst inferenceAccelerators = [{\n deviceName: 'device1',\n deviceType: 'eia2.medium',\n}];\n\nconst taskDefinition = new ecs.Ec2TaskDefinition(stack, 'Ec2TaskDef', {\n inferenceAccelerators,\n});\n```\n\nTo enable using the inference accelerators in the containers, add `inferenceAcceleratorResources`\nfield and set it to a list of device names used for the inference accelerators. 
Each value in the\nlist should match a `DeviceName` for an `InferenceAccelerator` specified in the task definition.\n\n```ts\nconst inferenceAcceleratorResources = ['device1'];\n\ntaskDefinition.addContainer('cont', {\n image: ecs.ContainerImage.fromRegistry('test'),\n memoryLimitMiB: 1024,\n inferenceAcceleratorResources,\n});\n```\n\n## ECS Exec command\n\nPlease note, ECS Exec leverages AWS Systems Manager (SSM). So as a prerequisite for the exec command\nto work, you need to have the SSM plugin for the AWS CLI installed locally. For more information, see\n[Install Session Manager plugin for AWS CLI](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html).\n\nTo enable the ECS Exec feature for your containers, set the boolean flag `enableExecuteCommand` to `true` in\nyour `Ec2Service` or `FargateService`.\n\n```ts\nconst service = new ecs.Ec2Service(stack, 'Service', {\n cluster,\n taskDefinition,\n enableExecuteCommand: true,\n});\n```\n\n### Enabling logging\n\nYou can enable sending logs of your execute session commands to a CloudWatch log group or S3 bucket by configuring\nthe `executeCommandConfiguration` property for your cluster. The default configuration will send the\nlogs to the CloudWatch Logs using the `awslogs` log driver that is configured in your task definition. Please note,\nwhen using your own `logConfiguration` the log group or S3 Bucket specified must already be created.\n\nTo encrypt data using your own KMS Customer Key (CMK), you must create a CMK and provide the key in the `kmsKey` field\nof the `executeCommandConfiguration`. 
To use this key for encrypting CloudWatch log data or S3 bucket, make sure to associate the key\nto these resources on creation.\n\n```ts\nconst kmsKey = new kms.Key(stack, 'KmsKey');\n\n// Pass the KMS key in the `encryptionKey` field to associate the key to the log group\nconst logGroup = new logs.LogGroup(stack, 'LogGroup', {\n encryptionKey: kmsKey,\n});\n\n// Pass the KMS key in the `encryptionKey` field to associate the key to the S3 bucket\nconst execBucket = new s3.Bucket(stack, 'EcsExecBucket', {\n encryptionKey: kmsKey,\n});\n\nconst cluster = new ecs.Cluster(stack, 'Cluster', {\n vpc,\n executeCommandConfiguration: {\n kmsKey,\n logConfiguration: {\n cloudWatchLogGroup: logGroup,\n cloudWatchEncryptionEnabled: true,\n s3Bucket: execBucket,\n s3EncryptionEnabled: true,\n s3KeyPrefix: 'exec-command-output',\n },\n logging: ecs.ExecuteCommandLogging.OVERRIDE,\n },\n});\n```\n"
|
|
1287
|
-
},
|
|
1288
915
|
"targets": {
|
|
1289
916
|
"dotnet": {
|
|
1290
917
|
"namespace": "Amazon.CDK.AWS.ECS"
|
|
@@ -1298,13 +925,6 @@
|
|
|
1298
925
|
}
|
|
1299
926
|
},
|
|
1300
927
|
"aws-cdk-lib.aws_ecs_patterns": {
|
|
1301
|
-
"locationInModule": {
|
|
1302
|
-
"filename": "lib/index.ts",
|
|
1303
|
-
"line": 69
|
|
1304
|
-
},
|
|
1305
|
-
"readme": {
|
|
1306
|
-
"markdown": "# CDK Construct library for higher-level ECS Constructs\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis library provides higher-level Amazon ECS constructs which follow common architectural patterns. It contains:\n\n* Application Load Balanced Services\n* Network Load Balanced Services\n* Queue Processing Services\n* Scheduled Tasks (cron jobs)\n* Additional Examples\n\n## Application Load Balanced Services\n\nTo define an Amazon ECS service that is behind an application load balancer, instantiate one of the following:\n\n* `ApplicationLoadBalancedEc2Service`\n\n```ts\nconst loadBalancedEcsService = new ecsPatterns.ApplicationLoadBalancedEc2Service(stack, 'Service', {\n cluster,\n memoryLimitMiB: 1024,\n taskImageOptions: {\n image: ecs.ContainerImage.fromRegistry('test'),\n environment: {\n TEST_ENVIRONMENT_VARIABLE1: \"test environment variable 1 value\",\n TEST_ENVIRONMENT_VARIABLE2: \"test environment variable 2 value\"\n },\n },\n desiredCount: 2,\n});\n```\n\n* `ApplicationLoadBalancedFargateService`\n\n```ts\nconst loadBalancedFargateService = new ecsPatterns.ApplicationLoadBalancedFargateService(stack, 'Service', {\n cluster,\n memoryLimitMiB: 1024,\n cpu: 512,\n taskImageOptions: {\n image: ecs.ContainerImage.fromRegistry(\"amazon/amazon-ecs-sample\"),\n },\n});\n\nloadBalancedFargateService.targetGroup.configureHealthCheck({\n path: \"/custom-health-path\",\n});\n```\n\nInstead of providing a cluster you can specify a VPC and CDK will create a new ECS cluster.\nIf you deploy multiple services CDK will only create one cluster per VPC.\n\nYou can omit `cluster` and `vpc` to let CDK create a new VPC with two AZs and create a cluster inside this VPC.\n\nYou can customize the health check for your target group; otherwise it defaults to `HTTP` over port `80` hitting path `/`.\n\nFargate services will use the `LATEST` platform version by default, but you can override by providing a value for the 
`platformVersion` property in the constructor.\n\nFargate services use the default VPC Security Group unless one or more are provided using the `securityGroups` property in the constructor.\n\nBy setting `redirectHTTP` to true, CDK will automatically create a listener on port 80 that redirects HTTP traffic to the HTTPS port.\n\nIf you specify the option `recordType` you can decide if you want the construct to use CNAME or Route53-Aliases as record sets.\n\nIf you need to encrypt the traffic between the load balancer and the ECS tasks, you can set the `targetProtocol` to `HTTPS`.\n\nAdditionally, if more than one application target group are needed, instantiate one of the following:\n\n* `ApplicationMultipleTargetGroupsEc2Service`\n\n```ts\n// One application load balancer with one listener and two target groups.\nconst loadBalancedEc2Service = new ApplicationMultipleTargetGroupsEc2Service(stack, 'Service', {\n cluster,\n memoryLimitMiB: 256,\n taskImageOptions: {\n image: ecs.ContainerImage.fromRegistry(\"amazon/amazon-ecs-sample\"),\n },\n targetGroups: [\n {\n containerPort: 80,\n },\n {\n containerPort: 90,\n pathPattern: 'a/b/c',\n priority: 10\n }\n ]\n});\n```\n\n* `ApplicationMultipleTargetGroupsFargateService`\n\n```ts\n// One application load balancer with one listener and two target groups.\nconst loadBalancedFargateService = new ApplicationMultipleTargetGroupsFargateService(stack, 'Service', {\n cluster,\n memoryLimitMiB: 1024,\n cpu: 512,\n taskImageOptions: {\n image: ecs.ContainerImage.fromRegistry(\"amazon/amazon-ecs-sample\"),\n },\n targetGroups: [\n {\n containerPort: 80,\n },\n {\n containerPort: 90,\n pathPattern: 'a/b/c',\n priority: 10\n }\n ]\n});\n```\n\n## Network Load Balanced Services\n\nTo define an Amazon ECS service that is behind a network load balancer, instantiate one of the following:\n\n* `NetworkLoadBalancedEc2Service`\n\n```ts\nconst loadBalancedEcsService = new ecsPatterns.NetworkLoadBalancedEc2Service(stack, 'Service', {\n 
cluster,\n memoryLimitMiB: 1024,\n taskImageOptions: {\n image: ecs.ContainerImage.fromRegistry('test'),\n environment: {\n TEST_ENVIRONMENT_VARIABLE1: \"test environment variable 1 value\",\n TEST_ENVIRONMENT_VARIABLE2: \"test environment variable 2 value\"\n },\n },\n desiredCount: 2,\n});\n```\n\n* `NetworkLoadBalancedFargateService`\n\n```ts\nconst loadBalancedFargateService = new ecsPatterns.NetworkLoadBalancedFargateService(stack, 'Service', {\n cluster,\n memoryLimitMiB: 1024,\n cpu: 512,\n taskImageOptions: {\n image: ecs.ContainerImage.fromRegistry(\"amazon/amazon-ecs-sample\"),\n },\n});\n```\n\nThe CDK will create a new Amazon ECS cluster if you specify a VPC and omit `cluster`. If you deploy multiple services the CDK will only create one cluster per VPC.\n\nIf `cluster` and `vpc` are omitted, the CDK creates a new VPC with subnets in two Availability Zones and a cluster within this VPC.\n\nIf you specify the option `recordType` you can decide if you want the construct to use CNAME or Route53-Aliases as record sets.\n\nAdditionally, if more than one network target group is needed, instantiate one of the following:\n\n* NetworkMultipleTargetGroupsEc2Service\n\n```ts\n// Two network load balancers, each with their own listener and target group.\nconst loadBalancedEc2Service = new NetworkMultipleTargetGroupsEc2Service(stack, 'Service', {\n cluster,\n memoryLimitMiB: 256,\n taskImageOptions: {\n image: ecs.ContainerImage.fromRegistry(\"amazon/amazon-ecs-sample\"),\n },\n loadBalancers: [\n {\n name: 'lb1',\n listeners: [\n {\n name: 'listener1'\n }\n ]\n },\n {\n name: 'lb2',\n listeners: [\n {\n name: 'listener2'\n }\n ]\n }\n ],\n targetGroups: [\n {\n containerPort: 80,\n listener: 'listener1'\n },\n {\n containerPort: 90,\n listener: 'listener2'\n }\n ]\n});\n```\n\n* NetworkMultipleTargetGroupsFargateService\n\n```ts\n// Two network load balancers, each with their own listener and target group.\nconst loadBalancedFargateService = new 
NetworkMultipleTargetGroupsFargateService(stack, 'Service', {\n cluster,\n memoryLimitMiB: 512,\n taskImageOptions: {\n image: ecs.ContainerImage.fromRegistry(\"amazon/amazon-ecs-sample\"),\n },\n loadBalancers: [\n {\n name: 'lb1',\n listeners: [\n {\n name: 'listener1'\n }\n ]\n },\n {\n name: 'lb2',\n listeners: [\n {\n name: 'listener2'\n }\n ]\n }\n ],\n targetGroups: [\n {\n containerPort: 80,\n listener: 'listener1'\n },\n {\n containerPort: 90,\n listener: 'listener2'\n }\n ]\n});\n```\n\n## Queue Processing Services\n\nTo define a service that creates a queue and reads from that queue, instantiate one of the following:\n\n* `QueueProcessingEc2Service`\n\n```ts\nconst queueProcessingEc2Service = new QueueProcessingEc2Service(stack, 'Service', {\n cluster,\n memoryLimitMiB: 1024,\n image: ecs.ContainerImage.fromRegistry('test'),\n command: [\"-c\", \"4\", \"amazon.com\"],\n enableLogging: false,\n desiredTaskCount: 2,\n environment: {\n TEST_ENVIRONMENT_VARIABLE1: \"test environment variable 1 value\",\n TEST_ENVIRONMENT_VARIABLE2: \"test environment variable 2 value\"\n },\n queue,\n maxScalingCapacity: 5,\n containerName: 'test',\n});\n```\n\n* `QueueProcessingFargateService`\n\n```ts\nconst queueProcessingFargateService = new QueueProcessingFargateService(stack, 'Service', {\n cluster,\n memoryLimitMiB: 512,\n image: ecs.ContainerImage.fromRegistry('test'),\n command: [\"-c\", \"4\", \"amazon.com\"],\n enableLogging: false,\n desiredTaskCount: 2,\n environment: {\n TEST_ENVIRONMENT_VARIABLE1: \"test environment variable 1 value\",\n TEST_ENVIRONMENT_VARIABLE2: \"test environment variable 2 value\"\n },\n queue,\n maxScalingCapacity: 5,\n containerName: 'test',\n});\n```\n\nwhen queue not provided by user, CDK will create a primary queue and a dead letter queue with default redrive policy and attach permission to the task to be able to access the primary queue.\n\n## Scheduled Tasks\n\nTo define a task that runs periodically, there are 2 options:\n\n* 
`ScheduledEc2Task`\n\n```ts\n// Instantiate an Amazon EC2 Task to run at a scheduled interval\nconst ecsScheduledTask = new ScheduledEc2Task(stack, 'ScheduledTask', {\n cluster,\n scheduledEc2TaskImageOptions: {\n image: ecs.ContainerImage.fromRegistry('amazon/amazon-ecs-sample'),\n memoryLimitMiB: 256,\n environment: { name: 'TRIGGER', value: 'CloudWatch Events' },\n },\n schedule: events.Schedule.expression('rate(1 minute)'),\n enabled: true,\n ruleName: 'sample-scheduled-task-rule'\n});\n```\n\n* `ScheduledFargateTask`\n\n```ts\nconst scheduledFargateTask = new ScheduledFargateTask(stack, 'ScheduledFargateTask', {\n cluster,\n scheduledFargateTaskImageOptions: {\n image: ecs.ContainerImage.fromRegistry('amazon/amazon-ecs-sample'),\n memoryLimitMiB: 512,\n },\n schedule: events.Schedule.expression('rate(1 minute)'),\n platformVersion: ecs.FargatePlatformVersion.LATEST,\n});\n```\n\n## Additional Examples\n\nIn addition to using the constructs, users can also add logic to customize these constructs:\n\n### Configure HTTPS on an ApplicationLoadBalancedFargateService\n\n```ts\nimport { ApplicationLoadBalancedFargateService } from './application-load-balanced-fargate-service';\nimport { HostedZone } from 'aws-cdk-lib/aws-route53';\nimport { Certificate } from 'aws-cdk-lib/aws-certificatemanager';\nimport { SslPolicy } from 'aws-cdk-lib/aws-elasticloadbalancingv2';\n\nconst domainZone = HostedZone.fromLookup(this, 'Zone', { domainName: 'example.com' });\nconst certificate = Certificate.fromCertificateArn(this, 'Cert', 'arn:aws:acm:us-east-1:123456:certificate/abcdefg');\n\nconst loadBalancedFargateService = new ApplicationLoadBalancedFargateService(stack, 'Service', {\n vpc\n cluster,\n certificate,\n sslPolicy: SslPolicy.RECOMMENDED,\n domainName: 'api.example.com',\n domainZone,\n redirectHTTP: true,\n taskImageOptions: {\n image: ecs.ContainerImage.fromRegistry(\"amazon/amazon-ecs-sample\"),\n },\n});\n```\n\n### Add Schedule-Based Auto-Scaling to an 
ApplicationLoadBalancedFargateService\n\n```ts\nimport { Schedule } from 'aws-cdk-lib/aws-applicationautoscaling';\nimport { ApplicationLoadBalancedFargateService, ApplicationLoadBalancedFargateServiceProps } from './application-load-balanced-fargate-service';\n\nconst loadBalancedFargateService = new ApplicationLoadBalancedFargateService(stack, 'Service', {\n cluster,\n memoryLimitMiB: 1024,\n desiredCount: 1,\n cpu: 512,\n taskImageOptions: {\n image: ecs.ContainerImage.fromRegistry(\"amazon/amazon-ecs-sample\"),\n },\n});\n\nconst scalableTarget = loadBalancedFargateService.service.autoScaleTaskCount({\n minCapacity: 5,\n maxCapacity: 20,\n});\n\nscalableTarget.scaleOnSchedule('DaytimeScaleDown', {\n schedule: Schedule.cron({ hour: '8', minute: '0'}),\n minCapacity: 1,\n});\n\nscalableTarget.scaleOnSchedule('EveningRushScaleUp', {\n schedule: Schedule.cron({ hour: '20', minute: '0'}),\n minCapacity: 10,\n});\n```\n\n### Add Metric-Based Auto-Scaling to an ApplicationLoadBalancedFargateService\n\n```ts\nimport { ApplicationLoadBalancedFargateService } from './application-load-balanced-fargate-service';\n\nconst loadBalancedFargateService = new ApplicationLoadBalancedFargateService(stack, 'Service', {\n cluster,\n memoryLimitMiB: 1024,\n desiredCount: 1,\n cpu: 512,\n taskImageOptions: {\n image: ecs.ContainerImage.fromRegistry(\"amazon/amazon-ecs-sample\"),\n },\n});\n\nconst scalableTarget = loadBalancedFargateService.service.autoScaleTaskCount({\n minCapacity: 1,\n maxCapacity: 20,\n});\n\nscalableTarget.scaleOnCpuUtilization('CpuScaling', {\n targetUtilizationPercent: 50,\n});\n\nscalableTarget.scaleOnMemoryUtilization('MemoryScaling', {\n targetUtilizationPercent: 50,\n});\n```\n\n### Change the default Deployment Controller\n\n```ts\nimport { ApplicationLoadBalancedFargateService } from './application-load-balanced-fargate-service';\n\nconst loadBalancedFargateService = new ApplicationLoadBalancedFargateService(stack, 'Service', {\n cluster,\n 
memoryLimitMiB: 1024,\n desiredCount: 1,\n cpu: 512,\n taskImageOptions: {\n image: ecs.ContainerImage.fromRegistry(\"amazon/amazon-ecs-sample\"),\n },\n deploymentController: {\n type: ecs.DeploymentControllerType.CODE_DEPLOY,\n },\n});\n```\n\n### Deployment circuit breaker and rollback\n\nAmazon ECS [deployment circuit breaker](https://aws.amazon.com/tw/blogs/containers/announcing-amazon-ecs-deployment-circuit-breaker/)\nautomatically rolls back unhealthy service deployments without the need for manual intervention. Use `circuitBreaker` to enable\ndeployment circuit breaker and optionally enable `rollback` for automatic rollback. See [Using the deployment circuit breaker](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-ecs.html)\nfor more details.\n\n```ts\nconst service = new ApplicationLoadBalancedFargateService(stack, 'Service', {\n cluster,\n memoryLimitMiB: 1024,\n desiredCount: 1,\n cpu: 512,\n taskImageOptions: {\n image: ecs.ContainerImage.fromRegistry(\"amazon/amazon-ecs-sample\"),\n },\n circuitBreaker: { rollback: true },\n});\n```\n\n### Set deployment configuration on QueueProcessingService\n\n```ts\nconst queueProcessingFargateService = new QueueProcessingFargateService(stack, 'Service', {\n cluster,\n memoryLimitMiB: 512,\n image: ecs.ContainerImage.fromRegistry('test'),\n command: [\"-c\", \"4\", \"amazon.com\"],\n enableLogging: false,\n desiredTaskCount: 2,\n environment: {},\n queue,\n maxScalingCapacity: 5,\n maxHealthyPercent: 200,\n minHealthPercent: 66,\n});\n```\n\n### Set taskSubnets and securityGroups for QueueProcessingFargateService\n\n```ts\nconst queueProcessingFargateService = new QueueProcessingFargateService(stack, 'Service', {\n vpc,\n memoryLimitMiB: 512,\n image: ecs.ContainerImage.fromRegistry('test'),\n securityGroups: [securityGroup],\n taskSubnets: { subnetType: ec2.SubnetType.ISOLATED },\n});\n```\n\n### Define tasks with public IPs for QueueProcessingFargateService\n\n```ts\nconst 
queueProcessingFargateService = new QueueProcessingFargateService(stack, 'Service', {\n vpc,\n memoryLimitMiB: 512,\n image: ecs.ContainerImage.fromRegistry('test'),\n assignPublicIp: true,\n});\n```\n\n### Define tasks with custom queue parameters for QueueProcessingFargateService\n\n```ts\nconst queueProcessingFargateService = new QueueProcessingFargateService(stack, 'Service', {\n vpc,\n memoryLimitMiB: 512,\n image: ecs.ContainerImage.fromRegistry('test'),\n maxReceiveCount: 42,\n retentionPeriod: cdk.Duration.days(7),\n visibilityTimeout: cdk.Duration.minutes(5),\n});\n```\n\n### Set capacityProviderStrategies for QueueProcessingFargateService\n\n```ts\nconst vpc = new ec2.Vpc(stack, 'Vpc', { maxAzs: 1 });\nconst cluster = new ecs.Cluster(stack, 'EcsCluster', { vpc });\ncluster.enableFargateCapacityProviders();\n\nconst queueProcessingFargateService = new QueueProcessingFargateService(stack, 'Service', {\n cluster,\n memoryLimitMiB: 512,\n image: ecs.ContainerImage.fromRegistry('test'),\n capacityProviderStrategies: [\n {\n capacityProvider: 'FARGATE_SPOT',\n weight: 2,\n },\n {\n capacityProvider: 'FARGATE',\n weight: 1,\n },\n ],\n});\n```\n\n### Set capacityProviderStrategies for QueueProcessingEc2Service\n\n```ts\nconst vpc = new ec2.Vpc(stack, 'Vpc', { maxAzs: 1 });\nconst cluster = new ecs.Cluster(stack, 'EcsCluster', { vpc });\nconst autoScalingGroup = new autoscaling.AutoScalingGroup(stack, 'asg', {\n vpc,\n instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.MICRO),\n machineImage: ecs.EcsOptimizedImage.amazonLinux2(),\n});\nconst capacityProvider = new ecs.AsgCapacityProvider(stack, 'provider', {\n autoScalingGroup,\n});\ncluster.addAsgCapacityProvider(capacityProvider);\n\nconst queueProcessingFargateService = new QueueProcessingFargateService(stack, 'Service', {\n cluster,\n memoryLimitMiB: 512,\n image: ecs.ContainerImage.fromRegistry('test'),\n capacityProviderStrategies: [\n {\n capacityProvider: 
capacityProvider.capacityProviderName,\n },\n ],\n});\n```\n\n### Select specific vpc subnets for ApplicationLoadBalancedFargateService\n\n```ts\nconst loadBalancedFargateService = new ApplicationLoadBalancedFargateService(stack, 'Service', {\n cluster,\n memoryLimitMiB: 1024,\n desiredCount: 1,\n cpu: 512,\n taskImageOptions: {\n image: ecs.ContainerImage.fromRegistry(\"amazon/amazon-ecs-sample\"),\n },\n vpcSubnets: {\n subnets: [ec2.Subnet.fromSubnetId(stack, 'subnet', 'VpcISOLATEDSubnet1Subnet80F07FA0')],\n },\n});\n```\n\n### Set PlatformVersion for ScheduledFargateTask\n\n```ts\nconst scheduledFargateTask = new ScheduledFargateTask(stack, 'ScheduledFargateTask', {\n cluster,\n scheduledFargateTaskImageOptions: {\n image: ecs.ContainerImage.fromRegistry('amazon/amazon-ecs-sample'),\n memoryLimitMiB: 512,\n },\n schedule: events.Schedule.expression('rate(1 minute)'),\n platformVersion: ecs.FargatePlatformVersion.VERSION1_4,\n});\n```\n\n### Set SecurityGroups for ScheduledFargateTask\n\n```ts\nconst stack = new cdk.Stack();\nconst vpc = new ec2.Vpc(stack, 'Vpc', { maxAzs: 1 });\nconst cluster = new ecs.Cluster(stack, 'EcsCluster', { vpc });\nconst securityGroup = new ec2.SecurityGroup(stack, 'SG', { vpc });\n\nconst scheduledFargateTask = new ScheduledFargateTask(stack, 'ScheduledFargateTask', {\n cluster,\n scheduledFargateTaskImageOptions: {\n image: ecs.ContainerImage.fromRegistry('amazon/amazon-ecs-sample'),\n memoryLimitMiB: 512,\n },\n schedule: events.Schedule.expression('rate(1 minute)'),\n securityGroups: [securityGroup],\n});\n```\n\n### Use the REMOVE_DEFAULT_DESIRED_COUNT feature flag\n\nThe REMOVE_DEFAULT_DESIRED_COUNT feature flag is used to override the default desiredCount that is autogenerated by the CDK. 
This will set the desiredCount of any service created by any of the following constructs to be undefined.\n\n* ApplicationLoadBalancedEc2Service\n* ApplicationLoadBalancedFargateService\n* NetworkLoadBalancedEc2Service\n* NetworkLoadBalancedFargateService\n* QueueProcessingEc2Service\n* QueueProcessingFargateService\n\nIf a desiredCount is not passed in as input to the above constructs, CloudFormation will either create a new service to start up with a desiredCount of 1, or update an existing service to start up with the same desiredCount as prior to the update.\n\nTo enable the feature flag, ensure that the REMOVE_DEFAULT_DESIRED_COUNT flag within an application stack context is set to true, like so:\n\n```ts\nstack.node.setContext(cxapi.ECS_REMOVE_DEFAULT_DESIRED_COUNT, true);\n```\n\nThe following is an example of an application with the REMOVE_DEFAULT_DESIRED_COUNT feature flag enabled:\n\n```ts\nconst app = new App();\n\nconst stack = new Stack(app, 'aws-ecs-patterns-queue');\nstack.node.setContext(cxapi.ECS_REMOVE_DEFAULT_DESIRED_COUNT, true);\n\nconst vpc = new ec2.Vpc(stack, 'VPC', {\n maxAzs: 2,\n});\n\nnew QueueProcessingFargateService(stack, 'QueueProcessingService', {\n vpc,\n memoryLimitMiB: 512,\n image: new ecs.AssetImage(path.join(__dirname, '..', 'sqs-reader')),\n});\n```\n\n### Deploy application and metrics sidecar\n\nThe following is an example of deploying an application along with a metrics sidecar container that utilizes `dockerLabels` for discovery:\n\n```ts\nconst service = new ApplicationLoadBalancedFargateService(stack, 'Service', {\n cluster,\n vpc,\n desiredCount: 1,\n taskImageOptions: {\n image: ecs.ContainerImage.fromRegistry(\"amazon/amazon-ecs-sample\"),\n },\n dockerLabels: {\n 'application.label.one': 'first_label'\n 'application.label.two': 'second_label'\n }\n});\n\nservice.taskDefinition.addContainer('Sidecar', {\n image: ContainerImage.fromRegistry('example/metrics-sidecar')\n}\n```\n\n### Select specific load balancer name 
ApplicationLoadBalancedFargateService\n\n```ts\nconst loadBalancedFargateService = new ApplicationLoadBalancedFargateService(stack, 'Service', {\n cluster,\n memoryLimitMiB: 1024,\n desiredCount: 1,\n cpu: 512,\n taskImageOptions: {\n image: ecs.ContainerImage.fromRegistry(\"amazon/amazon-ecs-sample\"),\n },\n vpcSubnets: {\n subnets: [ec2.Subnet.fromSubnetId(stack, 'subnet', 'VpcISOLATEDSubnet1Subnet80F07FA0')],\n },\n loadBalancerName: 'application-lb-name',\n});\n```\n"
|
|
1307
|
-
},
|
|
1308
928
|
"targets": {
|
|
1309
929
|
"dotnet": {
|
|
1310
930
|
"namespace": "Amazon.CDK.AWS.ECS.Patterns"
|
|
@@ -1318,13 +938,6 @@
|
|
|
1318
938
|
}
|
|
1319
939
|
},
|
|
1320
940
|
"aws-cdk-lib.aws_efs": {
|
|
1321
|
-
"locationInModule": {
|
|
1322
|
-
"filename": "lib/index.ts",
|
|
1323
|
-
"line": 70
|
|
1324
|
-
},
|
|
1325
|
-
"readme": {
|
|
1326
|
-
"markdown": "# Amazon Elastic File System Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\n[Amazon Elastic File System](https://docs.aws.amazon.com/efs/latest/ug/whatisefs.html) (Amazon EFS) provides a simple, scalable,\nfully managed elastic NFS file system for use with AWS Cloud services and on-premises resources.\nAmazon EFS provides file storage in the AWS Cloud. With Amazon EFS, you can create a file system,\nmount the file system on an Amazon EC2 instance, and then read and write data to and from your file system.\n\nThis module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk) project.\n\n## File Systems\n\nAmazon EFS provides elastic, shared file storage that is POSIX-compliant. The file system you create\nsupports concurrent read and write access from multiple Amazon EC2 instances and is accessible from\nall of the Availability Zones in the AWS Region where it is created. Learn more about [EFS file systems](https://docs.aws.amazon.com/efs/latest/ug/creating-using.html)\n\n### Create an Amazon EFS file system\n\nA Virtual Private Cloud (VPC) is required to create an Amazon EFS file system.\nThe following example creates a file system that is encrypted at rest, running in `General Purpose`\nperformance mode, and `Bursting` throughput mode and does not transition files to the Infrequent\nAccess (IA) storage class.\n\n```ts\nconst fileSystem = new efs.FileSystem(this, 'MyEfsFileSystem', {\n vpc: new ec2.Vpc(this, 'VPC'),\n lifecyclePolicy: efs.LifecyclePolicy.AFTER_14_DAYS, // files are not transitioned to infrequent access (IA) storage by default\n performanceMode: efs.PerformanceMode.GENERAL_PURPOSE, // default\n});\n```\n\n⚠️ An Amazon EFS file system's performance mode can't be changed after the file system has been created.\nUpdating this property will replace the file system.\n\nAny file system that has been created outside the stack can be imported into your CDK app.\n\nUse 
the `fromFileSystemAttributes()` API to import an existing file system.\nHere is an example of giving a role write permissions on a file system.\n\n```ts\nimport { aws_iam as iam } from 'aws-cdk-lib';\n\nconst importedFileSystem = efs.FileSystem.fromFileSystemAttributes(this, 'existingFS', {\n fileSystemId: 'fs-12345678', // You can also use fileSystemArn instead of fileSystemId.\n securityGroup: ec2.SecurityGroup.fromSecurityGroupId(this, 'SG', 'sg-123456789', {\n allowAllOutbound: false,\n }),\n});\n```\n\n### Permissions\n\nIf you need to grant file system permissions to another resource, you can use the `.grant()` API.\nAs an example, the following code gives `elasticfilesystem:ClientWrite` permissions to an IAM role.\n\n```ts fixture=with-filesystem-instance\nconst role = new iam.Role(this, 'Role', {\n assumedBy: new iam.AnyPrincipal(),\n});\n\nfileSystem.grant(role, 'elasticfilesystem:ClientWrite');\n```\n\n### Access Point\n\nAn access point is an application-specific view into an EFS file system that applies an operating\nsystem user and group, and a file system path, to any file system request made through the access\npoint. The operating system user and group override any identity information provided by the NFS\nclient. The file system path is exposed as the access point's root directory. Applications using\nthe access point can only access data in its own directory and below. To learn more, see [Mounting a File System Using EFS Access Points](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html).\n\nUse the `addAccessPoint` API to create an access point from a fileSystem.\n\n```ts fixture=with-filesystem-instance\nfileSystem.addAccessPoint('AccessPoint');\n```\n\nBy default, when you create an access point, the root(`/`) directory is exposed to the client\nconnecting to the access point. 
You can specify a custom path with the `path` property.\n\nIf `path` does not exist, it will be created with the settings defined in the `creationInfo`.\nSee [Creating Access Points](https://docs.aws.amazon.com/efs/latest/ug/create-access-point.html) for more details.\n\nAny access point that has been created outside the stack can be imported into your CDK app.\n\nUse the `fromAccessPointAttributes()` API to import an existing access point.\n\n```ts\nefs.AccessPoint.fromAccessPointAttributes(this, 'ap', {\n accessPointId: 'fsap-1293c4d9832fo0912',\n fileSystem: efs.FileSystem.fromFileSystemAttributes(this, 'efs', {\n fileSystemId: 'fs-099d3e2f',\n securityGroup: ec2.SecurityGroup.fromSecurityGroupId(this, 'sg', 'sg-51530134'),\n }),\n});\n```\n\n⚠️ Notice: When importing an Access Point using `fromAccessPointAttributes()`, you must make sure\nthe mount targets are deployed and their lifecycle state is `available`. Otherwise, you may encounter\nthe following error when deploying:\n> EFS file system <ARN of efs> referenced by access point <ARN of access point of EFS> has\n> mount targets created in all availability zones the function will execute in, but not all\n> are in the available life cycle state yet. Please wait for them to become available and\n> try the request again.\n\n### Connecting\n\nTo control who can access the EFS, use the `.connections` attribute. EFS has\na fixed default port, so you don't need to specify the port:\n\n```ts fixture=with-filesystem-instance\nfileSystem.connections.allowDefaultPortFrom(instance);\n```\n\nLearn more about [managing file system network accessibility](https://docs.aws.amazon.com/efs/latest/ug/manage-fs-access.html)\n\n### Mounting the file system using User Data\n\nAfter you create a file system, you can create mount targets. 
Then you can mount the file system on\nEC2 instances, containers, and Lambda functions in your virtual private cloud (VPC).\n\nThe following example automatically mounts a file system during instance launch.\n\n```ts fixture=with-filesystem-instance\nfileSystem.connections.allowDefaultPortFrom(instance);\n\ninstance.userData.addCommands(\"yum check-update -y\", // Ubuntu: apt-get -y update\n \"yum upgrade -y\", // Ubuntu: apt-get -y upgrade\n \"yum install -y amazon-efs-utils\", // Ubuntu: apt-get -y install amazon-efs-utils\n \"yum install -y nfs-utils\", // Ubuntu: apt-get -y install nfs-common\n \"file_system_id_1=\" + fileSystem.fileSystemId,\n \"efs_mount_point_1=/mnt/efs/fs1\",\n \"mkdir -p \\\"${efs_mount_point_1}\\\"\",\n \"test -f \\\"/sbin/mount.efs\\\" && echo \\\"${file_system_id_1}:/ ${efs_mount_point_1} efs defaults,_netdev\\\" >> /etc/fstab || \" +\n \"echo \\\"${file_system_id_1}.efs.\" + Stack.of(this).region + \".amazonaws.com:/ ${efs_mount_point_1} nfs4 nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport,_netdev 0 0\\\" >> /etc/fstab\",\n \"mount -a -t efs,nfs4 defaults\");\n```\n\nLearn more about [mounting EFS file systems](https://docs.aws.amazon.com/efs/latest/ug/mounting-fs.html)\n\n### Deleting\n\nSince file systems are stateful resources, by default the file system will not be deleted when your\nstack is deleted.\n\nYou can configure the file system to be destroyed on stack deletion by setting a `removalPolicy`\n\n```ts\nconst fileSystem = new efs.FileSystem(this, 'EfsFileSystem', {\n vpc: new ec2.Vpc(this, 'VPC'),\n removalPolicy: RemovalPolicy.DESTROY\n});\n```\n"
|
|
1327
|
-
},
|
|
1328
941
|
"targets": {
|
|
1329
942
|
"dotnet": {
|
|
1330
943
|
"namespace": "Amazon.CDK.AWS.EFS"
|
|
@@ -1338,13 +951,6 @@
|
|
|
1338
951
|
}
|
|
1339
952
|
},
|
|
1340
953
|
"aws-cdk-lib.aws_eks": {
|
|
1341
|
-
"locationInModule": {
|
|
1342
|
-
"filename": "lib/index.ts",
|
|
1343
|
-
"line": 71
|
|
1344
|
-
},
|
|
1345
|
-
"readme": {
|
|
1346
|
-
"markdown": "# Amazon EKS Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis construct library allows you to define [Amazon Elastic Container Service for Kubernetes (EKS)](https://aws.amazon.com/eks/) clusters.\nIn addition, the library also supports defining Kubernetes resource manifests within EKS clusters.\n\n## Table Of Contents\n\n* [Quick Start](#quick-start)\n* [API Reference](https://docs.aws.amazon.com/cdk/api/latest/docs/aws-eks-readme.html)\n* [Architectural Overview](#architectural-overview)\n* [Provisioning clusters](#provisioning-clusters)\n * [Managed node groups](#managed-node-groups)\n * [Fargate Profiles](#fargate-profiles)\n * [Self-managed nodes](#self-managed-nodes)\n * [Endpoint Access](#endpoint-access)\n * [VPC Support](#vpc-support)\n * [Kubectl Support](#kubectl-support)\n * [ARM64 Support](#arm64-support)\n * [Masters Role](#masters-role)\n * [Encryption](#encryption)\n* [Permissions and Security](#permissions-and-security)\n* [Applying Kubernetes Resources](#applying-kubernetes-resources)\n * [Kubernetes Manifests](#kubernetes-manifests)\n * [Helm Charts](#helm-charts)\n * [CDK8s Charts](#cdk8s-charts)\n* [Patching Kubernetes Resources](#patching-kubernetes-resources)\n* [Querying Kubernetes Resources](#querying-kubernetes-resources)\n* [Using existing clusters](#using-existing-clusters)\n* [Known Issues and Limitations](#known-issues-and-limitations)\n\n## Quick Start\n\nThis example defines an Amazon EKS cluster with the following configuration:\n\n* Dedicated VPC with default configuration (Implicitly created using [ec2.Vpc](https://docs.aws.amazon.com/cdk/api/latest/docs/aws-ec2-readme.html#vpc))\n* A Kubernetes pod with a container based on the [paulbouwer/hello-kubernetes](https://github.com/paulbouwer/hello-kubernetes) image.\n\n```ts\n// provisiong a cluster\nconst cluster = new eks.Cluster(this, 'hello-eks', {\n version: eks.KubernetesVersion.V1_21,\n});\n\n// apply a 
kubernetes manifest to the cluster\ncluster.addManifest('mypod', {\n apiVersion: 'v1',\n kind: 'Pod',\n metadata: { name: 'mypod' },\n spec: {\n containers: [\n {\n name: 'hello',\n image: 'paulbouwer/hello-kubernetes:1.5',\n ports: [ { containerPort: 8080 } ]\n }\n ]\n }\n});\n```\n\nIn order to interact with your cluster through `kubectl`, you can use the `aws eks update-kubeconfig` [AWS CLI command](https://docs.aws.amazon.com/cli/latest/reference/eks/update-kubeconfig.html)\nto configure your local kubeconfig. The EKS module will define a CloudFormation output in your stack which contains the command to run. For example:\n\n```plaintext\nOutputs:\nClusterConfigCommand43AAE40F = aws eks update-kubeconfig --name cluster-xxxxx --role-arn arn:aws:iam::112233445566:role/yyyyy\n```\n\nExecute the `aws eks update-kubeconfig ...` command in your terminal to create or update a local kubeconfig context:\n\n```console\n$ aws eks update-kubeconfig --name cluster-xxxxx --role-arn arn:aws:iam::112233445566:role/yyyyy\nAdded new context arn:aws:eks:rrrrr:112233445566:cluster/cluster-xxxxx to /home/boom/.kube/config\n```\n\nAnd now you can simply use `kubectl`:\n\n```console\n$ kubectl get all -n kube-system\nNAME READY STATUS RESTARTS AGE\npod/aws-node-fpmwv 1/1 Running 0 21m\npod/aws-node-m9htf 1/1 Running 0 21m\npod/coredns-5cb4fb54c7-q222j 1/1 Running 0 23m\npod/coredns-5cb4fb54c7-v9nxx 1/1 Running 0 23m\n...\n```\n\n## Architectural Overview\n\nThe following is a qualitative diagram of the various possible components involved in the cluster deployment.\n\n```text\n +-----------------------------------------------+ +-----------------+\n | EKS Cluster | kubectl | |\n |-----------------------------------------------|<-------------+| Kubectl Handler |\n | | | |\n | | +-----------------+\n | +--------------------+ +-----------------+ |\n | | | | | |\n | | Managed Node Group | | Fargate Profile | | +-----------------+\n | | | | | | | |\n | +--------------------+ 
+-----------------+ | | Cluster Handler |\n | | | |\n +-----------------------------------------------+ +-----------------+\n ^ ^ +\n | | |\n | connect self managed capacity | | aws-sdk\n | | create/update/delete |\n + | v\n +--------------------+ + +-------------------+\n | | --------------+| eks.amazonaws.com |\n | Auto Scaling Group | +-------------------+\n | |\n +--------------------+\n```\n\nIn a nutshell:\n\n* `EKS Cluster` - The cluster endpoint created by EKS.\n* `Managed Node Group` - EC2 worker nodes managed by EKS.\n* `Fargate Profile` - Fargate worker nodes managed by EKS.\n* `Auto Scaling Group` - EC2 worker nodes managed by the user.\n* `KubectlHandler` - Lambda function for invoking `kubectl` commands on the cluster - created by CDK.\n* `ClusterHandler` - Lambda function for interacting with EKS API to manage the cluster lifecycle - created by CDK.\n\nA more detailed breakdown of each is provided further down this README.\n\n## Provisioning clusters\n\nCreating a new cluster is done using the `Cluster` or `FargateCluster` constructs. The only required property is the kubernetes `version`.\n\n```ts\nnew eks.Cluster(this, 'HelloEKS', {\n version: eks.KubernetesVersion.V1_21,\n});\n```\n\nYou can also use `FargateCluster` to provision a cluster that uses only fargate workers.\n\n```ts\nnew eks.FargateCluster(this, 'HelloEKS', {\n version: eks.KubernetesVersion.V1_21,\n});\n```\n\n> **NOTE: Only 1 cluster per stack is supported.** If you have a use-case for multiple clusters per stack, or would like to understand more about this limitation, see <https://github.com/aws/aws-cdk/issues/10073>.\n\nBelow you'll find a few important cluster configuration options. First of which is Capacity.\nCapacity is the amount and the type of worker nodes that are available to the cluster for deploying resources. 
Amazon EKS offers 3 ways of configuring capacity, which you can combine as you like:\n\n### Managed node groups\n\nAmazon EKS managed node groups automate the provisioning and lifecycle management of nodes (Amazon EC2 instances) for Amazon EKS Kubernetes clusters.\nWith Amazon EKS managed node groups, you don’t need to separately provision or register the Amazon EC2 instances that provide compute capacity to run your Kubernetes applications. You can create, update, or terminate nodes for your cluster with a single operation. Nodes run using the latest Amazon EKS optimized AMIs in your AWS account while node updates and terminations gracefully drain nodes to ensure that your applications stay available.\n\n> For more details visit [Amazon EKS Managed Node Groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html).\n\n**Managed Node Groups are the recommended way to allocate cluster capacity.**\n\nBy default, this library will allocate a managed node group with 2 *m5.large* instances (this instance type suits most common use-cases, and is good value for money).\n\nAt cluster instantiation time, you can customize the number of instances and their type:\n\n```ts\nnew eks.Cluster(this, 'HelloEKS', {\n version: eks.KubernetesVersion.V1_21,\n defaultCapacity: 5,\n defaultCapacityInstance: ec2.InstanceType.of(ec2.InstanceClass.M5, ec2.InstanceSize.SMALL),\n});\n```\n\nTo access the node group that was created on your behalf, you can use `cluster.defaultNodegroup`.\n\nAdditional customizations are available post instantiation. 
To apply them, set the default capacity to 0, and use the `cluster.addNodegroupCapacity` method:\n\n```ts\nconst cluster = new eks.Cluster(this, 'HelloEKS', {\n version: eks.KubernetesVersion.V1_21,\n defaultCapacity: 0,\n});\n\ncluster.addNodegroupCapacity('custom-node-group', {\n instanceTypes: [new ec2.InstanceType('m5.large')],\n minSize: 4,\n diskSize: 100,\n amiType: eks.NodegroupAmiType.AL2_X86_64_GPU,\n ...\n});\n```\n\nTo set node taints, you can set `taints` option.\n\n```ts\ncluster.addNodegroupCapacity('custom-node-group', {\n instanceTypes: [new ec2.InstanceType('m5.large')],\n taints: [\n {\n effect: TaintEffect.NO_SCHEDULE,\n key: 'foo',\n value: 'bar',\n }\n ]\n ...\n});\n```\n\n#### Spot Instances Support\n\nUse `capacityType` to create managed node groups comprised of spot instances. To maximize the availability of your applications while using\nSpot Instances, we recommend that you configure a Spot managed node group to use multiple instance types with the `instanceTypes` property.\n\n> For more details visit [Managed node group capacity types](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html#managed-node-group-capacity-types).\n\n\n```ts\ncluster.addNodegroupCapacity('extra-ng-spot', {\n instanceTypes: [\n new ec2.InstanceType('c5.large'),\n new ec2.InstanceType('c5a.large'),\n new ec2.InstanceType('c5d.large'),\n ],\n minSize: 3,\n capacityType: eks.CapacityType.SPOT,\n});\n\n```\n\n#### Launch Template Support\n\nYou can specify a launch template that the node group will use. For example, this can be useful if you want to use\na custom AMI or add custom user data.\n\nWhen supplying a custom user data script, it must be encoded in the MIME multi-part archive format, since Amazon EKS merges with its own user data. 
Visit the [Launch Template Docs](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data)\nfor mode details.\n\n```ts\nconst userData = `MIME-Version: 1.0\nContent-Type: multipart/mixed; boundary=\"==MYBOUNDARY==\"\n\n--==MYBOUNDARY==\nContent-Type: text/x-shellscript; charset=\"us-ascii\"\n\n#!/bin/bash\necho \"Running custom user data script\"\n\n--==MYBOUNDARY==--\\\\\n`;\nconst lt = new ec2.CfnLaunchTemplate(this, 'LaunchTemplate', {\n launchTemplateData: {\n instanceType: 't3.small',\n userData: Fn.base64(userData),\n },\n});\ncluster.addNodegroupCapacity('extra-ng', {\n launchTemplateSpec: {\n id: lt.ref,\n version: lt.attrLatestVersionNumber,\n },\n});\n\n```\n\nNote that when using a custom AMI, Amazon EKS doesn't merge any user data. Which means you do not need the multi-part encoding. and are responsible for supplying the required bootstrap commands for nodes to join the cluster.\nIn the following example, `/ect/eks/bootstrap.sh` from the AMI will be used to bootstrap the node.\n\n```ts\nconst userData = ec2.UserData.forLinux();\nuserData.addCommands(\n 'set -o xtrace',\n `/etc/eks/bootstrap.sh ${cluster.clusterName}`,\n);\nconst lt = new ec2.CfnLaunchTemplate(this, 'LaunchTemplate', {\n launchTemplateData: {\n imageId: 'some-ami-id', // custom AMI\n instanceType: 't3.small',\n userData: Fn.base64(userData.render()),\n },\n});\ncluster.addNodegroupCapacity('extra-ng', {\n launchTemplateSpec: {\n id: lt.ref,\n version: lt.attrLatestVersionNumber,\n },\n});\n```\n\nYou may specify one `instanceType` in the launch template or multiple `instanceTypes` in the node group, **but not both**.\n\n> For more details visit [Launch Template Support](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html).\n\nGraviton 2 instance types are supported including `c6g`, `m6g`, `r6g` and `t4g`.\n\n### Fargate profiles\n\nAWS Fargate is a technology that provides on-demand, right-sized compute\ncapacity for containers. 
With AWS Fargate, you no longer have to provision,\nconfigure, or scale groups of virtual machines to run containers. This removes\nthe need to choose server types, decide when to scale your node groups, or\noptimize cluster packing.\n\nYou can control which pods start on Fargate and how they run with Fargate\nProfiles, which are defined as part of your Amazon EKS cluster.\n\nSee [Fargate Considerations](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html#fargate-considerations) in the AWS EKS User Guide.\n\nYou can add Fargate Profiles to any EKS cluster defined in your CDK app\nthrough the `addFargateProfile()` method. The following example adds a profile\nthat will match all pods from the \"default\" namespace:\n\n```ts\ncluster.addFargateProfile('MyProfile', {\n selectors: [ { namespace: 'default' } ]\n});\n```\n\nYou can also directly use the `FargateProfile` construct to create profiles under different scopes:\n\n```ts\nnew eks.FargateProfile(scope, 'MyProfile', {\n cluster,\n ...\n});\n```\n\nTo create an EKS cluster that **only** uses Fargate capacity, you can use `FargateCluster`.\nThe following code defines an Amazon EKS cluster with a default Fargate Profile that matches all pods from the \"kube-system\" and \"default\" namespaces. It is also configured to [run CoreDNS on Fargate](https://docs.aws.amazon.com/eks/latest/userguide/fargate-getting-started.html#fargate-gs-coredns).\n\n```ts\nconst cluster = new eks.FargateCluster(this, 'MyCluster', {\n version: eks.KubernetesVersion.V1_21,\n});\n```\n\n**NOTE**: Classic Load Balancers and Network Load Balancers are not supported on\npods running on Fargate. 
For ingress, we recommend that you use the [ALB Ingress\nController](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html)\non Amazon EKS (minimum version v1.1.4).\n\n### Self-managed nodes\n\nAnother way of allocating capacity to an EKS cluster is by using self-managed nodes.\nEC2 instances that are part of the auto-scaling group will serve as worker nodes for the cluster.\nThis type of capacity is also commonly referred to as *EC2 Capacity** or *EC2 Nodes*.\n\nFor a detailed overview please visit [Self Managed Nodes](https://docs.aws.amazon.com/eks/latest/userguide/worker.html).\n\nCreating an auto-scaling group and connecting it to the cluster is done using the `cluster.addAutoScalingGroupCapacity` method:\n\n```ts\ncluster.addAutoScalingGroupCapacity('frontend-nodes', {\n instanceType: new ec2.InstanceType('t2.medium'),\n minCapacity: 3,\n vpcSubnets: { subnetType: ec2.SubnetType.PUBLIC }\n});\n```\n\nTo connect an already initialized auto-scaling group, use the `cluster.connectAutoScalingGroupCapacity()` method:\n\n```ts\nconst asg = new ec2.AutoScalingGroup(...);\ncluster.connectAutoScalingGroupCapacity(asg);\n```\n\nIn both cases, the [cluster security group](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html#cluster-sg) will be automatically attached to\nthe auto-scaling group, allowing for traffic to flow freely between managed and self-managed nodes.\n\n> **Note:** The default `updateType` for auto-scaling groups does not replace existing nodes. 
Since security groups are determined at launch time, self-managed nodes that were provisioned with version `1.78.0` or lower, will not be updated.\n> To apply the new configuration on all your self-managed nodes, you'll need to replace the nodes using the `UpdateType.REPLACING_UPDATE` policy for the [`updateType`](https://docs.aws.amazon.com/cdk/api/latest/docs/@aws-cdk_aws-autoscaling.AutoScalingGroup.html#updatetypespan-classapi-icon-api-icon-deprecated-titlethis-api-element-is-deprecated-its-use-is-not-recommended%EF%B8%8Fspan) property.\n\nYou can customize the [/etc/eks/boostrap.sh](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh) script, which is responsible\nfor bootstrapping the node to the EKS cluster. For example, you can use `kubeletExtraArgs` to add custom node labels or taints.\n\n```ts\ncluster.addAutoScalingGroupCapacity('spot', {\n instanceType: new ec2.InstanceType('t3.large'),\n minCapacity: 2,\n bootstrapOptions: {\n kubeletExtraArgs: '--node-labels foo=bar,goo=far',\n awsApiRetryAttempts: 5\n }\n});\n```\n\nTo disable bootstrapping altogether (i.e. 
to fully customize user-data), set `bootstrapEnabled` to `false`.\nYou can also configure the cluster to use an auto-scaling group as the default capacity:\n\n```ts\ncluster = new eks.Cluster(this, 'HelloEKS', {\n version: eks.KubernetesVersion.V1_21,\n defaultCapacityType: eks.DefaultCapacityType.EC2,\n});\n```\n\nThis will allocate an auto-scaling group with 2 *m5.large* instances (this instance type suits most common use-cases, and is good value for money).\nTo access the `AutoScalingGroup` that was created on your behalf, you can use `cluster.defaultCapacity`.\nYou can also independently create an `AutoScalingGroup` and connect it to the cluster using the `cluster.connectAutoScalingGroupCapacity` method:\n\n```ts\nconst asg = new ec2.AutoScalingGroup(...)\ncluster.connectAutoScalingGroupCapacity(asg);\n```\n\nThis will add the necessary user-data to access the apiserver and configure all connections, roles, and tags needed for the instances in the auto-scaling group to properly join the cluster.\n\n#### Spot Instances\n\nWhen using self-managed nodes, you can configure the capacity to use spot instances, greatly reducing capacity cost.\nTo enable spot capacity, use the `spotPrice` property:\n\n```ts\ncluster.addAutoScalingGroupCapacity('spot', {\n spotPrice: '0.1094',\n instanceType: new ec2.InstanceType('t3.large'),\n maxCapacity: 10\n});\n```\n\n> Spot instance nodes will be labeled with `lifecycle=Ec2Spot` and tainted with `PreferNoSchedule`.\n\nThe [AWS Node Termination Handler](https://github.com/aws/aws-node-termination-handler) `DaemonSet` will be\ninstalled from [Amazon EKS Helm chart repository](https://github.com/aws/eks-charts/tree/master/stable/aws-node-termination-handler) on these nodes.\nThe termination handler ensures that the Kubernetes control plane responds appropriately to events that\ncan cause your EC2 instance to become unavailable, such as [EC2 maintenance 
events](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-instances-status-check_sched.html)\nand [EC2 Spot interruptions](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-interruptions.html) and helps gracefully stop all pods running on spot nodes that are about to be\nterminated.\n\n> Handler Version: [1.7.0](https://github.com/aws/aws-node-termination-handler/releases/tag/v1.7.0)\n>\n> Chart Version: [0.9.5](https://github.com/aws/eks-charts/blob/v0.0.28/stable/aws-node-termination-handler/Chart.yaml)\n\nTo disable the installation of the termination handler, set the `spotInterruptHandler` property to `false`. This applies both to `addAutoScalingGroupCapacity` and `connectAutoScalingGroupCapacity`.\n\n#### Bottlerocket\n\n[Bottlerocket](https://aws.amazon.com/bottlerocket/) is a Linux-based open-source operating system that is purpose-built by Amazon Web Services for running containers on virtual machines or bare metal hosts.\nAt this moment, `Bottlerocket` is only supported when using self-managed auto-scaling groups.\n\n> **NOTICE**: Bottlerocket is only available in [some supported AWS regions](https://github.com/bottlerocket-os/bottlerocket/blob/develop/QUICKSTART-EKS.md#finding-an-ami).\n\nThe following example will create an auto-scaling group of 2 `t3.small` Linux instances running with the `Bottlerocket` AMI.\n\n```ts\ncluster.addAutoScalingGroupCapacity('BottlerocketNodes', {\n instanceType: new ec2.InstanceType('t3.small'),\n minCapacity: 2,\n machineImageType: eks.MachineImageType.BOTTLEROCKET\n});\n```\n\nThe specific Bottlerocket AMI variant will be auto selected according to the k8s version for the `x86_64` architecture.\nFor example, if the Amazon EKS cluster version is `1.17`, the Bottlerocket AMI variant will be auto selected as\n`aws-k8s-1.17` behind the scene.\n\n> See [Variants](https://github.com/bottlerocket-os/bottlerocket/blob/develop/README.md#variants) for more details.\n\nPlease note Bottlerocket does not allow to 
customize bootstrap options and `bootstrapOptions` properties is not supported when you create the `Bottlerocket` capacity.\n\n### Endpoint Access\n\nWhen you create a new cluster, Amazon EKS creates an endpoint for the managed Kubernetes API server that you use to communicate with your cluster (using Kubernetes management tools such as `kubectl`)\n\nBy default, this API server endpoint is public to the internet, and access to the API server is secured using a combination of\nAWS Identity and Access Management (IAM) and native Kubernetes [Role Based Access Control](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) (RBAC).\n\nYou can configure the [cluster endpoint access](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) by using the `endpointAccess` property:\n\n```ts\nconst cluster = new eks.Cluster(this, 'hello-eks', {\n version: eks.KubernetesVersion.V1_21,\n endpointAccess: eks.EndpointAccess.PRIVATE // No access outside of your VPC.\n});\n```\n\nThe default value is `eks.EndpointAccess.PUBLIC_AND_PRIVATE`. Which means the cluster endpoint is accessible from outside of your VPC, but worker node traffic and `kubectl` commands issued by this library stay within your VPC.\n\n### VPC Support\n\nYou can specify the VPC of the cluster using the `vpc` and `vpcSubnets` properties:\n\n```ts\nconst vpc = new ec2.Vpc(this, 'Vpc');\n\nnew eks.Cluster(this, 'HelloEKS', {\n version: eks.KubernetesVersion.V1_21,\n vpc,\n vpcSubnets: [{ subnetType: ec2.SubnetType.PRIVATE }]\n});\n```\n\n> Note: Isolated VPCs (i.e with no internet access) are not currently supported. See https://github.com/aws/aws-cdk/issues/12171\n\nIf you do not specify a VPC, one will be created on your behalf, which you can then access via `cluster.vpc`. 
The cluster VPC will be associated to any EKS managed capacity (i.e Managed Node Groups and Fargate Profiles).\n\nIf you allocate self managed capacity, you can specify which subnets should the auto-scaling group use:\n\n```ts\nconst vpc = new ec2.Vpc(this, 'Vpc');\ncluster.addAutoScalingGroupCapacity('nodes', {\n vpcSubnets: { subnets: vpc.privateSubnets }\n});\n```\n\nThere are two additional components you might want to provision within the VPC.\n\n#### Kubectl Handler\n\nThe `KubectlHandler` is a Lambda function responsible to issuing `kubectl` and `helm` commands against the cluster when you add resource manifests to the cluster.\n\nThe handler association to the VPC is derived from the `endpointAccess` configuration. The rule of thumb is: *If the cluster VPC can be associated, it will be*.\n\nBreaking this down, it means that if the endpoint exposes private access (via `EndpointAccess.PRIVATE` or `EndpointAccess.PUBLIC_AND_PRIVATE`), and the VPC contains **private** subnets, the Lambda function will be provisioned inside the VPC and use the private subnets to interact with the cluster. This is the common use-case.\n\nIf the endpoint does not expose private access (via `EndpointAccess.PUBLIC`) **or** the VPC does not contain private subnets, the function will not be provisioned within the VPC.\n\n#### Cluster Handler\n\nThe `ClusterHandler` is a Lambda function responsible to interact with the EKS API in order to control the cluster lifecycle. To provision this function inside the VPC, set the `placeClusterHandlerInVpc` property to `true`. 
This will place the function inside the private subnets of the VPC based on the selection strategy specified in the [`vpcSubnets`](https://docs.aws.amazon.com/cdk/api/latest/docs/@aws-cdk_aws-eks.Cluster.html#vpcsubnetsspan-classapi-icon-api-icon-experimental-titlethis-api-element-is-experimental-it-may-change-without-noticespan) property.\n\nYou can configure the environment of this function by specifying it at cluster instantiation. For example, this can be useful in order to configure an http proxy:\n\n```ts\nconst cluster = new eks.Cluster(this, 'hello-eks', {\n version: eks.KubernetesVersion.V1_21,\n clusterHandlerEnvironment: {\n 'http_proxy': 'http://proxy.myproxy.com'\n }\n});\n```\n\n### Kubectl Support\n\nThe resources are created in the cluster by running `kubectl apply` from a python lambda function.\n\n#### Environment\n\nYou can configure the environment of this function by specifying it at cluster instantiation. For example, this can be useful in order to configure an http proxy:\n\n```ts\nconst cluster = new eks.Cluster(this, 'hello-eks', {\n version: eks.KubernetesVersion.V1_21,\n kubectlEnvironment: {\n 'http_proxy': 'http://proxy.myproxy.com'\n }\n});\n```\n\n#### Runtime\n\nThe kubectl handler uses `kubectl`, `helm` and the `aws` CLI in order to\ninteract with the cluster. These are bundled into AWS Lambda layers included in\nthe `@aws-cdk/lambda-layer-awscli` and `@aws-cdk/lambda-layer-kubectl` modules.\n\nYou can specify a custom `lambda.LayerVersion` if you wish to use a different\nversion of these tools. 
The handler expects the layer to include the following\nthree executables:\n\n```text\nhelm/helm\nkubectl/kubectl\nawscli/aws\n```\n\nSee more information in the\n[Dockerfile](https://github.com/aws/aws-cdk/tree/master/packages/%40aws-cdk/lambda-layer-awscli/layer) for @aws-cdk/lambda-layer-awscli\nand the\n[Dockerfile](https://github.com/aws/aws-cdk/tree/master/packages/%40aws-cdk/lambda-layer-kubectl/layer) for @aws-cdk/lambda-layer-kubectl.\n\n```ts\nconst layer = new lambda.LayerVersion(this, 'KubectlLayer', {\n code: lambda.Code.fromAsset('layer.zip'),\n});\n```\n\nNow specify when the cluster is defined:\n\n```ts\nconst cluster = new eks.Cluster(this, 'MyCluster', {\n kubectlLayer: layer,\n});\n\n// or\nconst cluster = eks.Cluster.fromClusterAttributes(this, 'MyCluster', {\n kubectlLayer: layer,\n});\n```\n\n#### Memory\n\nBy default, the kubectl provider is configured with 1024MiB of memory. You can use the `kubectlMemory` option to specify the memory size for the AWS Lambda function:\n\n```ts\nimport { Size } from 'aws-cdk-lib';\n\nnew eks.Cluster(this, 'MyCluster', {\n kubectlMemory: Size.gibibytes(4)\n});\n\n// or\neks.Cluster.fromClusterAttributes(this, 'MyCluster', {\n kubectlMemory: Size.gibibytes(4)\n});\n```\n\n### ARM64 Support\n\nInstance types with `ARM64` architecture are supported in both managed nodegroup and self-managed capacity. Simply specify an ARM64 `instanceType` (such as `m6g.medium`), and the latest\nAmazon Linux 2 AMI for ARM64 will be automatically selected.\n\n```ts\n// add a managed ARM64 nodegroup\ncluster.addNodegroupCapacity('extra-ng-arm', {\n instanceTypes: [new ec2.InstanceType('m6g.medium')],\n minSize: 2,\n});\n\n// add a self-managed ARM64 nodegroup\ncluster.addAutoScalingGroupCapacity('self-ng-arm', {\n instanceType: new ec2.InstanceType('m6g.medium'),\n minCapacity: 2,\n})\n```\n\n### Masters Role\n\nWhen you create a cluster, you can specify a `mastersRole`. 
The `Cluster` construct will associate this role with the `system:masters` [RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) group, giving it super-user access to the cluster.\n\n```ts\nconst role = new iam.Role(...);\nnew eks.Cluster(this, 'HelloEKS', {\n version: eks.KubernetesVersion.V1_21,\n mastersRole: role,\n});\n```\n\nIf you do not specify it, a default role will be created on your behalf, that can be assumed by anyone in the account with `sts:AssumeRole` permissions for this role.\n\nThis is the role you see as part of the stack outputs mentioned in the [Quick Start](#quick-start).\n\n```console\n$ aws eks update-kubeconfig --name cluster-xxxxx --role-arn arn:aws:iam::112233445566:role/yyyyy\nAdded new context arn:aws:eks:rrrrr:112233445566:cluster/cluster-xxxxx to /home/boom/.kube/config\n```\n\n### Encryption\n\nWhen you create an Amazon EKS cluster, envelope encryption of Kubernetes secrets using the AWS Key Management Service (AWS KMS) can be enabled.\nThe documentation on [creating a cluster](https://docs.aws.amazon.com/eks/latest/userguide/create-cluster.html)\ncan provide more details about the customer master key (CMK) that can be used for the encryption.\n\nYou can use the `secretsEncryptionKey` to configure which key the cluster will use to encrypt Kubernetes secrets. 
By default, an AWS Managed key will be used.\n\n> This setting can only be specified when the cluster is created and cannot be updated.\n\n```ts\nconst secretsKey = new kms.Key(this, 'SecretsKey');\nconst cluster = new eks.Cluster(this, 'MyCluster', {\n secretsEncryptionKey: secretsKey,\n // ...\n});\n```\n\nYou can also use a similar configuration for running a cluster built using the FargateCluster construct.\n\n```ts\nconst secretsKey = new kms.Key(this, 'SecretsKey');\nconst cluster = new eks.FargateCluster(this, 'MyFargateCluster', {\n secretsEncryptionKey: secretsKey\n});\n```\n\nThe Amazon Resource Name (ARN) for that CMK can be retrieved.\n\n```ts\nconst clusterEncryptionConfigKeyArn = cluster.clusterEncryptionConfigKeyArn;\n```\n\n## Permissions and Security\n\nAmazon EKS provides several mechanism of securing the cluster and granting permissions to specific IAM users and roles.\n\n### AWS IAM Mapping\n\nAs described in the [Amazon EKS User Guide](https://docs.aws.amazon.com/en_us/eks/latest/userguide/add-user-role.html), you can map AWS IAM users and roles to [Kubernetes Role-based access control (RBAC)](https://kubernetes.io/docs/reference/access-authn-authz/rbac).\n\nThe Amazon EKS construct manages the *aws-auth* `ConfigMap` Kubernetes resource on your behalf and exposes an API through the `cluster.awsAuth` for mapping\nusers, roles and accounts.\n\nFurthermore, when auto-scaling group capacity is added to the cluster, the IAM instance role of the auto-scaling group will be automatically mapped to RBAC so nodes can connect to the cluster. 
No manual mapping is required.\n\nFor example, let's say you want to grant an IAM user administrative privileges on your cluster:\n\n```ts\nconst adminUser = new iam.User(this, 'Admin');\ncluster.awsAuth.addUserMapping(adminUser, { groups: [ 'system:masters' ]});\n```\n\nA convenience method for mapping a role to the `system:masters` group is also available:\n\n```ts\ncluster.awsAuth.addMastersRole(role)\n```\n\n### Cluster Security Group\n\nWhen you create an Amazon EKS cluster, a [cluster security group](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html)\nis automatically created as well. This security group is designed to allow all traffic from the control plane and managed node groups to flow freely\nbetween each other.\n\nThe ID for that security group can be retrieved after creating the cluster.\n\n```ts\nconst clusterSecurityGroupId = cluster.clusterSecurityGroupId;\n```\n\n### Node SSH Access\n\nIf you want to be able to SSH into your worker nodes, you must already have an SSH key in the region you're connecting to and pass it when\nyou add capacity to the cluster. You must also be able to connect to the hosts (meaning they must have a public IP and you\nshould be allowed to connect to them on port 22):\n\nSee [SSH into nodes](test/example.ssh-into-nodes.lit.ts) for a code example.\n\nIf you want to SSH into nodes in a private subnet, you should set up a bastion host in a public subnet. 
That setup is recommended, but is\nunfortunately beyond the scope of this documentation.\n\n### Service Accounts\n\nWith services account you can provide Kubernetes Pods access to AWS resources.\n\n```ts\n// add service account\nconst serviceAccount = cluster.addServiceAccount('MyServiceAccount');\n\nconst bucket = new Bucket(this, 'Bucket');\nbucket.grantReadWrite(serviceAccount);\n\nconst mypod = cluster.addManifest('mypod', {\n apiVersion: 'v1',\n kind: 'Pod',\n metadata: { name: 'mypod' },\n spec: {\n serviceAccountName: serviceAccount.serviceAccountName\n containers: [\n {\n name: 'hello',\n image: 'paulbouwer/hello-kubernetes:1.5',\n ports: [ { containerPort: 8080 } ],\n\n }\n ]\n }\n});\n\n// create the resource after the service account.\nmypod.node.addDependency(serviceAccount);\n\n// print the IAM role arn for this service account\nnew cdk.CfnOutput(this, 'ServiceAccountIamRole', { value: serviceAccount.role.roleArn })\n```\n\nNote that using `serviceAccount.serviceAccountName` above **does not** translate into a resource dependency.\nThis is why an explicit dependency is needed. 
See <https://github.com/aws/aws-cdk/issues/9910> for more details.\n\nYou can also add service accounts to existing clusters.\nTo do so, pass the `openIdConnectProvider` property when you import the cluster into the application.\n\n```ts\n// you can import an existing provider\nconst provider = eks.OpenIdConnectProvider.fromOpenIdConnectProviderArn(this, 'Provider', 'arn:aws:iam::123456:oidc-provider/oidc.eks.eu-west-1.amazonaws.com/id/AB123456ABC');\n\n// or create a new one using an existing issuer url\nconst provider = new eks.OpenIdConnectProvider(this, 'Provider', issuerUrl);\n\nconst cluster = eks.Cluster.fromClusterAttributes({\n clusterName: 'Cluster',\n openIdConnectProvider: provider,\n kubectlRoleArn: 'arn:aws:iam::123456:role/service-role/k8sservicerole',\n});\n\nconst serviceAccount = cluster.addServiceAccount('MyServiceAccount');\n\nconst bucket = new Bucket(this, 'Bucket');\nbucket.grantReadWrite(serviceAccount);\n\n// ...\n```\n\nNote that adding service accounts requires running `kubectl` commands against the cluster.\nThis means you must also pass the `kubectlRoleArn` when importing the cluster.\nSee [Using existing Clusters](https://github.com/aws/aws-cdk/tree/master/packages/@aws-cdk/aws-eks#using-existing-clusters).\n\n## Applying Kubernetes Resources\n\nThe library supports several popular resource deployment mechanisms, among which are:\n\n### Kubernetes Manifests\n\nThe `KubernetesManifest` construct or `cluster.addManifest` method can be used\nto apply Kubernetes resource manifests to this cluster.\n\n> When using `cluster.addManifest`, the manifest construct is defined within the cluster's stack scope. 
If the manifest contains\n> attributes from a different stack which depend on the cluster stack, a circular dependency will be created and you will get a synth time error.\n> To avoid this, directly use `new KubernetesManifest` to create the manifest in the scope of the other stack.\n\nThe following examples will deploy the [paulbouwer/hello-kubernetes](https://github.com/paulbouwer/hello-kubernetes)\nservice on the cluster:\n\n```ts\nconst appLabel = { app: \"hello-kubernetes\" };\n\nconst deployment = {\n apiVersion: \"apps/v1\",\n kind: \"Deployment\",\n metadata: { name: \"hello-kubernetes\" },\n spec: {\n replicas: 3,\n selector: { matchLabels: appLabel },\n template: {\n metadata: { labels: appLabel },\n spec: {\n containers: [\n {\n name: \"hello-kubernetes\",\n image: \"paulbouwer/hello-kubernetes:1.5\",\n ports: [ { containerPort: 8080 } ]\n }\n ]\n }\n }\n }\n};\n\nconst service = {\n apiVersion: \"v1\",\n kind: \"Service\",\n metadata: { name: \"hello-kubernetes\" },\n spec: {\n type: \"LoadBalancer\",\n ports: [ { port: 80, targetPort: 8080 } ],\n selector: appLabel\n }\n};\n\n// option 1: use a construct\nnew KubernetesManifest(this, 'hello-kub', {\n cluster,\n manifest: [ deployment, service ]\n});\n\n// or, option2: use `addManifest`\ncluster.addManifest('hello-kub', service, deployment);\n```\n\n#### Adding resources from a URL\n\nThe following example will deploy the resource manifest hosting on remote server:\n\n```ts\nimport * as yaml from 'js-yaml';\nimport * as request from 'sync-request';\n\nconst manifestUrl = 'https://url/of/manifest.yaml';\nconst manifest = yaml.safeLoadAll(request('GET', manifestUrl).getBody());\ncluster.addManifest('my-resource', ...manifest);\n```\n\n#### Dependencies\n\nThere are cases where Kubernetes resources must be deployed in a specific order.\nFor example, you cannot define a resource in a Kubernetes namespace before the\nnamespace was created.\n\nYou can represent dependencies between `KubernetesManifest`s 
using\n`resource.node.addDependency()`:\n\n```ts\nconst namespace = cluster.addManifest('my-namespace', {\n apiVersion: 'v1',\n kind: 'Namespace',\n metadata: { name: 'my-app' }\n});\n\nconst service = cluster.addManifest('my-service', {\n metadata: {\n name: 'myservice',\n namespace: 'my-app'\n },\n spec: // ...\n});\n\nservice.node.addDependency(namespace); // will apply `my-namespace` before `my-service`.\n```\n\n**NOTE:** when a `KubernetesManifest` includes multiple resources (either directly\nor through `cluster.addManifest()`) (e.g. `cluster.addManifest('foo', r1, r2,\nr3,...)`), these resources will be applied as a single manifest via `kubectl`\nand will be applied sequentially (the standard behavior in `kubectl`).\n\n---\n\nSince Kubernetes manifests are implemented as CloudFormation resources in the\nCDK. This means that if the manifest is deleted from your code (or the stack is\ndeleted), the next `cdk deploy` will issue a `kubectl delete` command and the\nKubernetes resources in that manifest will be deleted.\n\n#### Resource Pruning\n\nWhen a resource is deleted from a Kubernetes manifest, the EKS module will\nautomatically delete these resources by injecting a _prune label_ to all\nmanifest resources. 
This label is then passed to [`kubectl apply --prune`].\n\n[`kubectl apply --prune`]: https://kubernetes.io/docs/tasks/manage-kubernetes-objects/declarative-config/#alternative-kubectl-apply-f-directory-prune-l-your-label\n\nPruning is enabled by default but can be disabled through the `prune` option\nwhen a cluster is defined:\n\n```ts\nnew Cluster(this, 'MyCluster', {\n prune: false\n});\n```\n\n#### Manifests Validation\n\nThe `kubectl` CLI supports applying a manifest by skipping the validation.\nThis can be accomplished by setting the `skipValidation` flag to `true` in the `KubernetesManifest` props.\n\n```ts\nnew eks.KubernetesManifest(this, 'HelloAppWithoutValidation', {\n cluster: this.cluster,\n manifest: [ deployment, service ],\n skipValidation: true,\n});\n```\n\n### Helm Charts\n\nThe `HelmChart` construct or `cluster.addHelmChart` method can be used\nto add Kubernetes resources to this cluster using Helm.\n\n> When using `cluster.addHelmChart`, the manifest construct is defined within the cluster's stack scope. 
If the manifest contains\n> attributes from a different stack which depend on the cluster stack, a circular dependency will be created and you will get a synth time error.\n> To avoid this, directly use `new HelmChart` to create the chart in the scope of the other stack.\n\nThe following example will install the [NGINX Ingress Controller](https://kubernetes.github.io/ingress-nginx/) to your cluster using Helm.\n\n```ts\n// option 1: use a construct\nnew HelmChart(this, 'NginxIngress', {\n cluster,\n chart: 'nginx-ingress',\n repository: 'https://helm.nginx.com/stable',\n namespace: 'kube-system'\n});\n\n// or, option2: use `addHelmChart`\ncluster.addHelmChart('NginxIngress', {\n chart: 'nginx-ingress',\n repository: 'https://helm.nginx.com/stable',\n namespace: 'kube-system'\n});\n```\n\nHelm charts will be installed and updated using `helm upgrade --install`, where a few parameters\nare being passed down (such as `repo`, `values`, `version`, `namespace`, `wait`, `timeout`, etc).\nThis means that if the chart is added to CDK with the same release name, it will try to update\nthe chart in the cluster.\n\nHelm charts are implemented as CloudFormation resources in CDK.\nThis means that if the chart is deleted from your code (or the stack is\ndeleted), the next `cdk deploy` will issue a `helm uninstall` command and the\nHelm chart will be deleted.\n\nWhen there is no `release` defined, a unique ID will be allocated for the release based\non the construct path.\n\nBy default, all Helm charts will be installed concurrently. In some cases, this\ncould cause race conditions where two Helm charts attempt to deploy the same\nresource or if Helm charts depend on each other. 
You can use\n`chart.node.addDependency()` in order to declare a dependency order between\ncharts:\n\n```ts\nconst chart1 = cluster.addHelmChart(...);\nconst chart2 = cluster.addHelmChart(...);\n\nchart2.node.addDependency(chart1);\n```\n\n#### CDK8s Charts\n\n[CDK8s](https://cdk8s.io/) is an open-source library that enables Kubernetes manifest authoring using familiar programming languages. It is founded on the same technologies as the AWS CDK, such as [`constructs`](https://github.com/aws/constructs) and [`jsii`](https://github.com/aws/jsii).\n\n> To learn more about cdk8s, visit the [Getting Started](https://github.com/awslabs/cdk8s/tree/master/docs/getting-started) tutorials.\n\nThe EKS module natively integrates with cdk8s and allows you to apply cdk8s charts on AWS EKS clusters via the `cluster.addCdk8sChart` method.\n\nIn addition to `cdk8s`, you can also use [`cdk8s+`](https://github.com/awslabs/cdk8s/tree/master/packages/cdk8s-plus), which provides higher level abstraction for the core kubernetes api objects.\nYou can think of it like the `L2` constructs for Kubernetes. Any other `cdk8s` based libraries are also supported, for example [`cdk8s-debore`](https://github.com/toricls/cdk8s-debore).\n\nTo get started, add the following dependencies to your `package.json` file:\n\n```json\n\"dependencies\": {\n \"cdk8s\": \"0.30.0\",\n \"cdk8s-plus\": \"0.30.0\",\n \"constructs\": \"3.0.4\"\n}\n```\n\n> Note that the version of `cdk8s` must be `>=0.30.0`.\n\nSimilarly to how you would create a stack by extending `@aws-cdk/core.Stack`, we recommend you create a chart of your own that extends `cdk8s.Chart`,\nand add your kubernetes resources to it. 
You can use `aws-cdk` construct attributes and properties inside your `cdk8s` construct freely.\n\nIn this example we create a chart that accepts an `s3.Bucket` and passes its name to a kubernetes pod as an environment variable.\n\nNotice that the chart must accept a `constructs.Construct` type as its scope, not an `@aws-cdk/core.Construct` as you would normally use.\nFor this reason, to avoid possible confusion, we will create the chart in a separate file:\n\n`+ my-chart.ts`\n\n```ts\nimport { aws_s3 as s3 } from 'aws-cdk-lib';\nimport * as constructs from 'constructs';\nimport * as cdk8s from 'cdk8s';\nimport * as kplus from 'cdk8s-plus';\n\nexport interface MyChartProps {\n readonly bucket: s3.Bucket;\n}\n\nexport class MyChart extends cdk8s.Chart {\n constructor(scope: constructs.Construct, id: string, props: MyChartProps) {\n super(scope, id);\n\n new kplus.Pod(this, 'Pod', {\n spec: {\n containers: [\n new kplus.Container({\n image: 'my-image',\n env: {\n BUCKET_NAME: kplus.EnvValue.fromValue(props.bucket.bucketName),\n },\n }),\n ],\n },\n });\n }\n}\n```\n\nThen, in your AWS CDK app:\n\n```ts\nimport { aws_s3 as s3 } from 'aws-cdk-lib';\nimport * as cdk8s from 'cdk8s';\nimport { MyChart } from './my-chart';\n\n// some bucket..\nconst bucket = new s3.Bucket(this, 'Bucket');\n\n// create a cdk8s chart and use `cdk8s.App` as the scope.\nconst myChart = new MyChart(new cdk8s.App(), 'MyChart', { bucket });\n\n// add the cdk8s chart to the cluster\ncluster.addCdk8sChart('my-chart', myChart);\n```\n\n##### Custom CDK8s Constructs\n\nYou can also compose a few stock `cdk8s+` constructs into your own custom construct. 
However, since mixing scopes between `aws-cdk` and `cdk8s` is currently not supported, the `Construct` class\nyou'll need to use is the one from the [`constructs`](https://github.com/aws/constructs) module, and not from `@aws-cdk/core` like you normally would.\nThis is why we used `new cdk8s.App()` as the scope of the chart above.\n\n```ts\nimport * as constructs from 'constructs';\nimport * as cdk8s from 'cdk8s';\nimport * as kplus from 'cdk8s-plus';\n\nexport interface LoadBalancedWebService {\n readonly port: number;\n readonly image: string;\n readonly replicas: number;\n}\n\nexport class LoadBalancedWebService extends constructs.Construct {\n constructor(scope: constructs.Construct, id: string, props: LoadBalancedWebService) {\n super(scope, id);\n\n const deployment = new kplus.Deployment(chart, 'Deployment', {\n spec: {\n replicas: props.replicas,\n podSpecTemplate: {\n containers: [ new kplus.Container({ image: props.image }) ]\n }\n },\n });\n\n deployment.expose({port: props.port, serviceType: kplus.ServiceType.LOAD_BALANCER})\n\n }\n}\n```\n\n##### Manually importing k8s specs and CRD's\n\nIf you find yourself unable to use `cdk8s+`, or just like to directly use the `k8s` native objects or CRD's, you can do so by manually importing them using the `cdk8s-cli`.\n\nSee [Importing kubernetes objects](https://github.com/awslabs/cdk8s/tree/master/packages/cdk8s-cli#import) for detailed instructions.\n\n## Patching Kubernetes Resources\n\nThe `KubernetesPatch` construct can be used to update existing kubernetes\nresources. 
The following example can be used to patch the `hello-kubernetes`\ndeployment from the example above with 5 replicas.\n\n```ts\nnew KubernetesPatch(this, 'hello-kub-deployment-label', {\n cluster,\n resourceName: \"deployment/hello-kubernetes\",\n applyPatch: { spec: { replicas: 5 } },\n restorePatch: { spec: { replicas: 3 } }\n})\n```\n\n## Querying Kubernetes Resources\n\nThe `KubernetesObjectValue` construct can be used to query for information about kubernetes objects,\nand use that as part of your CDK application.\n\nFor example, you can fetch the address of a [`LoadBalancer`](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer) type service:\n\n```ts\n// query the load balancer address\nconst myServiceAddress = new KubernetesObjectValue(this, 'LoadBalancerAttribute', {\n cluster: cluster,\n objectType: 'service',\n objectName: 'my-service',\n jsonPath: '.status.loadBalancer.ingress[0].hostname', // https://kubernetes.io/docs/reference/kubectl/jsonpath/\n});\n\n// pass the address to a lambda function\nconst proxyFunction = new lambda.Function(this, 'ProxyFunction', {\n ...\n environment: {\n myServiceAddress: myServiceAddress.value\n },\n})\n```\n\nSpecifically, since the above use-case is quite common, there is an easier way to access that information:\n\n```ts\nconst loadBalancerAddress = cluster.getServiceLoadBalancerAddress('my-service');\n```\n\n## Using existing clusters\n\nThe Amazon EKS library allows defining Kubernetes resources such as [Kubernetes\nmanifests](#kubernetes-resources) and [Helm charts](#helm-charts) on clusters\nthat are not defined as part of your CDK app.\n\nFirst, you'll need to \"import\" a cluster to your CDK app. 
To do that, use the\n`eks.Cluster.fromClusterAttributes()` static method:\n\n```ts\nconst cluster = eks.Cluster.fromClusterAttributes(this, 'MyCluster', {\n clusterName: 'my-cluster-name',\n kubectlRoleArn: 'arn:aws:iam::1111111:role/iam-role-that-has-masters-access',\n});\n```\n\nThen, you can use `addManifest` or `addHelmChart` to define resources inside\nyour Kubernetes cluster. For example:\n\n```ts\ncluster.addManifest('Test', {\n apiVersion: 'v1',\n kind: 'ConfigMap',\n metadata: {\n name: 'myconfigmap',\n },\n data: {\n Key: 'value',\n Another: '123454',\n },\n});\n```\n\nAt the minimum, when importing clusters for `kubectl` management, you will need\nto specify:\n\n* `clusterName` - the name of the cluster.\n* `kubectlRoleArn` - the ARN of an IAM role mapped to the `system:masters` RBAC\n role. If the cluster you are importing was created using the AWS CDK, the\n CloudFormation stack has an output that includes an IAM role that can be used.\n Otherwise, you can create an IAM role and map it to `system:masters` manually.\n The trust policy of this role should include the the\n `arn:aws::iam::${accountId}:root` principal in order to allow the execution\n role of the kubectl resource to assume it.\n\nIf the cluster is configured with private-only or private and restricted public\nKubernetes [endpoint access](#endpoint-access), you must also specify:\n\n* `kubectlSecurityGroupId` - the ID of an EC2 security group that is allowed\n connections to the cluster's control security group. For example, the EKS managed [cluster security group](#cluster-security-group).\n* `kubectlPrivateSubnetIds` - a list of private VPC subnets IDs that will be used\n to access the Kubernetes endpoint.\n\n## Known Issues and Limitations\n\n* [One cluster per stack](https://github.com/aws/aws-cdk/issues/10073)\n* [Service Account dependencies](https://github.com/aws/aws-cdk/issues/9910)\n* [Support isolated VPCs](https://github.com/aws/aws-cdk/issues/12171)\n"
|
|
1347
|
-
},
|
|
1348
954
|
"targets": {
|
|
1349
955
|
"dotnet": {
|
|
1350
956
|
"namespace": "Amazon.CDK.AWS.EKS"
|
|
@@ -1358,10 +964,6 @@
|
|
|
1358
964
|
}
|
|
1359
965
|
},
|
|
1360
966
|
"aws-cdk-lib.aws_elasticache": {
|
|
1361
|
-
"locationInModule": {
|
|
1362
|
-
"filename": "lib/index.ts",
|
|
1363
|
-
"line": 72
|
|
1364
|
-
},
|
|
1365
967
|
"targets": {
|
|
1366
968
|
"dotnet": {
|
|
1367
969
|
"namespace": "Amazon.CDK.AWS.ElastiCache"
|
|
@@ -1375,10 +977,6 @@
|
|
|
1375
977
|
}
|
|
1376
978
|
},
|
|
1377
979
|
"aws-cdk-lib.aws_elasticbeanstalk": {
|
|
1378
|
-
"locationInModule": {
|
|
1379
|
-
"filename": "lib/index.ts",
|
|
1380
|
-
"line": 73
|
|
1381
|
-
},
|
|
1382
980
|
"targets": {
|
|
1383
981
|
"dotnet": {
|
|
1384
982
|
"namespace": "Amazon.CDK.AWS.ElasticBeanstalk"
|
|
@@ -1392,13 +990,6 @@
|
|
|
1392
990
|
}
|
|
1393
991
|
},
|
|
1394
992
|
"aws-cdk-lib.aws_elasticloadbalancing": {
|
|
1395
|
-
"locationInModule": {
|
|
1396
|
-
"filename": "lib/index.ts",
|
|
1397
|
-
"line": 74
|
|
1398
|
-
},
|
|
1399
|
-
"readme": {
|
|
1400
|
-
"markdown": "# Amazon Elastic Load Balancing Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThe `@aws-cdk/aws-elasticloadbalancing` package provides constructs for configuring\nclassic load balancers.\n\n## Configuring a Load Balancer\n\nLoad balancers send traffic to one or more AutoScalingGroups. Create a load\nbalancer, set up listeners and a health check, and supply the fleet(s) you want\nto load balance to in the `targets` property.\n\n```ts\nconst lb = new elb.LoadBalancer(this, 'LB', {\n vpc,\n internetFacing: true,\n healthCheck: {\n port: 80\n },\n});\n\nlb.addTarget(myAutoScalingGroup);\nlb.addListener({\n externalPort: 80,\n});\n```\n\nThe load balancer allows all connections by default. If you want to change that,\npass the `allowConnectionsFrom` property while setting up the listener:\n\n```ts\nlb.addListener({\n externalPort: 80,\n allowConnectionsFrom: [mySecurityGroup]\n});\n```\n"
|
|
1401
|
-
},
|
|
1402
993
|
"targets": {
|
|
1403
994
|
"dotnet": {
|
|
1404
995
|
"namespace": "Amazon.CDK.AWS.ElasticLoadBalancing"
|
|
@@ -1412,13 +1003,6 @@
|
|
|
1412
1003
|
}
|
|
1413
1004
|
},
|
|
1414
1005
|
"aws-cdk-lib.aws_elasticloadbalancingv2": {
|
|
1415
|
-
"locationInModule": {
|
|
1416
|
-
"filename": "lib/index.ts",
|
|
1417
|
-
"line": 75
|
|
1418
|
-
},
|
|
1419
|
-
"readme": {
|
|
1420
|
-
"markdown": "# Amazon Elastic Load Balancing V2 Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\n\nThe `@aws-cdk/aws-elasticloadbalancingv2` package provides constructs for\nconfiguring application and network load balancers.\n\nFor more information, see the AWS documentation for\n[Application Load Balancers](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/introduction.html)\nand [Network Load Balancers](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/introduction.html).\n\n## Defining an Application Load Balancer\n\nYou define an application load balancer by creating an instance of\n`ApplicationLoadBalancer`, adding a Listener to the load balancer\nand adding Targets to the Listener:\n\n```ts\nimport { aws_ec2 as ec2 } from 'aws-cdk-lib';\nimport { aws_elasticloadbalancingv2 as elbv2 } from 'aws-cdk-lib';\nimport { AutoScalingGroup } from 'aws-cdk-lib/aws-autoscaling';\n\n// ...\n\nconst vpc = new ec2.Vpc(...);\n\n// Create the load balancer in a VPC. 'internetFacing' is 'false'\n// by default, which creates an internal load balancer.\nconst lb = new elbv2.ApplicationLoadBalancer(this, 'LB', {\n vpc,\n internetFacing: true\n});\n\n// Add a listener and open up the load balancer's security group\n// to the world.\nconst listener = lb.addListener('Listener', {\n port: 80,\n\n // 'open: true' is the default, you can leave it out if you want. 
Set it\n // to 'false' and use `listener.connections` if you want to be selective\n // about who can access the load balancer.\n open: true,\n});\n\n// Create an AutoScaling group and add it as a load balancing\n// target to the listener.\nconst asg = new AutoScalingGroup(...);\nlistener.addTargets('ApplicationFleet', {\n port: 8080,\n targets: [asg]\n});\n```\n\nThe security groups of the load balancer and the target are automatically\nupdated to allow the network traffic.\n\nOne (or more) security groups can be associated with the load balancer;\nif a security group isn't provided, one will be automatically created.\n\n```ts\nconst securityGroup1 = new ec2.SecurityGroup(stack, 'SecurityGroup1', { vpc });\nconst lb = new elbv2.ApplicationLoadBalancer(this, 'LB', {\n vpc,\n internetFacing: true,\n securityGroup: securityGroup1, // Optional - will be automatically created otherwise\n});\n\nconst securityGroup2 = new ec2.SecurityGroup(stack, 'SecurityGroup2', { vpc });\nlb.addSecurityGroup(securityGroup2);\n```\n\n### Conditions\n\nIt's possible to route traffic to targets based on conditions in the incoming\nHTTP request. For example, the following will route requests to the indicated\nAutoScalingGroup only if the requested host in the request is either for\n`example.com/ok` or `example.com/path`:\n\n```ts\nlistener.addTargets('Example.Com Fleet', {\n priority: 10,\n conditions: [\n ListenerCondition.hostHeaders(['example.com']),\n ListenerCondition.pathPatterns(['/ok', '/path']),\n ],\n port: 8080,\n targets: [asg]\n});\n```\n\nA target with a condition contains either `pathPatterns` or `hostHeader`, or\nboth. If both are specified, both conditions must be met for the requests to\nbe routed to the given target. `priority` is a required field when you add\ntargets with conditions. 
The lowest number wins.\n\nEvery listener must have at least one target without conditions, which is\nwhere all requests that didn't match any of the conditions will be sent.\n\n### Convenience methods and more complex Actions\n\nRouting traffic from a Load Balancer to a Target involves the following steps:\n\n- Create a Target Group, register the Target into the Target Group\n- Add an Action to the Listener which forwards traffic to the Target Group.\n\nA new listener can be added to the Load Balancer by calling `addListener()`.\nListeners that have been added to the load balancer can be listed using the\n`listeners` property. Note that the `listeners` property will throw an Error\nfor imported or looked up Load Balancers.\n\nVarious methods on the `Listener` take care of this work for you to a greater\nor lesser extent:\n\n- `addTargets()` performs both steps: automatically creates a Target Group and the\n required Action.\n- `addTargetGroups()` gives you more control: you create the Target Group (or\n Target Groups) yourself and the method creates Action that routes traffic to\n the Target Groups.\n- `addAction()` gives you full control: you supply the Action and wire it up\n to the Target Groups yourself (or access one of the other ELB routing features).\n\nUsing `addAction()` gives you access to some of the features of an Elastic Load\nBalancer that the other two convenience methods don't:\n\n- **Routing stickiness**: use `ListenerAction.forward()` and supply a\n `stickinessDuration` to make sure requests are routed to the same target group\n for a given duration.\n- **Weighted Target Groups**: use `ListenerAction.weightedForward()`\n to give different weights to different target groups.\n- **Fixed Responses**: use `ListenerAction.fixedResponse()` to serve\n a static response (ALB only).\n- **Redirects**: use `ListenerAction.redirect()` to serve an HTTP\n redirect response (ALB only).\n- **Authentication**: use `ListenerAction.authenticateOidc()` to\n perform 
OpenID authentication before serving a request (see the\n `@aws-cdk/aws-elasticloadbalancingv2-actions` package for direct authentication\n integration with Cognito) (ALB only).\n\nHere's an example of serving a fixed response at the `/ok` URL:\n\n```ts\nlistener.addAction('Fixed', {\n priority: 10,\n conditions: [\n ListenerCondition.pathPatterns(['/ok']),\n ],\n action: ListenerAction.fixedResponse(200, {\n contentType: elbv2.ContentType.TEXT_PLAIN,\n messageBody: 'OK',\n })\n});\n```\n\nHere's an example of using OIDC authentication before forwarding to a TargetGroup:\n\n```ts\nlistener.addAction('DefaultAction', {\n action: ListenerAction.authenticateOidc({\n authorizationEndpoint: 'https://example.com/openid',\n // Other OIDC properties here\n // ...\n next: ListenerAction.forward([myTargetGroup]),\n }),\n});\n```\n\nIf you just want to redirect all incoming traffic on one port to another port, you can use the following code:\n\n```ts\nlb.addRedirect({\n sourceProtocol: elbv2.ApplicationProtocol.HTTPS,\n sourcePort: 8443,\n targetProtocol: elbv2.ApplicationProtocol.HTTP,\n targetPort: 8080,\n});\n```\n\nIf you do not provide any options for this method, it redirects HTTP port 80 to HTTPS port 443.\n\nBy default all ingress traffic will be allowed on the source port. If you want to be more selective with your\ningress rules then set `open: false` and use the listener's `connections` object to selectively grant access to the listener.\n\n## Defining a Network Load Balancer\n\nNetwork Load Balancers are defined in a similar way to Application Load\nBalancers:\n\n```ts\nimport { aws_ec2 as ec2 } from 'aws-cdk-lib';\nimport { aws_elasticloadbalancingv2 as elbv2 } from 'aws-cdk-lib';\nimport { aws_autoscaling as autoscaling } from 'aws-cdk-lib';\n\n// Create the load balancer in a VPC. 
'internetFacing' is 'false'\n// by default, which creates an internal load balancer.\nconst lb = new elbv2.NetworkLoadBalancer(this, 'LB', {\n vpc,\n internetFacing: true\n});\n\n// Add a listener on a particular port.\nconst listener = lb.addListener('Listener', {\n port: 443,\n});\n\n// Add targets on a particular port.\nlistener.addTargets('AppFleet', {\n port: 443,\n targets: [asg]\n});\n```\n\nOne thing to keep in mind is that network load balancers do not have security\ngroups, and no automatic security group configuration is done for you. You will\nhave to configure the security groups of the target yourself to allow traffic by\nclients and/or load balancer instances, depending on your target types. See\n[Target Groups for your Network Load\nBalancers](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/load-balancer-target-groups.html)\nand [Register targets with your Target\nGroup](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/target-group-register-targets.html)\nfor more information.\n\n## Targets and Target Groups\n\nApplication and Network Load Balancers organize load balancing targets in Target\nGroups. 
If you add your balancing targets (such as AutoScalingGroups, ECS\nservices or individual instances) to your listener directly, the appropriate\n`TargetGroup` will be automatically created for you.\n\nIf you need more control over the Target Groups created, create an instance of\n`ApplicationTargetGroup` or `NetworkTargetGroup`, add the members you desire,\nand add it to the listener by calling `addTargetGroups` instead of `addTargets`.\n\n`addTargets()` will always return the Target Group it just created for you:\n\n```ts\nconst group = listener.addTargets('AppFleet', {\n port: 443,\n targets: [asg1],\n});\n\ngroup.addTarget(asg2);\n```\n\n### Sticky sessions for your Application Load Balancer\n\nBy default, an Application Load Balancer routes each request independently to a registered target based on the chosen load-balancing algorithm. However, you can use the sticky session feature (also known as session affinity) to enable the load balancer to bind a user's session to a specific target. This ensures that all requests from the user during the session are sent to the same target. This feature is useful for servers that maintain state information in order to provide a continuous experience to clients. To use sticky sessions, the client must support cookies.\n\nApplication Load Balancers support both duration-based cookies (`lb_cookie`) and application-based cookies (`app_cookie`). The key to managing sticky sessions is determining how long your load balancer should consistently route the user's request to the same target. Sticky sessions are enabled at the target group level. 
You can use a combination of duration-based stickiness, application-based stickiness, and no stickiness across all of your target groups.\n\n```ts\n// Target group with duration-based stickiness with load-balancer generated cookie\nconst tg1 = new elbv2.ApplicationTargetGroup(stack, 'TG1', {\n targetType: elbv2.TargetType.INSTANCE,\n port: 80,\n stickinessCookieDuration: cdk.Duration.minutes(5),\n vpc,\n});\n\n// Target group with application-based stickiness\nconst tg2 = new elbv2.ApplicationTargetGroup(stack, 'TG2', {\n targetType: elbv2.TargetType.INSTANCE,\n port: 80,\n stickinessCookieDuration: cdk.Duration.minutes(5),\n stickinessCookieName: 'MyDeliciousCookie',\n vpc,\n});\n```\n\nFor more information see: https://docs.aws.amazon.com/elasticloadbalancing/latest/application/sticky-sessions.html#application-based-stickiness\n\n### Setting the target group protocol version\n\nBy default, Application Load Balancers send requests to targets using HTTP/1.1. You can use the [protocol version](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-target-groups.html#target-group-protocol-version) to send requests to targets using HTTP/2 or gRPC.\n\n```ts\nconst tg = new elbv2.ApplicationTargetGroup(stack, 'TG', {\n targetType: elbv2.TargetType.IP,\n port: 50051,\n protocol: elbv2.ApplicationProtocol.HTTP,\n protocolVersion: elbv2.ApplicationProtocolVersion.GRPC,\n healthCheck: {\n enabled: true,\n healthyGrpcCodes: '0-99',\n },\n vpc,\n});\n```\n\n## Using Lambda Targets\n\nTo use a Lambda Function as a target, use the integration class in the\n`@aws-cdk/aws-elasticloadbalancingv2-targets` package:\n\n```ts\nimport { aws_lambda as lambda } from 'aws-cdk-lib';\nimport { aws_elasticloadbalancingv2 as elbv2 } from 'aws-cdk-lib';\nimport { aws_elasticloadbalancingv2_targets as targets } from 'aws-cdk-lib';\n\nconst lambdaFunction = new lambda.Function(...);\nconst lb = new elbv2.ApplicationLoadBalancer(...);\n\nconst listener = 
lb.addListener('Listener', { port: 80 });\nlistener.addTargets('Targets', {\n targets: [new targets.LambdaTarget(lambdaFunction)],\n\n // For Lambda Targets, you need to explicitly enable health checks if you\n // want them.\n healthCheck: {\n enabled: true,\n }\n});\n```\n\nOnly a single Lambda function can be added to a single listener rule.\n\n## Configuring Health Checks\n\nHealth checks are configured upon creation of a target group:\n\n```ts\nlistener.addTargets('AppFleet', {\n port: 8080,\n targets: [asg],\n healthCheck: {\n path: '/ping',\n interval: cdk.Duration.minutes(1),\n }\n});\n```\n\nThe health check can also be configured after creation by calling\n`configureHealthCheck()` on the created object.\n\nNo attempts are made to configure security groups for the port you're\nconfiguring a health check for, but if the health check is on the same port\nyou're routing traffic to, the security group already allows the traffic.\nIf not, you will have to configure the security groups appropriately:\n\n```ts\nlistener.addTargets('AppFleet', {\n port: 8080,\n targets: [asg],\n healthCheck: {\n port: 8088,\n }\n});\n\nlistener.connections.allowFrom(lb, ec2.Port.tcp(8088));\n```\n\n## Using a Load Balancer from a different Stack\n\nIf you want to put your Load Balancer and the Targets it is load balancing to in\ndifferent stacks, you may not be able to use the convenience methods\n`loadBalancer.addListener()` and `listener.addTargets()`.\n\nThe reason is that these methods will create resources in the same Stack as the\nobject they're called on, which may lead to cyclic references between stacks.\nInstead, you will have to create an `ApplicationListener` in the target stack,\nor an empty `TargetGroup` in the load balancer stack that you attach your\nservice to.\n\nFor an example of the alternatives while load balancing to an ECS service, see 
the\n[ecs/cross-stack-load-balancer\nexample](https://github.com/aws-samples/aws-cdk-examples/tree/master/typescript/ecs/cross-stack-load-balancer/).\n\n## Protocol for Load Balancer Targets\n\nConstructs that want to be a load balancer target should implement\n`IApplicationLoadBalancerTarget` and/or `INetworkLoadBalancerTarget`, and\nprovide an implementation for the function `attachToXxxTargetGroup()`, which can\ncall functions on the load balancer and should return metadata about the\nload balancing target:\n\n```ts\npublic attachToApplicationTargetGroup(targetGroup: ApplicationTargetGroup): LoadBalancerTargetProps {\n targetGroup.registerConnectable(...);\n return {\n targetType: TargetType.Instance | TargetType.Ip\n targetJson: { id: ..., port: ... },\n };\n}\n```\n\n`targetType` should be one of `Instance` or `Ip`. If the target can be\ndirectly added to the target group, `targetJson` should contain the `id` of\nthe target (either instance ID or IP address depending on the type) and\noptionally a `port` or `availabilityZone` override.\n\nApplication load balancer targets can call `registerConnectable()` on the\ntarget group to register themselves for addition to the load balancer's security\ngroup rules.\n\nIf your load balancer target requires that the TargetGroup has been\nassociated with a LoadBalancer before registration can happen (such as is the\ncase for ECS Services for example), take a resource dependency on\n`targetGroup.loadBalancerDependency()` as follows:\n\n```ts\n// Make sure that the listener has been created, and so the TargetGroup\n// has been associated with the LoadBalancer, before 'resource' is created.\nresourced.addDependency(targetGroup.loadBalancerDependency());\n```\n\n## Looking up Load Balancers and Listeners\n\nYou may look up load balancers and load balancer listeners by using one of the\nfollowing lookup methods:\n\n- `ApplicationLoadBalancer.fromlookup(options)` - Look up an application load\n balancer.\n- 
`ApplicationListener.fromLookup(options)` - Look up an application load\n balancer listener.\n- `NetworkLoadBalancer.fromLookup(options)` - Look up a network load balancer.\n- `NetworkListener.fromLookup(options)` - Look up a network load balancer\n listener.\n\n### Load Balancer lookup options\n\nYou may look up a load balancer by ARN or by associated tags. When you look a\nload balancer up by ARN, that load balancer will be returned unless CDK detects\nthat the load balancer is of the wrong type. When you look up a load balancer by\ntags, CDK will return the load balancer matching all specified tags. If more\nthan one load balancer matches, CDK will throw an error requesting that you\nprovide more specific criteria.\n\n**Look up a Application Load Balancer by ARN**\n\n```ts\nconst loadBalancer = ApplicationLoadBalancer.fromLookup(stack, 'ALB', {\n loadBalancerArn: YOUR_ALB_ARN,\n});\n```\n\n**Look up an Application Load Balancer by tags**\n\n```ts\nconst loadBalancer = ApplicationLoadBalancer.fromLookup(stack, 'ALB', {\n loadBalancerTags: {\n // Finds a load balancer matching all tags.\n some: 'tag',\n someother: 'tag',\n },\n});\n```\n\n## Load Balancer Listener lookup options\n\nYou may look up a load balancer listener by the following criteria:\n\n- Associated load balancer ARN\n- Associated load balancer tags\n- Listener ARN\n- Listener port\n- Listener protocol\n\nThe lookup method will return the matching listener. 
If more than one listener\nmatches, CDK will throw an error requesting that you specify additional\ncriteria.\n\n**Look up a Listener by associated Load Balancer, Port, and Protocol**\n\n```ts\nconst listener = ApplicationListener.fromLookup(stack, 'ALBListener', {\n loadBalancerArn: YOUR_ALB_ARN,\n listenerProtocol: ApplicationProtocol.HTTPS,\n listenerPort: 443,\n});\n```\n\n**Look up a Listener by associated Load Balancer Tag, Port, and Protocol**\n\n```ts\nconst listener = ApplicationListener.fromLookup(stack, 'ALBListener', {\n loadBalancerTags: {\n Cluster: 'MyClusterName',\n },\n listenerProtocol: ApplicationProtocol.HTTPS,\n listenerPort: 443,\n});\n```\n\n**Look up a Network Listener by associated Load Balancer Tag, Port, and Protocol**\n\n```ts\nconst listener = NetworkListener.fromLookup(stack, 'ALBListener', {\n loadBalancerTags: {\n Cluster: 'MyClusterName',\n },\n listenerProtocol: Protocol.TCP,\n listenerPort: 12345,\n});\n```\n"
|
|
1421
|
-
},
|
|
1422
1006
|
"targets": {
|
|
1423
1007
|
"dotnet": {
|
|
1424
1008
|
"namespace": "Amazon.CDK.AWS.ElasticLoadBalancingV2"
|
|
@@ -1432,13 +1016,6 @@
|
|
|
1432
1016
|
}
|
|
1433
1017
|
},
|
|
1434
1018
|
"aws-cdk-lib.aws_elasticloadbalancingv2_actions": {
|
|
1435
|
-
"locationInModule": {
|
|
1436
|
-
"filename": "lib/index.ts",
|
|
1437
|
-
"line": 76
|
|
1438
|
-
},
|
|
1439
|
-
"readme": {
|
|
1440
|
-
"markdown": "# Actions for AWS Elastic Load Balancing V2\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis package contains integration actions for ELBv2. See the README of the `@aws-cdk/aws-elasticloadbalancingv2` library.\n\n## Cognito\n\nELB allows for requests to be authenticated against a Cognito user pool using\nthe `AuthenticateCognitoAction`. For details on the setup's requirements,\nread [Prepare to use Amazon\nCognito](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/listener-authenticate-users.html#cognito-requirements).\nHere's an example:\n\n[Example of using AuthenticateCognitoAction](test/integ.cognito.lit.ts)\n\n> NOTE: this example seems incomplete, I was not able to get the redirect back to the\nLoad Balancer after authentication working. Would love some pointers on what a full working\nsetup actually looks like!\n"
|
|
1441
|
-
},
|
|
1442
1019
|
"targets": {
|
|
1443
1020
|
"dotnet": {
|
|
1444
1021
|
"namespace": "Amazon.CDK.AWS.ElasticLoadBalancingV2.Actions"
|
|
@@ -1452,13 +1029,6 @@
|
|
|
1452
1029
|
}
|
|
1453
1030
|
},
|
|
1454
1031
|
"aws-cdk-lib.aws_elasticloadbalancingv2_targets": {
|
|
1455
|
-
"locationInModule": {
|
|
1456
|
-
"filename": "lib/index.ts",
|
|
1457
|
-
"line": 77
|
|
1458
|
-
},
|
|
1459
|
-
"readme": {
|
|
1460
|
-
"markdown": "# Targets for AWS Elastic Load Balancing V2\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis package contains targets for ELBv2. See the README of the `@aws-cdk/aws-elasticloadbalancingv2` library.\n"
|
|
1461
|
-
},
|
|
1462
1032
|
"targets": {
|
|
1463
1033
|
"dotnet": {
|
|
1464
1034
|
"namespace": "Amazon.CDK.AWS.ElasticLoadBalancingV2.Targets"
|
|
@@ -1472,13 +1042,6 @@
|
|
|
1472
1042
|
}
|
|
1473
1043
|
},
|
|
1474
1044
|
"aws-cdk-lib.aws_elasticsearch": {
|
|
1475
|
-
"locationInModule": {
|
|
1476
|
-
"filename": "lib/index.ts",
|
|
1477
|
-
"line": 78
|
|
1478
|
-
},
|
|
1479
|
-
"readme": {
|
|
1480
|
-
"markdown": "# Amazon Elasticsearch Service Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\nFeatures | Stability\n-----------------------------------|----------------------------------------------------------------\nCFN Resources | \nHigher level constructs for Domain | \n\n> **CFN Resources:** All classes with the `Cfn` prefix in this module ([CFN Resources]) are always\n> stable and safe to use.\n>\n> [CFN Resources]: https://docs.aws.amazon.com/cdk/latest/guide/constructs.html#constructs_lib\n\n<!-- -->\n\n> **Stable:** Higher level constructs in this module that are marked stable will not undergo any\n> breaking changes. They will strictly follow the [Semantic Versioning](https://semver.org/) model.\n\n---\n\n<!--END STABILITY BANNER-->\n\n## Quick start\n\nCreate a development cluster by simply specifying the version:\n\n```ts\nimport { aws_elasticsearch as es } from 'aws-cdk-lib';\n\nconst devDomain = new es.Domain(this, 'Domain', {\n version: es.ElasticsearchVersion.V7_1,\n});\n```\n\nTo perform version upgrades without replacing the entire domain, specify the `enableVersionUpgrade` property.\n\n```ts\nimport { aws_elasticsearch as es } from 'aws-cdk-lib';\n\nconst devDomain = new es.Domain(this, 'Domain', {\n version: es.ElasticsearchVersion.V7_10,\n enableVersionUpgrade: true // defaults to false\n});\n```\n\nCreate a production grade cluster by also specifying things like capacity and az distribution\n\n```ts\nconst prodDomain = new es.Domain(this, 'Domain', {\n version: es.ElasticsearchVersion.V7_1,\n capacity: {\n masterNodes: 5,\n dataNodes: 20\n },\n ebs: {\n volumeSize: 20\n },\n zoneAwareness: {\n availabilityZoneCount: 3\n },\n logging: {\n slowSearchLogEnabled: true,\n appLogEnabled: true,\n slowIndexLogEnabled: true,\n },\n});\n```\n\nThis creates an Elasticsearch cluster and automatically sets up log groups for\nlogging the domain logs and slow search logs.\n\n## A note about SLR\n\nSome cluster configurations (e.g VPC access) require 
the existence of the [`AWSServiceRoleForAmazonElasticsearchService`](https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/slr-es.html) Service-Linked Role.\n\nWhen performing such operations via the AWS Console, this SLR is created automatically when needed. However, this is not the behavior when using CloudFormation. If an SLR is needed, but doesn't exist, you will encounter a failure message simlar to:\n\n```console\nBefore you can proceed, you must enable a service-linked role to give Amazon ES...\n```\n\nTo resolve this, you need to [create](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html#create-service-linked-role) the SLR. We recommend using the AWS CLI:\n\n```console\naws iam create-service-linked-role --aws-service-name es.amazonaws.com\n```\n\nYou can also create it using the CDK, **but note that only the first application deploying this will succeed**:\n\n```ts\nconst slr = new iam.CfnServiceLinkedRole(this, 'ElasticSLR', {\n awsServiceName: 'es.amazonaws.com'\n});\n```\n\n## Importing existing domains\n\nTo import an existing domain into your CDK application, use the `Domain.fromDomainEndpoint` factory method.\nThis method accepts a domain endpoint of an already existing domain:\n\n```ts\nconst domainEndpoint = 'https://my-domain-jcjotrt6f7otem4sqcwbch3c4u.us-east-1.es.amazonaws.com';\nconst domain = Domain.fromDomainEndpoint(this, 'ImportedDomain', domainEndpoint);\n```\n\n## Permissions\n\n### IAM\n\nHelper methods also exist for managing access to the domain.\n\n```ts\nconst lambda = new lambda.Function(this, 'Lambda', { /* ... 
*/ });\n\n// Grant write access to the app-search index\ndomain.grantIndexWrite('app-search', lambda);\n\n// Grant read access to the 'app-search/_search' path\ndomain.grantPathRead('app-search/_search', lambda);\n```\n\n## Encryption\n\nThe domain can also be created with encryption enabled:\n\n```ts\nconst domain = new es.Domain(this, 'Domain', {\n version: es.ElasticsearchVersion.V7_4,\n ebs: {\n volumeSize: 100,\n volumeType: EbsDeviceVolumeType.GENERAL_PURPOSE_SSD,\n },\n nodeToNodeEncryption: true,\n encryptionAtRest: {\n enabled: true,\n },\n});\n```\n\nThis sets up the domain with node to node encryption and encryption at\nrest. You can also choose to supply your own KMS key to use for encryption at\nrest.\n\n## VPC Support\n\nElasticsearch domains can be placed inside a VPC, providing a secure communication between Amazon ES and other services within the VPC without the need for an internet gateway, NAT device, or VPN connection.\n\n> Visit [VPC Support for Amazon Elasticsearch Service Domains](https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html) for more details.\n\n```ts\nconst vpc = new ec2.Vpc(this, 'Vpc');\nconst domainProps: es.DomainProps = {\n version: es.ElasticsearchVersion.V7_1,\n removalPolicy: RemovalPolicy.DESTROY,\n vpc,\n // must be enabled since our VPC contains multiple private subnets.\n zoneAwareness: {\n enabled: true,\n },\n capacity: {\n // must be an even number since the default az count is 2.\n dataNodes: 2,\n },\n};\nnew es.Domain(this, 'Domain', domainProps);\n```\n\nIn addition, you can use the `vpcSubnets` property to control which specific subnets will be used, and the `securityGroups` property to control\nwhich security groups will be attached to the domain. 
By default, CDK will select all *private* subnets in the VPC, and create one dedicated security group.\n\n## Metrics\n\nHelper methods exist to access common domain metrics for example:\n\n```ts\nconst freeStorageSpace = domain.metricFreeStorageSpace();\nconst masterSysMemoryUtilization = domain.metric('MasterSysMemoryUtilization');\n```\n\nThis module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk) project.\n\n## Fine grained access control\n\nThe domain can also be created with a master user configured. The password can\nbe supplied or dynamically created if not supplied.\n\n```ts\nconst domain = new es.Domain(this, 'Domain', {\n version: es.ElasticsearchVersion.V7_1,\n enforceHttps: true,\n nodeToNodeEncryption: true,\n encryptionAtRest: {\n enabled: true,\n },\n fineGrainedAccessControl: {\n masterUserName: 'master-user',\n },\n});\n\nconst masterUserPassword = domain.masterUserPassword;\n```\n\n## Using unsigned basic auth\n\nFor convenience, the domain can be configured to allow unsigned HTTP requests\nthat use basic auth. Unless the domain is configured to be part of a VPC this\nmeans anyone can access the domain using the configured master username and\npassword.\n\nTo enable unsigned basic auth access the domain is configured with an access\npolicy that allows anyonmous requests, HTTPS required, node to node encryption,\nencryption at rest and fine grained access control.\n\nIf the above settings are not set they will be configured as part of enabling\nunsigned basic auth. If they are set with conflicting values, an error will be\nthrown.\n\nIf no master user is configured a default master user is created with the\nusername `admin`.\n\nIf no password is configured a default master user password is created and\nstored in the AWS Secrets Manager as secret. 
The secret has the prefix\n`<domain id>MasterUser`.\n\n```ts\nconst domain = new es.Domain(this, 'Domain', {\n version: es.ElasticsearchVersion.V7_1,\n useUnsignedBasicAuth: true,\n});\n\nconst masterUserPassword = domain.masterUserPassword;\n```\n\n\n\n## Audit logs\n\nAudit logs can be enabled for a domain, but only when fine grained access control is enabled.\n\n```ts\nconst domain = new es.Domain(this, 'Domain', {\n version: es.ElasticsearchVersion.V7_1,\n enforceHttps: true,\n nodeToNodeEncryption: true,\n encryptionAtRest: {\n enabled: true,\n },\n fineGrainedAccessControl: {\n masterUserName: 'master-user',\n },\n logging: {\n auditLogEnabled: true,\n slowSearchLogEnabled: true,\n appLogEnabled: true,\n slowIndexLogEnabled: true,\n },\n});\n```\n\n## UltraWarm\n\nUltraWarm nodes can be enabled to provide a cost-effective way to store large amounts of read-only data.\n\n```ts\nconst domain = new es.Domain(this, 'Domain', {\n version: es.ElasticsearchVersion.V7_10,\n capacity: {\n masterNodes: 2,\n warmNodes: 2,\n warmInstanceType: 'ultrawarm1.medium.elasticsearch',\n },\n});\n```\n\n## Custom endpoint\n\nCustom endpoints can be configured to reach the ES domain under a custom domain name.\n\n```ts\nnew Domain(stack, 'Domain', {\n version: ElasticsearchVersion.V7_7,\n customEndpoint: {\n domainName: 'search.example.com',\n },\n});\n```\n\nIt is also possible to specify a custom certificate instead of the auto-generated one.\n\nAdditionally, an automatic CNAME-Record is created if a hosted zone is provided for the custom endpoint\n\n## Advanced options\n\n[Advanced options](https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-advanced-options) can used to configure additional options.\n\n```ts\nnew Domain(stack, 'Domain', {\n version: ElasticsearchVersion.V7_7,\n advancedOptions: {\n 'rest.action.multi.allow_explicit_index': 'false',\n 'indices.fielddata.cache.size': '25',\n 
'indices.query.bool.max_clause_count': '2048',\n },\n});\n```\n"
|
|
1481
|
-
},
|
|
1482
1045
|
"targets": {
|
|
1483
1046
|
"dotnet": {
|
|
1484
1047
|
"namespace": "Amazon.CDK.AWS.Elasticsearch"
|
|
@@ -1492,10 +1055,6 @@
|
|
|
1492
1055
|
}
|
|
1493
1056
|
},
|
|
1494
1057
|
"aws-cdk-lib.aws_emr": {
|
|
1495
|
-
"locationInModule": {
|
|
1496
|
-
"filename": "lib/index.ts",
|
|
1497
|
-
"line": 79
|
|
1498
|
-
},
|
|
1499
1058
|
"targets": {
|
|
1500
1059
|
"dotnet": {
|
|
1501
1060
|
"namespace": "Amazon.CDK.AWS.EMR"
|
|
@@ -1509,10 +1068,6 @@
|
|
|
1509
1068
|
}
|
|
1510
1069
|
},
|
|
1511
1070
|
"aws-cdk-lib.aws_emrcontainers": {
|
|
1512
|
-
"locationInModule": {
|
|
1513
|
-
"filename": "lib/index.ts",
|
|
1514
|
-
"line": 80
|
|
1515
|
-
},
|
|
1516
1071
|
"targets": {
|
|
1517
1072
|
"dotnet": {
|
|
1518
1073
|
"namespace": "Amazon.CDK.AWS.EMRContainers"
|
|
@@ -1526,13 +1081,6 @@
|
|
|
1526
1081
|
}
|
|
1527
1082
|
},
|
|
1528
1083
|
"aws-cdk-lib.aws_events": {
|
|
1529
|
-
"locationInModule": {
|
|
1530
|
-
"filename": "lib/index.ts",
|
|
1531
|
-
"line": 81
|
|
1532
|
-
},
|
|
1533
|
-
"readme": {
|
|
1534
|
-
"markdown": "# Amazon EventBridge Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nAmazon EventBridge delivers a near real-time stream of system events that\ndescribe changes in AWS resources. For example, an AWS CodePipeline emits the\n[State\nChange](https://docs.aws.amazon.com/eventbridge/latest/userguide/event-types.html#codepipeline-event-type)\nevent when the pipeline changes its state.\n\n* __Events__: An event indicates a change in your AWS environment. AWS resources\n can generate events when their state changes. For example, Amazon EC2\n generates an event when the state of an EC2 instance changes from pending to\n running, and Amazon EC2 Auto Scaling generates events when it launches or\n terminates instances. AWS CloudTrail publishes events when you make API calls.\n You can generate custom application-level events and publish them to\n EventBridge. You can also set up scheduled events that are generated on\n a periodic basis. For a list of services that generate events, and sample\n events from each service, see [EventBridge Event Examples From Each\n Supported\n Service](https://docs.aws.amazon.com/eventbridge/latest/userguide/event-types.html).\n* __Targets__: A target processes events. Targets can include Amazon EC2\n instances, AWS Lambda functions, Kinesis streams, Amazon ECS tasks, Step\n Functions state machines, Amazon SNS topics, Amazon SQS queues, Amazon CloudWatch LogGroups, and built-in\n targets. A target receives events in JSON format.\n* __Rules__: A rule matches incoming events and routes them to targets for\n processing. A single rule can route to multiple targets, all of which are\n processed in parallel. Rules are not processed in a particular order. This\n enables different parts of an organization to look for and process the events\n that are of interest to them. 
A rule can customize the JSON sent to the\n target, by passing only certain parts or by overwriting it with a constant.\n* __EventBuses__: An event bus can receive events from your own custom applications\n or it can receive events from applications and services created by AWS SaaS partners.\n See [Creating an Event Bus](https://docs.aws.amazon.com/eventbridge/latest/userguide/create-event-bus.html).\n\n## Rule\n\nThe `Rule` construct defines an EventBridge rule which monitors an\nevent based on an [event\npattern](https://docs.aws.amazon.com/eventbridge/latest/userguide/filtering-examples-structure.html)\nand invoke __event targets__ when the pattern is matched against a triggered\nevent. Event targets are objects that implement the `IRuleTarget` interface.\n\nNormally, you will use one of the `source.onXxx(name[, target[, options]]) ->\nRule` methods on the event source to define an event rule associated with\nthe specific activity. You can targets either via props, or add targets using\n`rule.addTarget`.\n\nFor example, to define an rule that triggers a CodeBuild project build when a\ncommit is pushed to the \"master\" branch of a CodeCommit repository:\n\n```ts\nconst onCommitRule = repo.onCommit('OnCommit', {\n target: new targets.CodeBuildProject(project),\n branches: ['master']\n});\n```\n\nYou can add additional targets, with optional [input\ntransformer](https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_InputTransformer.html)\nusing `eventRule.addTarget(target[, input])`. 
For example, we can add a SNS\ntopic target which formats a human-readable message for the commit.\n\nFor example, this adds an SNS topic as a target:\n\n```ts\nonCommitRule.addTarget(new targets.SnsTopic(topic, {\n message: events.RuleTargetInput.fromText(\n `A commit was pushed to the repository ${codecommit.ReferenceEvent.repositoryName} on branch ${codecommit.ReferenceEvent.referenceName}`\n )\n}));\n```\n\nOr using an Object:\n\n```ts\nonCommitRule.addTarget(new targets.SnsTopic(topic, {\n message: events.RuleTargetInput.fromObject(\n {\n DataType: `custom_${events.EventField.fromPath('$.detail-type')}`\n }\n )\n}));\n```\n\n## Scheduling\n\nYou can configure a Rule to run on a schedule (cron or rate).\nRate must be specified in minutes, hours or days.\n\nThe following example runs a task every day at 4am:\n\n```ts\nimport { Rule, Schedule } from 'aws-cdk-lib/aws-events';\nimport { EcsTask } from 'aws-cdk-lib/aws-events-targets';\n...\n\nconst ecsTaskTarget = new EcsTask({ cluster, taskDefinition, role });\n\nnew Rule(this, 'ScheduleRule', {\n schedule: Schedule.cron({ minute: '0', hour: '4' }),\n targets: [ecsTaskTarget],\n});\n```\n\nIf you want to specify Fargate platform version, set `platformVersion` in EcsTask's props like the following example:\n\n```ts\nconst platformVersion = ecs.FargatePlatformVersion.VERSION1_4;\nconst ecsTaskTarget = new EcsTask({ cluster, taskDefinition, role, platformVersion });\n```\n\n## Event Targets\n\nThe `@aws-cdk/aws-events-targets` module includes classes that implement the `IRuleTarget`\ninterface for various AWS services.\n\nThe following targets are supported:\n\n* `targets.CodeBuildProject`: Start an AWS CodeBuild build\n* `targets.CodePipeline`: Start an AWS CodePipeline pipeline execution\n* `targets.EcsTask`: Start a task on an Amazon ECS cluster\n* `targets.LambdaFunction`: Invoke an AWS Lambda function\n* `targets.SnsTopic`: Publish into an SNS topic\n* `targets.SqsQueue`: Send a message to an Amazon SQS Queue\n* 
`targets.SfnStateMachine`: Trigger an AWS Step Functions state machine\n* `targets.BatchJob`: Queue an AWS Batch Job\n* `targets.AwsApi`: Make an AWS API call\n\n### Cross-account and cross-region targets\n\nIt's possible to have the source of the event and a target in separate AWS accounts and regions:\n\n```ts\nimport { App, Stack } from 'aws-cdk-lib';\nimport { aws_codebuild as codebuild } from 'aws-cdk-lib';\nimport { aws_codecommit as codecommit } from 'aws-cdk-lib';\nimport { aws_events_targets as targets } from 'aws-cdk-lib';\n\nconst app = new App();\n\nconst stack1 = new Stack(app, 'Stack1', { env: { account: account1, region: 'us-west-1' } });\nconst repo = new codecommit.Repository(stack1, 'Repository', {\n // ...\n});\n\nconst stack2 = new Stack(app, 'Stack2', { env: { account: account2, region: 'us-east-1' } });\nconst project = new codebuild.Project(stack2, 'Project', {\n // ...\n});\n\nrepo.onCommit('OnCommit', {\n target: new targets.CodeBuildProject(project),\n});\n```\n\nIn this situation, the CDK will wire the 2 accounts together:\n\n* It will generate a rule in the source stack with the event bus of the target account as the target\n* It will generate a rule in the target stack, with the provided target\n* It will generate a separate stack that gives the source account permissions to publish events\n to the event bus of the target account in the given region,\n and make sure its deployed before the source stack\n\nFor more information, see the\n[AWS documentation on cross-account events](https://docs.aws.amazon.com/eventbridge/latest/userguide/eventbridge-cross-account-event-delivery.html).\n\n## Archiving\n\nIt is possible to archive all or some events sent to an event bus. 
It is then possible to [replay these events](https://aws.amazon.com/blogs/aws/new-archive-and-replay-events-with-amazon-eventbridge/).\n\n```ts\nimport * as cdk from 'aws-cdk-lib';\n\nconst stack = new stack();\n\nconst bus = new EventBus(stack, 'bus', {\n eventBusName: 'MyCustomEventBus'\n});\n\nbus.archive('MyArchive', {\n archiveName: 'MyCustomEventBusArchive',\n description: 'MyCustomerEventBus Archive',\n eventPattern: {\n account: [stack.account],\n },\n retention: cdk.Duration.days(365),\n});\n```\n\n## Granting PutEvents to an existing EventBus\n\nTo import an existing EventBus into your CDK application, use `EventBus.fromEventBusArn`, `EventBus.fromEventBusAttributes`\nor `EventBus.fromEventBusName` factory method.\n\nThen, you can use the `grantPutEventsTo` method to grant `event:PutEvents` to the eventBus.\n\n```ts\nconst eventBus = EventBus.fromEventBusArn(this, 'ImportedEventBus', 'arn:aws:events:us-east-1:111111111:event-bus/my-event-bus');\n\n// now you can just call methods on the eventbus\neventBus.grantPutEventsTo(lambdaFunction);\n```\n"
|
|
1535
|
-
},
|
|
1536
1084
|
"targets": {
|
|
1537
1085
|
"dotnet": {
|
|
1538
1086
|
"namespace": "Amazon.CDK.AWS.Events"
|
|
@@ -1546,13 +1094,6 @@
|
|
|
1546
1094
|
}
|
|
1547
1095
|
},
|
|
1548
1096
|
"aws-cdk-lib.aws_events_targets": {
|
|
1549
|
-
"locationInModule": {
|
|
1550
|
-
"filename": "lib/index.ts",
|
|
1551
|
-
"line": 82
|
|
1552
|
-
},
|
|
1553
|
-
"readme": {
|
|
1554
|
-
"markdown": "# Event Targets for Amazon EventBridge\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis library contains integration classes to send Amazon EventBridge to any\nnumber of supported AWS Services. Instances of these classes should be passed\nto the `rule.addTarget()` method.\n\nCurrently supported are:\n\n* [Start a CodeBuild build](#start-a-codebuild-build)\n* [Start a CodePipeline pipeline](#start-a-codepipeline-pipeline)\n* Run an ECS task\n* [Invoke a Lambda function](#invoke-a-lambda-function)\n* [Invoke a API Gateway REST API](#invoke-a-api-gateway-rest-api)\n* Publish a message to an SNS topic\n* Send a message to an SQS queue\n* [Start a StepFunctions state machine](#start-a-stepfunctions-state-machine)\n* [Queue a Batch job](#queue-a-batch-job)\n* Make an AWS API call\n* Put a record to a Kinesis stream\n* [Log an event into a LogGroup](#log-an-event-into-a-loggroup)\n* Put a record to a Kinesis Data Firehose stream\n* Put an event on an EventBridge bus\n\nSee the README of the `@aws-cdk/aws-events` library for more information on\nEventBridge.\n\n## Event retry policy and using dead-letter queues\n\nThe Codebuild, CodePipeline, Lambda, StepFunctions and LogGroup targets support attaching a [dead letter queue and setting retry policies](https://docs.aws.amazon.com/eventbridge/latest/userguide/rule-dlq.html). See the [lambda example](#invoke-a-lambda-function).\nUse [escape hatches](https://docs.aws.amazon.com/cdk/latest/guide/cfn_layer.html) for the other target types.\n\n## Invoke a Lambda function\n\nUse the `LambdaFunction` target to invoke a lambda function.\n\nThe code snippet below creates an event rule with a Lambda function as a target\ntriggered for every events from `aws.ec2` source. 
You can optionally attach a\n[dead letter queue](https://docs.aws.amazon.com/eventbridge/latest/userguide/rule-dlq.html).\n\n```ts\nimport { aws_lambda as lambda } from 'aws-cdk-lib';\n\nconst fn = new lambda.Function(this, 'MyFunc', {\n runtime: lambda.Runtime.NODEJS_12_X,\n handler: 'index.handler',\n code: lambda.Code.fromInline(`exports.handler = handler.toString()`),\n});\n\nconst rule = new events.Rule(this, 'rule', {\n eventPattern: {\n source: [\"aws.ec2\"],\n },\n});\n\nconst queue = new sqs.Queue(this, 'Queue');\n\nrule.addTarget(new targets.LambdaFunction(fn, {\n deadLetterQueue: queue, // Optional: add a dead letter queue\n maxEventAge: cdk.Duration.hours(2), // Otional: set the maxEventAge retry policy\n retryAttempts: 2, // Optional: set the max number of retry attempts\n}));\n```\n\n## Log an event into a LogGroup\n\nUse the `LogGroup` target to log your events in a CloudWatch LogGroup.\n\nFor example, the following code snippet creates an event rule with a CloudWatch LogGroup as a target.\nEvery events sent from the `aws.ec2` source will be sent to the CloudWatch LogGroup.\n\n```ts\nimport { aws_logs as logs } from 'aws-cdk-lib';\n\nconst logGroup = new logs.LogGroup(this, 'MyLogGroup', {\n logGroupName: 'MyLogGroup',\n});\n\nconst rule = new events.Rule(this, 'rule', {\n eventPattern: {\n source: [\"aws.ec2\"],\n },\n});\n\nrule.addTarget(new targets.CloudWatchLogGroup(logGroup));\n```\n\n## Start a CodeBuild build\n\nUse the `CodeBuildProject` target to trigger a CodeBuild project.\n\nThe code snippet below creates a CodeCommit repository that triggers a CodeBuild project\non commit to the master branch. 
You can optionally attach a\n[dead letter queue](https://docs.aws.amazon.com/eventbridge/latest/userguide/rule-dlq.html).\n\n```ts\nimport { aws_codebuild as codebuild } from 'aws-cdk-lib';\nimport { aws_codecommit as codecommit } from 'aws-cdk-lib';\n\nconst repo = new codecommit.Repository(this, 'MyRepo', {\n repositoryName: 'aws-cdk-codebuild-events',\n});\n\nconst project = new codebuild.Project(this, 'MyProject', {\n source: codebuild.Source.codeCommit({ repository: repo }),\n});\n\nconst deadLetterQueue = new sqs.Queue(this, 'DeadLetterQueue');\n\n// trigger a build when a commit is pushed to the repo\nconst onCommitRule = repo.onCommit('OnCommit', {\n target: new targets.CodeBuildProject(project, {\n deadLetterQueue: deadLetterQueue,\n }),\n branches: ['master'],\n});\n```\n\n## Start a CodePipeline pipeline\n\nUse the `CodePipeline` target to trigger a CodePipeline pipeline.\n\nThe code snippet below creates a CodePipeline pipeline that is triggered every hour\n\n```ts\nimport { aws_codepipeline as codepipeline } from 'aws-cdk-lib';\n\nconst pipeline = new codepipeline.Pipeline(this, 'Pipeline');\n\nconst rule = new events.Rule(this, 'Rule', {\n schedule: events.Schedule.expression('rate(1 hour)'),\n});\n\nrule.addTarget(new targets.CodePipeline(pipeline));\n```\n\n## Start a StepFunctions state machine\n\nUse the `SfnStateMachine` target to trigger a State Machine.\n\nThe code snippet below creates a Simple StateMachine that is triggered every minute with a\ndummy object as input.\nYou can optionally attach a\n[dead letter queue](https://docs.aws.amazon.com/eventbridge/latest/userguide/rule-dlq.html)\nto the target.\n\n```ts\nimport { aws_iam as iam } from 'aws-cdk-lib';\nimport { aws_stepfunctions as sfn } from 'aws-cdk-lib';\n\nconst rule = new events.Rule(this, 'Rule', {\n schedule: events.Schedule.rate(cdk.Duration.minutes(1)),\n});\n\nconst dlq = new sqs.Queue(this, 'DeadLetterQueue');\n\nconst role = new iam.Role(this, 'Role', {\n assumedBy: new 
iam.ServicePrincipal('events.amazonaws.com'),\n});\nconst stateMachine = new sfn.StateMachine(this, 'SM', {\n definition: new sfn.Wait(this, 'Hello', { time: sfn.WaitTime.duration(cdk.Duration.seconds(10)) }),\n role,\n});\n\nrule.addTarget(new targets.SfnStateMachine(stateMachine, {\n input: events.RuleTargetInput.fromObject({ SomeParam: 'SomeValue' }),\n deadLetterQueue: dlq,\n}));\n```\n\n## Queue a Batch job\n\nUse the `BatchJob` target to queue a Batch job.\n\nThe code snippet below creates a Simple JobQueue that is triggered every hour with a\ndummy object as input.\nYou can optionally attach a\n[dead letter queue](https://docs.aws.amazon.com/eventbridge/latest/userguide/rule-dlq.html)\nto the target.\n\n```ts\nimport { aws_batch as batch } from 'aws-cdk-lib';\nimport { ContainerImage } from 'aws-cdk-lib/aws-ecs';\n\nconst jobQueue = new batch.JobQueue(this, 'MyQueue', {\n computeEnvironments: [\n {\n computeEnvironment: new batch.ComputeEnvironment(this, 'ComputeEnvironment', {\n managed: false,\n }),\n order: 1,\n },\n ],\n});\n\nconst jobDefinition = new batch.JobDefinition(this, 'MyJob', {\n container: {\n image: ContainerImage.fromRegistry('test-repo'),\n },\n});\n\nconst queue = new sqs.Queue(this, 'Queue');\n\nconst rule = new events.Rule(this, 'Rule', {\n schedule: events.Schedule.rate(cdk.Duration.hours(1)),\n});\n\nrule.addTarget(new targets.BatchJob(\n jobQueue.jobQueueArn,\n jobQueue,\n jobDefinition.jobDefinitionArn,\n jobDefinition, {\n deadLetterQueue: queue,\n event: events.RuleTargetInput.fromObject({ SomeParam: 'SomeValue' }),\n retryAttempts: 2,\n maxEventAge: cdk.Duration.hours(2),\n },\n));\n```\n\n## Invoke a API Gateway REST API\n\nUse the `ApiGateway` target to trigger a REST API.\n\nThe code snippet below creates a Api Gateway REST API that is invoked every hour.\n\n```ts\nimport { aws_apigateway as api } from 'aws-cdk-lib';\nimport { aws_lambda as lambda } from 'aws-cdk-lib';\n\nconst rule = new events.Rule(this, 'Rule', {\n 
schedule: events.Schedule.rate(cdk.Duration.minutes(1)),\n});\n\nconst fn = new lambda.Function( this, 'MyFunc', {\n handler: 'index.handler',\n runtime: lambda.Runtime.NODEJS_12_X,\n code: lambda.Code.fromInline( 'exports.handler = e => {}' ),\n} );\n\nconst restApi = new api.LambdaRestApi( this, 'MyRestAPI', { handler: fn } );\n\nconst dlq = new sqs.Queue(this, 'DeadLetterQueue');\n\nrule.addTarget(\n new targets.ApiGateway( restApi, {\n path: '/*/test',\n method: 'GET',\n stage: 'prod',\n pathParameterValues: ['path-value'],\n headerParameters: {\n Header1: 'header1',\n },\n queryStringParameters: {\n QueryParam1: 'query-param-1',\n },\n deadLetterQueue: dlq\n } ),\n)\n```\n"
|
|
1555
|
-
},
|
|
1556
1097
|
"targets": {
|
|
1557
1098
|
"dotnet": {
|
|
1558
1099
|
"namespace": "Amazon.CDK.AWS.Events.Targets"
|
|
@@ -1566,10 +1107,6 @@
|
|
|
1566
1107
|
}
|
|
1567
1108
|
},
|
|
1568
1109
|
"aws-cdk-lib.aws_eventschemas": {
|
|
1569
|
-
"locationInModule": {
|
|
1570
|
-
"filename": "lib/index.ts",
|
|
1571
|
-
"line": 83
|
|
1572
|
-
},
|
|
1573
1110
|
"targets": {
|
|
1574
1111
|
"dotnet": {
|
|
1575
1112
|
"namespace": "Amazon.CDK.AWS.EventSchemas"
|
|
@@ -1583,10 +1120,6 @@
|
|
|
1583
1120
|
}
|
|
1584
1121
|
},
|
|
1585
1122
|
"aws-cdk-lib.aws_finspace": {
|
|
1586
|
-
"locationInModule": {
|
|
1587
|
-
"filename": "lib/index.ts",
|
|
1588
|
-
"line": 84
|
|
1589
|
-
},
|
|
1590
1123
|
"targets": {
|
|
1591
1124
|
"dotnet": {
|
|
1592
1125
|
"namespace": "Amazon.CDK.AWS.FinSpace"
|
|
@@ -1600,10 +1133,6 @@
|
|
|
1600
1133
|
}
|
|
1601
1134
|
},
|
|
1602
1135
|
"aws-cdk-lib.aws_fis": {
|
|
1603
|
-
"locationInModule": {
|
|
1604
|
-
"filename": "lib/index.ts",
|
|
1605
|
-
"line": 85
|
|
1606
|
-
},
|
|
1607
1136
|
"targets": {
|
|
1608
1137
|
"dotnet": {
|
|
1609
1138
|
"namespace": "Amazon.CDK.AWS.FIS"
|
|
@@ -1617,10 +1146,6 @@
|
|
|
1617
1146
|
}
|
|
1618
1147
|
},
|
|
1619
1148
|
"aws-cdk-lib.aws_fms": {
|
|
1620
|
-
"locationInModule": {
|
|
1621
|
-
"filename": "lib/index.ts",
|
|
1622
|
-
"line": 86
|
|
1623
|
-
},
|
|
1624
1149
|
"targets": {
|
|
1625
1150
|
"dotnet": {
|
|
1626
1151
|
"namespace": "Amazon.CDK.AWS.FMS"
|
|
@@ -1634,10 +1159,6 @@
|
|
|
1634
1159
|
}
|
|
1635
1160
|
},
|
|
1636
1161
|
"aws-cdk-lib.aws_frauddetector": {
|
|
1637
|
-
"locationInModule": {
|
|
1638
|
-
"filename": "lib/index.ts",
|
|
1639
|
-
"line": 87
|
|
1640
|
-
},
|
|
1641
1162
|
"targets": {
|
|
1642
1163
|
"dotnet": {
|
|
1643
1164
|
"namespace": "Amazon.CDK.AWS.FraudDetector"
|
|
@@ -1651,13 +1172,6 @@
|
|
|
1651
1172
|
}
|
|
1652
1173
|
},
|
|
1653
1174
|
"aws-cdk-lib.aws_fsx": {
|
|
1654
|
-
"locationInModule": {
|
|
1655
|
-
"filename": "lib/index.ts",
|
|
1656
|
-
"line": 88
|
|
1657
|
-
},
|
|
1658
|
-
"readme": {
|
|
1659
|
-
"markdown": "# Amazon FSx Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\n[Amazon FSx](https://docs.aws.amazon.com/fsx/?id=docs_gateway) provides fully managed third-party file systems with the\nnative compatibility and feature sets for workloads such as Microsoft Windows–based storage, high-performance computing,\nmachine learning, and electronic design automation.\n\nAmazon FSx supports two file system types: [Lustre](https://docs.aws.amazon.com/fsx/latest/LustreGuide/index.html) and\n[Windows](https://docs.aws.amazon.com/fsx/latest/WindowsGuide/index.html) File Server.\n\n## FSx for Lustre\n\nAmazon FSx for Lustre makes it easy and cost-effective to launch and run the popular, high-performance Lustre file\nsystem. You use Lustre for workloads where speed matters, such as machine learning, high performance computing (HPC),\nvideo processing, and financial modeling.\n\nThe open-source Lustre file system is designed for applications that require fast storage—where you want your storage\nto keep up with your compute. Lustre was built to solve the problem of quickly and cheaply processing the world's\never-growing datasets. It's a widely used file system designed for the fastest computers in the world. It provides\nsubmillisecond latencies, up to hundreds of GBps of throughput, and up to millions of IOPS. For more information on\nLustre, see the [Lustre website](http://lustre.org/).\n\nAs a fully managed service, Amazon FSx makes it easier for you to use Lustre for workloads where storage speed matters.\nAmazon FSx for Lustre eliminates the traditional complexity of setting up and managing Lustre file systems, enabling\nyou to spin up and run a battle-tested high-performance file system in minutes. 
It also provides multiple deployment\noptions so you can optimize cost for your needs.\n\nAmazon FSx for Lustre is POSIX-compliant, so you can use your current Linux-based applications without having to make\nany changes. Amazon FSx for Lustre provides a native file system interface and works as any file system does with your\nLinux operating system. It also provides read-after-write consistency and supports file locking.\n\n### Installation\n\nImport to your project:\n\n```ts\nimport { aws_fsx as fsx } from 'aws-cdk-lib';\n```\n\n### Basic Usage\n\nSetup required properties and create:\n\n```ts\nconst stack = new Stack(app, 'Stack');\nconst vpc = new Vpc(stack, 'VPC');\n\nconst fileSystem = new LustreFileSystem(stack, 'FsxLustreFileSystem', {\n lustreConfiguration: { deploymentType: LustreDeploymentType.SCRATCH_2 },\n storageCapacityGiB: 1200,\n vpc,\n vpcSubnet: vpc.privateSubnets[0]});\n```\n\n### Connecting\n\nTo control who can access the file system, use the `.connections` attribute. FSx has a fixed default port, so you don't\nneed to specify the port. This example allows an EC2 instance to connect to a file system:\n\n```ts\nfileSystem.connections.allowDefaultPortFrom(instance);\n```\n\n### Mounting\n\nThe LustreFileSystem Construct exposes both the DNS name of the file system as well as its mount name, which can be\nused to mount the file system on an EC2 instance. 
The following example shows how to bring up a file system and EC2\ninstance, and then use User Data to mount the file system on the instance at start-up:\n\n```ts\nconst app = new App();\nconst stack = new Stack(app, 'AwsCdkFsxLustre');\nconst vpc = new Vpc(stack, 'VPC');\n\nconst lustreConfiguration = {\n deploymentType: LustreDeploymentType.SCRATCH_2,\n};\nconst fs = new LustreFileSystem(stack, 'FsxLustreFileSystem', {\n lustreConfiguration,\n storageCapacityGiB: 1200,\n vpc,\n vpcSubnet: vpc.privateSubnets[0]});\n\nconst inst = new Instance(stack, 'inst', {\n instanceType: InstanceType.of(InstanceClass.T2, InstanceSize.LARGE),\n machineImage: new AmazonLinuxImage({\n generation: AmazonLinuxGeneration.AMAZON_LINUX_2,\n }),\n vpc,\n vpcSubnets: {\n subnetType: SubnetType.PUBLIC,\n },\n});\nfs.connections.allowDefaultPortFrom(inst);\n\n// Need to give the instance access to read information about FSx to determine the file system's mount name.\ninst.role.addManagedPolicy(ManagedPolicy.fromAwsManagedPolicyName('AmazonFSxReadOnlyAccess'));\n\nconst mountPath = '/mnt/fsx';\nconst dnsName = fs.dnsName;\nconst mountName = fs.mountName;\n\ninst.userData.addCommands(\n 'set -eux',\n 'yum update -y',\n 'amazon-linux-extras install -y lustre2.10',\n // Set up the directory to mount the file system to and change the owner to the AL2 default ec2-user.\n `mkdir -p ${mountPath}`,\n `chmod 777 ${mountPath}`,\n `chown ec2-user:ec2-user ${mountPath}`,\n // Set the file system up to mount automatically on start up and mount it.\n `echo \"${dnsName}@tcp:/${mountName} ${mountPath} lustre defaults,noatime,flock,_netdev 0 0\" >> /etc/fstab`,\n 'mount -a');\n```\n\n### Importing\n\nAn FSx for Lustre file system can be imported with `fromLustreFileSystemAttributes(stack, id, attributes)`. 
The\nfollowing example lays out how you could import the SecurityGroup a file system belongs to, use that to import the file\nsystem, and then also import the VPC the file system is in and add an EC2 instance to it, giving it access to the file\nsystem.\n\n```ts\nconst app = new App();\nconst stack = new Stack(app, 'AwsCdkFsxLustreImport');\n\nconst sg = SecurityGroup.fromSecurityGroupId(stack, 'FsxSecurityGroup', '{SECURITY-GROUP-ID}');\nconst fs = LustreFileSystem.fromLustreFileSystemAttributes(stack, 'FsxLustreFileSystem', {\n dnsName: '{FILE-SYSTEM-DNS-NAME}'\n fileSystemId: '{FILE-SYSTEM-ID}',\n securityGroup: sg\n});\n\nconst vpc = Vpc.fromVpcAttributes(stack, 'Vpc', {\n availabilityZones: ['us-west-2a', 'us-west-2b'],\n publicSubnetIds: ['{US-WEST-2A-SUBNET-ID}', '{US-WEST-2B-SUBNET-ID}'],\n vpcId: '{VPC-ID}'\n});\nconst inst = new Instance(stack, 'inst', {\n instanceType: InstanceType.of(InstanceClass.T2, InstanceSize.LARGE),\n machineImage: new AmazonLinuxImage({\n generation: AmazonLinuxGeneration.AMAZON_LINUX_2\n }),\n vpc,\n vpcSubnets: {\n subnetType: SubnetType.PUBLIC,\n }\n});\nfs.connections.allowDefaultPortFrom(inst);\n```\n\n## FSx for Windows File Server\n\nThe L2 construct for the FSx for Windows File Server has not yet been implemented. To instantiate an FSx for Windows\nfile system, the L1 constructs can be used as defined by CloudFormation.\n"
|
|
1660
|
-
},
|
|
1661
1175
|
"targets": {
|
|
1662
1176
|
"dotnet": {
|
|
1663
1177
|
"namespace": "Amazon.CDK.AWS.FSx"
|
|
@@ -1671,10 +1185,6 @@
|
|
|
1671
1185
|
}
|
|
1672
1186
|
},
|
|
1673
1187
|
"aws-cdk-lib.aws_gamelift": {
|
|
1674
|
-
"locationInModule": {
|
|
1675
|
-
"filename": "lib/index.ts",
|
|
1676
|
-
"line": 89
|
|
1677
|
-
},
|
|
1678
1188
|
"targets": {
|
|
1679
1189
|
"dotnet": {
|
|
1680
1190
|
"namespace": "Amazon.CDK.AWS.GameLift"
|
|
@@ -1688,13 +1198,6 @@
|
|
|
1688
1198
|
}
|
|
1689
1199
|
},
|
|
1690
1200
|
"aws-cdk-lib.aws_globalaccelerator": {
|
|
1691
|
-
"locationInModule": {
|
|
1692
|
-
"filename": "lib/index.ts",
|
|
1693
|
-
"line": 90
|
|
1694
|
-
},
|
|
1695
|
-
"readme": {
|
|
1696
|
-
"markdown": "# AWS::GlobalAccelerator Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\n## Introduction\n\nAWS Global Accelerator (AGA) is a service that improves the availability and\nperformance of your applications with local or global users.\n\nIt intercepts your user's network connection at an edge location close to\nthem, and routes it to one of potentially multiple, redundant backends across\nthe more reliable and less congested AWS global network.\n\nAGA can be used to route traffic to Application Load Balancers, Network Load\nBalancers, EC2 Instances and Elastic IP Addresses.\n\nFor more information, see the [AWS Global\nAccelerator Developer Guide](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/AWS_GlobalAccelerator.html).\n\n## Example\n\nHere's an example that sets up a Global Accelerator for two Application Load\nBalancers in two different AWS Regions:\n\n```ts\nimport { aws_globalaccelerator as globalaccelerator } from 'aws-cdk-lib';\nimport { aws_globalaccelerator_endpoints as ga_endpoints } from 'aws-cdk-lib';\nimport { aws_elasticloadbalancingv2 as elbv2 } from 'aws-cdk-lib';\n\n// Create an Accelerator\nconst accelerator = new globalaccelerator.Accelerator(stack, 'Accelerator');\n\n// Create a Listener\nconst listener = accelerator.addListener('Listener', {\n portRanges: [\n { fromPort: 80 },\n { fromPort: 443 },\n ],\n});\n\n// Import the Load Balancers\nconst nlb1 = elbv2.NetworkLoadBalancer.fromNetworkLoadBalancerAttributes(stack, 'NLB1', {\n loadBalancerArn: 'arn:aws:elasticloadbalancing:us-west-2:111111111111:loadbalancer/app/my-load-balancer1/e16bef66805b',\n});\nconst nlb2 = elbv2.NetworkLoadBalancer.fromNetworkLoadBalancerAttributes(stack, 'NLB2', {\n loadBalancerArn: 'arn:aws:elasticloadbalancing:ap-south-1:111111111111:loadbalancer/app/my-load-balancer2/5513dc2ea8a1',\n});\n\n// Add one EndpointGroup for each Region we are 
targeting\nlistener.addEndpointGroup('Group1', {\n endpoints: [new ga_endpoints.NetworkLoadBalancerEndpoint(nlb1)],\n});\nlistener.addEndpointGroup('Group2', {\n // Imported load balancers automatically calculate their Region from the ARN.\n // If you are load balancing to other resources, you must also pass a `region`\n // parameter here.\n endpoints: [new ga_endpoints.NetworkLoadBalancerEndpoint(nlb2)],\n});\n```\n\n## Concepts\n\nThe **Accelerator** construct defines a Global Accelerator resource.\n\nAn Accelerator includes one or more **Listeners** that accepts inbound\nconnections on one or more ports.\n\nEach Listener has one or more **Endpoint Groups**, representing multiple\ngeographically distributed copies of your application. There is one Endpoint\nGroup per Region, and user traffic is routed to the closest Region by default.\n\nAn Endpoint Group consists of one or more **Endpoints**, which is where the\nuser traffic coming in on the Listener is ultimately sent. The Endpoint port\nused is the same as the traffic came in on at the Listener, unless overridden.\n\n## Types of Endpoints\n\nThere are 4 types of Endpoints, and they can be found in the\n`@aws-cdk/aws-globalaccelerator-endpoints` package:\n\n* Application Load Balancers\n* Network Load Balancers\n* EC2 Instances\n* Elastic IP Addresses\n\n### Application Load Balancers\n\n```ts\nconst alb = new elbv2.ApplicationLoadBalancer(...);\n\nlistener.addEndpointGroup('Group', {\n endpoints: [\n new ga_endpoints.ApplicationLoadBalancerEndpoint(alb, {\n weight: 128,\n preserveClientIp: true,\n }),\n ],\n});\n```\n\n### Network Load Balancers\n\n```ts\nconst nlb = new elbv2.NetworkLoadBalancer(...);\n\nlistener.addEndpointGroup('Group', {\n endpoints: [\n new ga_endpoints.NetworkLoadBalancerEndpoint(nlb, {\n weight: 128,\n }),\n ],\n});\n```\n\n### EC2 Instances\n\n```ts\nconst instance = new ec2.instance(...);\n\nlistener.addEndpointGroup('Group', {\n endpoints: [\n new 
ga_endpoints.InstanceEndpoint(instance, {\n weight: 128,\n preserveClientIp: true,\n }),\n ],\n});\n```\n\n### Elastic IP Addresses\n\n```ts\nconst eip = new ec2.CfnEIP(...);\n\nlistener.addEndpointGroup('Group', {\n endpoints: [\n new ga_endpoints.CfnEipEndpoint(eip, {\n weight: 128,\n }),\n ],\n});\n```\n\n## Client IP Address Preservation and Security Groups\n\nWhen using the `preserveClientIp` feature, AGA creates\n**Elastic Network Interfaces** (ENIs) in your AWS account, that are\nassociated with a Security Group AGA creates for you. You can use the\nsecurity group created by AGA as a source group in other security groups\n(such as those for EC2 instances or Elastic Load Balancers), if you want to\nrestrict incoming traffic to the AGA security group rules.\n\nAGA creates a specific security group called `GlobalAccelerator` for each VPC\nit has an ENI in (this behavior can not be changed). CloudFormation doesn't\nsupport referencing the security group created by AGA, but this construct\nlibrary comes with a custom resource that enables you to reference the AGA\nsecurity group.\n\nCall `endpointGroup.connectionsPeer()` to obtain a reference to the Security Group\nwhich you can use in connection rules. You must pass a reference to the VPC in whose\ncontext the security group will be looked up. Example:\n\n```ts\n// ...\n\n// Non-open ALB\nconst alb = new elbv2.ApplicationLoadBalancer(stack, 'ALB', { /* ... */ });\n\nconst endpointGroup = listener.addEndpointGroup('Group', {\n endpoints: [\n new ga_endpoints.ApplicationLoadBalancerEndpoint(alb, {\n preserveClientIps: true,\n })],\n ],\n});\n\n// Remember that there is only one AGA security group per VPC.\nconst agaSg = endpointGroup.connectionsPeer('GlobalAcceleratorSG', vpc);\n\n// Allow connections from the AGA to the ALB\nalb.connections.allowFrom(agaSg, Port.tcp(443));\n```\n"
|
|
1697
|
-
},
|
|
1698
1201
|
"targets": {
|
|
1699
1202
|
"dotnet": {
|
|
1700
1203
|
"namespace": "Amazon.CDK.AWS.GlobalAccelerator"
|
|
@@ -1708,13 +1211,6 @@
|
|
|
1708
1211
|
}
|
|
1709
1212
|
},
|
|
1710
1213
|
"aws-cdk-lib.aws_globalaccelerator_endpoints": {
|
|
1711
|
-
"locationInModule": {
|
|
1712
|
-
"filename": "lib/index.ts",
|
|
1713
|
-
"line": 91
|
|
1714
|
-
},
|
|
1715
|
-
"readme": {
|
|
1716
|
-
"markdown": "# Endpoints for AWS Global Accelerator\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis library contains integration classes to reference endpoints in AWS\nGlobal Accelerator. Instances of these classes should be passed to the\n`endpointGroup.addEndpoint()` method.\n\nSee the README of the `@aws-cdk/aws-globalaccelerator` library for more information on\nAWS Global Accelerator, and examples of all the integration classes available in\nthis module.\n"
|
|
1717
|
-
},
|
|
1718
1214
|
"targets": {
|
|
1719
1215
|
"dotnet": {
|
|
1720
1216
|
"namespace": "Amazon.CDK.AWS.GlobalAccelerator.Endpoints"
|
|
@@ -1728,10 +1224,6 @@
|
|
|
1728
1224
|
}
|
|
1729
1225
|
},
|
|
1730
1226
|
"aws-cdk-lib.aws_glue": {
|
|
1731
|
-
"locationInModule": {
|
|
1732
|
-
"filename": "lib/index.ts",
|
|
1733
|
-
"line": 92
|
|
1734
|
-
},
|
|
1735
1227
|
"targets": {
|
|
1736
1228
|
"dotnet": {
|
|
1737
1229
|
"namespace": "Amazon.CDK.AWS.Glue"
|
|
@@ -1745,10 +1237,6 @@
|
|
|
1745
1237
|
}
|
|
1746
1238
|
},
|
|
1747
1239
|
"aws-cdk-lib.aws_greengrass": {
|
|
1748
|
-
"locationInModule": {
|
|
1749
|
-
"filename": "lib/index.ts",
|
|
1750
|
-
"line": 93
|
|
1751
|
-
},
|
|
1752
1240
|
"targets": {
|
|
1753
1241
|
"dotnet": {
|
|
1754
1242
|
"namespace": "Amazon.CDK.AWS.Greengrass"
|
|
@@ -1762,10 +1250,6 @@
|
|
|
1762
1250
|
}
|
|
1763
1251
|
},
|
|
1764
1252
|
"aws-cdk-lib.aws_greengrassv2": {
|
|
1765
|
-
"locationInModule": {
|
|
1766
|
-
"filename": "lib/index.ts",
|
|
1767
|
-
"line": 94
|
|
1768
|
-
},
|
|
1769
1253
|
"targets": {
|
|
1770
1254
|
"dotnet": {
|
|
1771
1255
|
"namespace": "Amazon.CDK.AWS.GreengrassV2"
|
|
@@ -1779,10 +1263,6 @@
|
|
|
1779
1263
|
}
|
|
1780
1264
|
},
|
|
1781
1265
|
"aws-cdk-lib.aws_groundstation": {
|
|
1782
|
-
"locationInModule": {
|
|
1783
|
-
"filename": "lib/index.ts",
|
|
1784
|
-
"line": 95
|
|
1785
|
-
},
|
|
1786
1266
|
"targets": {
|
|
1787
1267
|
"dotnet": {
|
|
1788
1268
|
"namespace": "Amazon.CDK.AWS.GroundStation"
|
|
@@ -1796,10 +1276,6 @@
|
|
|
1796
1276
|
}
|
|
1797
1277
|
},
|
|
1798
1278
|
"aws-cdk-lib.aws_guardduty": {
|
|
1799
|
-
"locationInModule": {
|
|
1800
|
-
"filename": "lib/index.ts",
|
|
1801
|
-
"line": 96
|
|
1802
|
-
},
|
|
1803
1279
|
"targets": {
|
|
1804
1280
|
"dotnet": {
|
|
1805
1281
|
"namespace": "Amazon.CDK.AWS.GuardDuty"
|
|
@@ -1813,13 +1289,6 @@
|
|
|
1813
1289
|
}
|
|
1814
1290
|
},
|
|
1815
1291
|
"aws-cdk-lib.aws_iam": {
|
|
1816
|
-
"locationInModule": {
|
|
1817
|
-
"filename": "lib/index.ts",
|
|
1818
|
-
"line": 97
|
|
1819
|
-
},
|
|
1820
|
-
"readme": {
|
|
1821
|
-
"markdown": "# AWS Identity and Access Management Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nDefine a role and add permissions to it. This will automatically create and\nattach an IAM policy to the role:\n\n[attaching permissions to role](test/example.role.lit.ts)\n\nDefine a policy and attach it to groups, users and roles. Note that it is possible to attach\nthe policy either by calling `xxx.attachInlinePolicy(policy)` or `policy.attachToXxx(xxx)`.\n\n[attaching policies to user and group](test/example.attaching.lit.ts)\n\nManaged policies can be attached using `xxx.addManagedPolicy(ManagedPolicy.fromAwsManagedPolicyName(policyName))`:\n\n[attaching managed policies](test/example.managedpolicy.lit.ts)\n\n## Granting permissions to resources\n\nMany of the AWS CDK resources have `grant*` methods that allow you to grant other resources access to that resource. As an example, the following code gives a Lambda function write permissions (Put, Update, Delete) to a DynamoDB table.\n\n```ts\nconst fn = new lambda.Function(this, 'Function', functionProps);\nconst table = new dynamodb.Table(this, 'Table', tableProps);\n\ntable.grantWriteData(fn);\n```\n\nThe more generic `grant` method allows you to give specific permissions to a resource:\n\n```ts\nconst fn = new lambda.Function(this, 'Function', functionProps);\nconst table = new dynamodb.Table(this, 'Table', tableProps);\n\ntable.grant(fn, 'dynamodb:PutItem');\n```\n\nThe `grant*` methods accept an `IGrantable` object. This interface is implemented by IAM principlal resources (groups, users and roles) and resources that assume a role such as a Lambda function, EC2 instance or a Codebuild project.\n\nYou can find which `grant*` methods exist for a resource in the [AWS CDK API Reference](https://docs.aws.amazon.com/cdk/api/latest/docs/aws-construct-library.html).\n\n## Roles\n\nMany AWS resources require *Roles* to operate. 
These Roles define the AWS API\ncalls an instance or other AWS service is allowed to make.\n\nCreating Roles and populating them with the right permissions *Statements* is\na necessary but tedious part of setting up AWS infrastructure. In order to\nhelp you focus on your business logic, CDK will take care of creating\nroles and populating them with least-privilege permissions automatically.\n\nAll constructs that require Roles will create one for you if don't specify\none at construction time. Permissions will be added to that role\nautomatically if you associate the construct with other constructs from the\nAWS Construct Library (for example, if you tell an *AWS CodePipeline* to trigger\nan *AWS Lambda Function*, the Pipeline's Role will automatically get\n`lambda:InvokeFunction` permissions on that particular Lambda Function),\nor if you explicitly grant permissions using `grant` functions (see the\nprevious section).\n\n### Opting out of automatic permissions management\n\nYou may prefer to manage a Role's permissions yourself instead of having the\nCDK automatically manage them for you. 
This may happen in one of the\nfollowing cases:\n\n* You don't like the permissions that CDK automatically generates and\n want to substitute your own set.\n* The least-permissions policy that the CDK generates is becoming too\n big for IAM to store, and you need to add some wildcards to keep the\n policy size down.\n\nTo prevent constructs from updating your Role's policy, pass the object\nreturned by `myRole.withoutPolicyUpdates()` instead of `myRole` itself.\n\nFor example, to have an AWS CodePipeline *not* automatically add the required\npermissions to trigger the expected targets, do the following:\n\n```ts\nconst role = new iam.Role(this, 'Role', {\n assumedBy: new iam.ServicePrincipal('codepipeline.amazonaws.com'),\n // custom description if desired\n description: 'This is a custom role...',\n});\n\nnew codepipeline.Pipeline(this, 'Pipeline', {\n // Give the Pipeline an immutable view of the Role\n role: role.withoutPolicyUpdates(),\n});\n\n// You now have to manage the Role policies yourself\nrole.addToPolicy(new iam.PolicyStatement({\n actions: [/* whatever actions you want */],\n resources: [/* whatever resources you intend to touch */],\n}));\n```\n\n### Using existing roles\n\nIf there are Roles in your account that have already been created which you\nwould like to use in your CDK application, you can use `Role.fromRoleArn` to\nimport them, as follows:\n\n```ts\nconst role = iam.Role.fromRoleArn(this, 'Role', 'arn:aws:iam::123456789012:role/MyExistingRole', {\n // Set 'mutable' to 'false' to use the role as-is and prevent adding new\n // policies to it. The default is 'true', which means the role may be\n // modified as part of the deployment.\n mutable: false,\n});\n```\n\n## Configuring an ExternalId\n\nIf you need to create Roles that will be assumed by third parties, it is generally a good idea to [require an `ExternalId`\nto assume them](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html). 
Configuring\nan `ExternalId` works like this:\n\n[supplying an external ID](test/example.external-id.lit.ts)\n\n## Principals vs Identities\n\nWhen we say *Principal*, we mean an entity you grant permissions to. This\nentity can be an AWS Service, a Role, or something more abstract such as \"all\nusers in this account\" or even \"all users in this organization\". An\n*Identity* is an IAM representing a single IAM entity that can have\na policy attached, one of `Role`, `User`, or `Group`.\n\n## IAM Principals\n\nWhen defining policy statements as part of an AssumeRole policy or as part of a\nresource policy, statements would usually refer to a specific IAM principal\nunder `Principal`.\n\nIAM principals are modeled as classes that derive from the `iam.PolicyPrincipal`\nabstract class. Principal objects include principal type (string) and value\n(array of string), optional set of conditions and the action that this principal\nrequires when it is used in an assume role policy document.\n\nTo add a principal to a policy statement you can either use the abstract\n`statement.addPrincipal`, one of the concrete `addXxxPrincipal` methods:\n\n* `addAwsPrincipal`, `addArnPrincipal` or `new ArnPrincipal(arn)` for `{ \"AWS\": arn }`\n* `addAwsAccountPrincipal` or `new AccountPrincipal(accountId)` for `{ \"AWS\": account-arn }`\n* `addServicePrincipal` or `new ServicePrincipal(service)` for `{ \"Service\": service }`\n* `addAccountRootPrincipal` or `new AccountRootPrincipal()` for `{ \"AWS\": { \"Ref: \"AWS::AccountId\" } }`\n* `addCanonicalUserPrincipal` or `new CanonicalUserPrincipal(id)` for `{ \"CanonicalUser\": id }`\n* `addFederatedPrincipal` or `new FederatedPrincipal(federated, conditions, assumeAction)` for\n `{ \"Federated\": arn }` and a set of optional conditions and the assume role action to use.\n* `addAnyPrincipal` or `new AnyPrincipal` for `{ \"AWS\": \"*\" }`\n\nIf multiple principals are added to the policy statement, they will be merged 
together:\n\n```ts\nconst statement = new iam.PolicyStatement();\nstatement.addServicePrincipal('cloudwatch.amazonaws.com');\nstatement.addServicePrincipal('ec2.amazonaws.com');\nstatement.addArnPrincipal('arn:aws:boom:boom');\n```\n\nWill result in:\n\n```json\n{\n \"Principal\": {\n \"Service\": [ \"cloudwatch.amazonaws.com\", \"ec2.amazonaws.com\" ],\n \"AWS\": \"arn:aws:boom:boom\"\n }\n}\n```\n\nThe `CompositePrincipal` class can also be used to define complex principals, for example:\n\n```ts\nconst role = new iam.Role(this, 'MyRole', {\n assumedBy: new iam.CompositePrincipal(\n new iam.ServicePrincipal('ec2.amazonaws.com'),\n new iam.AccountPrincipal('1818188181818187272')\n )\n});\n```\n\nThe `PrincipalWithConditions` class can be used to add conditions to a\nprincipal, especially those that don't take a `conditions` parameter in their\nconstructor. The `principal.withConditions()` method can be used to create a\n`PrincipalWithConditions` from an existing principal, for example:\n\n```ts\nconst principal = new iam.AccountPrincipal('123456789000')\n .withConditions({ StringEquals: { foo: \"baz\" } });\n```\n\n> NOTE: If you need to define an IAM condition that uses a token (such as a\n> deploy-time attribute of another resource) in a JSON map key, use `CfnJson` to\n> render this condition. 
See [this test](./test/integ-condition-with-ref.ts) for\n> an example.\n\nThe `WebIdentityPrincipal` class can be used as a principal for web identities like\nCognito, Amazon, Google or Facebook, for example:\n\n```ts\nconst principal = new iam.WebIdentityPrincipal('cognito-identity.amazonaws.com')\n .withConditions({\n \"StringEquals\": { \"cognito-identity.amazonaws.com:aud\": \"us-east-2:12345678-abcd-abcd-abcd-123456\" },\n \"ForAnyValue:StringLike\": {\"cognito-identity.amazonaws.com:amr\": \"unauthenticated\"}\n });\n```\n\n## Parsing JSON Policy Documents\n\nThe `PolicyDocument.fromJson` and `PolicyStatement.fromJson` static methods can be used to parse JSON objects. For example:\n\n```ts\nconst policyDocument = {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Sid\": \"FirstStatement\",\n \"Effect\": \"Allow\",\n \"Action\": [\"iam:ChangePassword\"],\n \"Resource\": \"*\"\n },\n {\n \"Sid\": \"SecondStatement\",\n \"Effect\": \"Allow\",\n \"Action\": \"s3:ListAllMyBuckets\",\n \"Resource\": \"*\"\n },\n {\n \"Sid\": \"ThirdStatement\",\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:List*\",\n \"s3:Get*\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::confidential-data\",\n \"arn:aws:s3:::confidential-data/*\"\n ],\n \"Condition\": {\"Bool\": {\"aws:MultiFactorAuthPresent\": \"true\"}}\n }\n ]\n};\n\nconst customPolicyDocument = iam.PolicyDocument.fromJson(policyDocument);\n\n// You can pass this document as an initial document to a ManagedPolicy\n// or inline Policy.\nconst newManagedPolicy = new ManagedPolicy(stack, 'MyNewManagedPolicy', {\n document: customPolicyDocument\n});\nconst newPolicy = new Policy(stack, 'MyNewPolicy', {\n document: customPolicyDocument\n});\n```\n\n## Permissions Boundaries\n\n[Permissions\nBoundaries](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html)\ncan be used as a mechanism to prevent privilege esclation by creating new\n`Role`s. 
Permissions Boundaries are a Managed Policy, attached to Roles or\nUsers, that represent the *maximum* set of permissions they can have. The\neffective set of permissions of a Role (or User) will be the intersection of\nthe Identity Policy and the Permissions Boundary attached to the Role (or\nUser). Permissions Boundaries are typically created by account\nAdministrators, and their use on newly created `Role`s will be enforced by\nIAM policies.\n\nIt is possible to attach Permissions Boundaries to all Roles created in a construct\ntree all at once:\n\n```ts\n// This imports an existing policy.\nconst boundary = iam.ManagedPolicy.fromManagedPolicyArn(this, 'Boundary', 'arn:aws:iam::123456789012:policy/boundary');\n\n// This creates a new boundary\nconst boundary2 = new iam.ManagedPolicy(this, 'Boundary2', {\n statements: [\n new iam.PolicyStatement({\n effect: iam.Effect.DENY,\n actions: ['iam:*'],\n resources: ['*'],\n }),\n ],\n});\n\n// Directly apply the boundary to a Role you create\niam.PermissionsBoundary.of(role).apply(boundary);\n\n// Apply the boundary to an Role that was implicitly created for you\niam.PermissionsBoundary.of(lambdaFunction).apply(boundary);\n\n// Apply the boundary to all Roles in a stack\niam.PermissionsBoundary.of(stack).apply(boundary);\n\n// Remove a Permissions Boundary that is inherited, for example from the Stack level\niam.PermissionsBoundary.of(customResource).clear();\n```\n\n## OpenID Connect Providers\n\nOIDC identity providers are entities in IAM that describe an external identity\nprovider (IdP) service that supports the [OpenID Connect] (OIDC) standard, such\nas Google or Salesforce. You use an IAM OIDC identity provider when you want to\nestablish trust between an OIDC-compatible IdP and your AWS account. This is\nuseful when creating a mobile app or web application that requires access to AWS\nresources, but you don't want to create custom sign-in code or manage your own\nuser identities. 
For more information about this scenario, see [About Web\nIdentity Federation] and the relevant documentation in the [Amazon Cognito\nIdentity Pools Developer Guide].\n\n[OpenID Connect]: http://openid.net/connect\n[About Web Identity Federation]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc.html\n[Amazon Cognito Identity Pools Developer Guide]: https://docs.aws.amazon.com/cognito/latest/developerguide/open-id.html\n\nThe following examples defines an OpenID Connect provider. Two client IDs\n(audiences) are will be able to send authentication requests to\nhttps://openid/connect.\n\n```ts\nconst provider = new iam.OpenIdConnectProvider(this, 'MyProvider', {\n url: 'https://openid/connect',\n clientIds: [ 'myclient1', 'myclient2' ],\n});\n```\n\nYou can specify an optional list of `thumbprints`. If not specified, the\nthumbprint of the root certificate authority (CA) will automatically be obtained\nfrom the host as described\n[here](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_oidc_verify-thumbprint.html).\n\nOnce you define an OpenID connect provider, you can use it with AWS services\nthat expect an IAM OIDC provider. 
For example, when you define an [Amazon\nCognito identity\npool](https://docs.aws.amazon.com/cognito/latest/developerguide/open-id.html)\nyou can reference the provider's ARN as follows:\n\n```ts\nnew cognito.CfnIdentityPool(this, 'IdentityPool', {\n openIdConnectProviderArns: [myProvider.openIdConnectProviderArn],\n // And the other properties for your identity pool\n allowUnauthenticatedIdentities,\n});\n```\n\nThe `OpenIdConnectPrincipal` class can be used as a principal used with a `OpenIdConnectProvider`, for example:\n\n```ts\nconst provider = new iam.OpenIdConnectProvider(this, 'MyProvider', {\n url: 'https://openid/connect',\n clientIds: [ 'myclient1', 'myclient2' ]\n});\nconst principal = new iam.OpenIdConnectPrincipal(provider);\n```\n\n## SAML provider\n\nAn IAM SAML 2.0 identity provider is an entity in IAM that describes an external\nidentity provider (IdP) service that supports the SAML 2.0 (Security Assertion\nMarkup Language 2.0) standard. You use an IAM identity provider when you want\nto establish trust between a SAML-compatible IdP such as Shibboleth or Active\nDirectory Federation Services and AWS, so that users in your organization can\naccess AWS resources. 
IAM SAML identity providers are used as principals in an\nIAM trust policy.\n\n```ts\nnew iam.SamlProvider(this, 'Provider', {\n metadataDocument: iam.SamlMetadataDocument.fromFile('/path/to/saml-metadata-document.xml'),\n});\n```\n\nThe `SamlPrincipal` class can be used as a principal with a `SamlProvider`:\n\n```ts\nconst provider = new iam.SamlProvider(this, 'Provider', {\n metadataDocument: iam.SamlMetadataDocument.fromFile('/path/to/saml-metadata-document.xml'),\n});\nconst principal = new iam.SamlPrincipal(provider, {\n StringEquals: {\n 'SAML:iss': 'issuer',\n },\n});\n```\n\nWhen creating a role for programmatic and AWS Management Console access, use the `SamlConsolePrincipal`\nclass:\n\n```ts\nconst provider = new iam.SamlProvider(this, 'Provider', {\n metadataDocument: iam.SamlMetadataDocument.fromFile('/path/to/saml-metadata-document.xml'),\n});\nnew iam.Role(this, 'Role', {\n assumedBy: new iam.SamlConsolePrincipal(provider),\n});\n```\n\n## Users\n\nIAM manages users for your AWS account. 
To create a new user:\n\n```ts\nconst user = new User(this, 'MyUser');\n```\n\nTo import an existing user by name [with path](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-friendly-names):\n\n```ts\nconst user = User.fromUserName(stack, 'MyImportedUserByName', 'johnsmith');\n```\n\nTo import an existing user by ARN:\n\n```ts\nconst user = User.fromUserArn(this, 'MyImportedUserByArn', 'arn:aws:iam::123456789012:user/johnsmith');\n```\n\nTo import an existing user by attributes:\n\n```ts\nconst user = User.fromUserAttributes(stack, 'MyImportedUserByAttributes', {\n userArn: 'arn:aws:iam::123456789012:user/johnsmith',\n});\n```\n\nTo add a user to a group (both for a new and imported user/group):\n\n```ts\nconst user = new User(this, 'MyUser'); // or User.fromUserName(stack, 'User', 'johnsmith');\nconst group = new Group(this, 'MyGroup'); // or Group.fromGroupArn(stack, 'Group', 'arn:aws:iam::account-id:group/group-name');\n\nuser.addToGroup(group);\n// or\ngroup.addUser(user);\n```\n\n\n## Features\n\n * Policy name uniqueness is enforced. If two policies by the same name are attached to the same\n principal, the attachment will fail.\n * Policy names are not required - the CDK logical ID will be used and ensured to be unique.\n * Policies are validated during synthesis to ensure that they have actions, and that policies\n attached to IAM principals specify relevant resources, while policies attached to resources\n specify which IAM principals they apply to.\n"
|
|
1822
|
-
},
|
|
1823
1292
|
"targets": {
|
|
1824
1293
|
"dotnet": {
|
|
1825
1294
|
"namespace": "Amazon.CDK.AWS.IAM"
|
|
@@ -1833,10 +1302,6 @@
|
|
|
1833
1302
|
}
|
|
1834
1303
|
},
|
|
1835
1304
|
"aws-cdk-lib.aws_imagebuilder": {
|
|
1836
|
-
"locationInModule": {
|
|
1837
|
-
"filename": "lib/index.ts",
|
|
1838
|
-
"line": 98
|
|
1839
|
-
},
|
|
1840
1305
|
"targets": {
|
|
1841
1306
|
"dotnet": {
|
|
1842
1307
|
"namespace": "Amazon.CDK.AWS.ImageBuilder"
|
|
@@ -1850,10 +1315,6 @@
|
|
|
1850
1315
|
}
|
|
1851
1316
|
},
|
|
1852
1317
|
"aws-cdk-lib.aws_inspector": {
|
|
1853
|
-
"locationInModule": {
|
|
1854
|
-
"filename": "lib/index.ts",
|
|
1855
|
-
"line": 99
|
|
1856
|
-
},
|
|
1857
1318
|
"targets": {
|
|
1858
1319
|
"dotnet": {
|
|
1859
1320
|
"namespace": "Amazon.CDK.AWS.Inspector"
|
|
@@ -1867,10 +1328,6 @@
|
|
|
1867
1328
|
}
|
|
1868
1329
|
},
|
|
1869
1330
|
"aws-cdk-lib.aws_iot": {
|
|
1870
|
-
"locationInModule": {
|
|
1871
|
-
"filename": "lib/index.ts",
|
|
1872
|
-
"line": 100
|
|
1873
|
-
},
|
|
1874
1331
|
"targets": {
|
|
1875
1332
|
"dotnet": {
|
|
1876
1333
|
"namespace": "Amazon.CDK.AWS.IoT"
|
|
@@ -1884,10 +1341,6 @@
|
|
|
1884
1341
|
}
|
|
1885
1342
|
},
|
|
1886
1343
|
"aws-cdk-lib.aws_iot1click": {
|
|
1887
|
-
"locationInModule": {
|
|
1888
|
-
"filename": "lib/index.ts",
|
|
1889
|
-
"line": 101
|
|
1890
|
-
},
|
|
1891
1344
|
"targets": {
|
|
1892
1345
|
"dotnet": {
|
|
1893
1346
|
"namespace": "Amazon.CDK.AWS.IoT1Click"
|
|
@@ -1901,10 +1354,6 @@
|
|
|
1901
1354
|
}
|
|
1902
1355
|
},
|
|
1903
1356
|
"aws-cdk-lib.aws_iotanalytics": {
|
|
1904
|
-
"locationInModule": {
|
|
1905
|
-
"filename": "lib/index.ts",
|
|
1906
|
-
"line": 102
|
|
1907
|
-
},
|
|
1908
1357
|
"targets": {
|
|
1909
1358
|
"dotnet": {
|
|
1910
1359
|
"namespace": "Amazon.CDK.AWS.IoTAnalytics"
|
|
@@ -1918,10 +1367,6 @@
|
|
|
1918
1367
|
}
|
|
1919
1368
|
},
|
|
1920
1369
|
"aws-cdk-lib.aws_iotcoredeviceadvisor": {
|
|
1921
|
-
"locationInModule": {
|
|
1922
|
-
"filename": "lib/index.ts",
|
|
1923
|
-
"line": 103
|
|
1924
|
-
},
|
|
1925
1370
|
"targets": {
|
|
1926
1371
|
"dotnet": {
|
|
1927
1372
|
"namespace": "Amazon.CDK.AWS.IoTCoreDeviceAdvisor"
|
|
@@ -1935,10 +1380,6 @@
|
|
|
1935
1380
|
}
|
|
1936
1381
|
},
|
|
1937
1382
|
"aws-cdk-lib.aws_iotevents": {
|
|
1938
|
-
"locationInModule": {
|
|
1939
|
-
"filename": "lib/index.ts",
|
|
1940
|
-
"line": 104
|
|
1941
|
-
},
|
|
1942
1383
|
"targets": {
|
|
1943
1384
|
"dotnet": {
|
|
1944
1385
|
"namespace": "Amazon.CDK.AWS.IoTEvents"
|
|
@@ -1952,10 +1393,6 @@
|
|
|
1952
1393
|
}
|
|
1953
1394
|
},
|
|
1954
1395
|
"aws-cdk-lib.aws_iotfleethub": {
|
|
1955
|
-
"locationInModule": {
|
|
1956
|
-
"filename": "lib/index.ts",
|
|
1957
|
-
"line": 105
|
|
1958
|
-
},
|
|
1959
1396
|
"targets": {
|
|
1960
1397
|
"dotnet": {
|
|
1961
1398
|
"namespace": "Amazon.CDK.AWS.IoTFleetHub"
|
|
@@ -1969,10 +1406,6 @@
|
|
|
1969
1406
|
}
|
|
1970
1407
|
},
|
|
1971
1408
|
"aws-cdk-lib.aws_iotsitewise": {
|
|
1972
|
-
"locationInModule": {
|
|
1973
|
-
"filename": "lib/index.ts",
|
|
1974
|
-
"line": 106
|
|
1975
|
-
},
|
|
1976
1409
|
"targets": {
|
|
1977
1410
|
"dotnet": {
|
|
1978
1411
|
"namespace": "Amazon.CDK.AWS.IoTSiteWise"
|
|
@@ -1986,10 +1419,6 @@
|
|
|
1986
1419
|
}
|
|
1987
1420
|
},
|
|
1988
1421
|
"aws-cdk-lib.aws_iotthingsgraph": {
|
|
1989
|
-
"locationInModule": {
|
|
1990
|
-
"filename": "lib/index.ts",
|
|
1991
|
-
"line": 107
|
|
1992
|
-
},
|
|
1993
1422
|
"targets": {
|
|
1994
1423
|
"dotnet": {
|
|
1995
1424
|
"namespace": "Amazon.CDK.AWS.IoTThingsGraph"
|
|
@@ -2003,10 +1432,6 @@
|
|
|
2003
1432
|
}
|
|
2004
1433
|
},
|
|
2005
1434
|
"aws-cdk-lib.aws_iotwireless": {
|
|
2006
|
-
"locationInModule": {
|
|
2007
|
-
"filename": "lib/index.ts",
|
|
2008
|
-
"line": 108
|
|
2009
|
-
},
|
|
2010
1435
|
"targets": {
|
|
2011
1436
|
"dotnet": {
|
|
2012
1437
|
"namespace": "Amazon.CDK.AWS.IoTWireless"
|
|
@@ -2020,10 +1445,6 @@
|
|
|
2020
1445
|
}
|
|
2021
1446
|
},
|
|
2022
1447
|
"aws-cdk-lib.aws_ivs": {
|
|
2023
|
-
"locationInModule": {
|
|
2024
|
-
"filename": "lib/index.ts",
|
|
2025
|
-
"line": 109
|
|
2026
|
-
},
|
|
2027
1448
|
"targets": {
|
|
2028
1449
|
"dotnet": {
|
|
2029
1450
|
"namespace": "Amazon.CDK.AWS.Ivs"
|
|
@@ -2037,10 +1458,6 @@
|
|
|
2037
1458
|
}
|
|
2038
1459
|
},
|
|
2039
1460
|
"aws-cdk-lib.aws_kendra": {
|
|
2040
|
-
"locationInModule": {
|
|
2041
|
-
"filename": "lib/index.ts",
|
|
2042
|
-
"line": 110
|
|
2043
|
-
},
|
|
2044
1461
|
"targets": {
|
|
2045
1462
|
"dotnet": {
|
|
2046
1463
|
"namespace": "Amazon.CDK.AWS.Kendra"
|
|
@@ -2054,13 +1471,6 @@
|
|
|
2054
1471
|
}
|
|
2055
1472
|
},
|
|
2056
1473
|
"aws-cdk-lib.aws_kinesis": {
|
|
2057
|
-
"locationInModule": {
|
|
2058
|
-
"filename": "lib/index.ts",
|
|
2059
|
-
"line": 111
|
|
2060
|
-
},
|
|
2061
|
-
"readme": {
|
|
2062
|
-
"markdown": "# Amazon Kinesis Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\n[Amazon Kinesis](https://docs.aws.amazon.com/streams/latest/dev/introduction.html) provides collection and processing of large\n[streams](https://aws.amazon.com/streaming-data/) of data records in real time. Kinesis data streams can be used for rapid and continuous data\nintake and aggregation.\n\n## Table Of Contents\n\n- [Streams](#streams)\n - [Encryption](#encryption)\n - [Import](#import)\n - [Permission Grants](#permission-grants)\n - [Read Permissions](#read-permissions)\n - [Write Permissions](#write-permissions)\n - [Custom Permissions](#custom-permissions)\n - [Metrics](#metrics)\n\n## Streams\n\nAmazon Kinesis Data Streams ingests a large amount of data in real time, durably stores the data, and makes the data available for consumption.\n\nUsing the CDK, a new Kinesis stream can be created as part of the stack using the construct's constructor. You may specify the `streamName` to give\nyour own identifier to the stream. 
If not, CloudFormation will generate a name.\n\n```ts\nnew Stream(this, \"MyFirstStream\", {\n streamName: \"my-awesome-stream\"\n});\n```\n\nYou can also specify properties such as `shardCount` to indicate how many shards the stream should choose and a `retentionPeriod`\nto specify how long the data in the shards should remain accessible.\nRead more at [Creating and Managing Streams](https://docs.aws.amazon.com/streams/latest/dev/working-with-streams.html)\n\n```ts\nnew Stream(this, \"MyFirstStream\", {\n streamName: \"my-awesome-stream\",\n shardCount: 3,\n retentionPeriod: Duration.hours(48)\n});\n```\n\n### Encryption\n\n[Stream encryption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesis-stream-streamencryption.html) enables\nserver-side encryption using an AWS KMS key for a specified stream.\n\nEncryption is enabled by default on your stream with the master key owned by Kinesis Data Streams in regions where it is supported.\n\n```ts\nnew Stream(this, 'MyEncryptedStream');\n```\n\nYou can enable encryption on your stream with a user-managed key by specifying the `encryption` property.\nA KMS key will be created for you and associated with the stream.\n\n```ts\nnew Stream(this, \"MyEncryptedStream\", {\n encryption: StreamEncryption.KMS\n});\n```\n\nYou can also supply your own external KMS key to use for stream encryption by specifying the `encryptionKey` property.\n\n```ts\nimport { aws_kms as kms } from 'aws-cdk-lib';\n\nconst key = new kms.Key(this, \"MyKey\");\n\nnew Stream(this, \"MyEncryptedStream\", {\n encryption: StreamEncryption.KMS,\n encryptionKey: key\n});\n```\n\n### Import\n\nAny Kinesis stream that has been created outside the stack can be imported into your CDK app.\n\nStreams can be imported by their ARN via the `Stream.fromStreamArn()` API\n\n```ts\nconst stack = new Stack(app, \"MyStack\");\n\nconst importedStream = Stream.fromStreamArn(\n stack,\n \"ImportedStream\",\n 
\"arn:aws:kinesis:us-east-2:123456789012:stream/f3j09j2230j\"\n);\n```\n\nEncrypted Streams can also be imported by their attributes via the `Stream.fromStreamAttributes()` API\n\n```ts\nimport { Key } from 'aws-cdk-lib/aws-kms';\n\nconst stack = new Stack(app, \"MyStack\");\n\nconst importedStream = Stream.fromStreamAttributes(\n stack,\n \"ImportedEncryptedStream\",\n {\n streamArn: \"arn:aws:kinesis:us-east-2:123456789012:stream/f3j09j2230j\",\n encryptionKey: kms.Key.fromKeyArn(\n \"arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012\"\n )\n }\n);\n```\n\n### Permission Grants\n\nIAM roles, users or groups which need to be able to work with Amazon Kinesis streams at runtime should be granted IAM permissions.\n\nAny object that implements the `IGrantable` interface (has an associated principal) can be granted permissions by calling:\n\n- `grantRead(principal)` - grants the principal read access\n- `grantWrite(principal)` - grants the principal write permissions to a Stream\n- `grantReadWrite(principal)` - grants principal read and write permissions\n\n#### Read Permissions\n\nGrant `read` access to a stream by calling the `grantRead()` API.\nIf the stream has an encryption key, read permissions will also be granted to the key.\n\n```ts\nconst lambdaRole = new iam.Role(this, 'Role', {\n assumedBy: new iam.ServicePrincipal('lambda.amazonaws.com'),\n description: 'Example role...',\n}\n\nconst stream = new Stream(this, 'MyEncryptedStream', {\n encryption: StreamEncryption.KMS\n});\n\n// give lambda permissions to read stream\nstream.grantRead(lambdaRole);\n```\n\nThe following read permissions are provided to a service principal by the `grantRead()` API:\n\n- `kinesis:DescribeStreamSummary`\n- `kinesis:GetRecords`\n- `kinesis:GetShardIterator`\n- `kinesis:ListShards`\n- `kinesis:SubscribeToShard`\n\n#### Write Permissions\n\nGrant `write` permissions to a stream is provided by calling the `grantWrite()` API.\nIf the stream has an encryption 
key, write permissions will also be granted to the key.\n\n```ts\nconst lambdaRole = new iam.Role(this, 'Role', {\n assumedBy: new iam.ServicePrincipal('lambda.amazonaws.com'),\n description: 'Example role...',\n}\n\nconst stream = new Stream(this, 'MyEncryptedStream', {\n encryption: StreamEncryption.KMS\n});\n\n// give lambda permissions to write to stream\nstream.grantWrite(lambdaRole);\n```\n\nThe following write permissions are provided to a service principal by the `grantWrite()` API:\n\n- `kinesis:ListShards`\n- `kinesis:PutRecord`\n- `kinesis:PutRecords`\n\n#### Custom Permissions\n\nYou can add any set of permissions to a stream by calling the `grant()` API.\n\n```ts\nconst user = new iam.User(stack, 'MyUser');\n\nconst stream = new Stream(stack, 'MyStream');\n\n// give my user permissions to list shards\nstream.grant(user, 'kinesis:ListShards');\n```\n\n### Metrics\n\nYou can use common metrics from your stream to create alarms and/or dashboards. The `stream.metric('MetricName')` method creates a metric with the stream namespace and dimension. You can also use pre-define methods like `stream.metricGetRecordsSuccess()`. To find out more about Kinesis metrics check [Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch](https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html).\n\n```ts\nconst stream = new Stream(stack, 'MyStream');\n\n// Using base metric method passing the metric name\nstream.metric('GetRecords.Success');\n\n// using pre-defined metric method\nstream.metricGetRecordsSuccess();\n\n// using pre-defined and overriding the statistic\nstream.metricGetRecordsSuccess({ statistic: 'Maximum' });\n```\n\n"
|
|
2063
|
-
},
|
|
2064
1474
|
"targets": {
|
|
2065
1475
|
"dotnet": {
|
|
2066
1476
|
"namespace": "Amazon.CDK.AWS.Kinesis"
|
|
@@ -2074,10 +1484,6 @@
|
|
|
2074
1484
|
}
|
|
2075
1485
|
},
|
|
2076
1486
|
"aws-cdk-lib.aws_kinesisanalytics": {
|
|
2077
|
-
"locationInModule": {
|
|
2078
|
-
"filename": "lib/index.ts",
|
|
2079
|
-
"line": 112
|
|
2080
|
-
},
|
|
2081
1487
|
"targets": {
|
|
2082
1488
|
"dotnet": {
|
|
2083
1489
|
"namespace": "Amazon.CDK.AWS.KinesisAnalytics"
|
|
@@ -2091,10 +1497,6 @@
|
|
|
2091
1497
|
}
|
|
2092
1498
|
},
|
|
2093
1499
|
"aws-cdk-lib.aws_kinesisfirehose": {
|
|
2094
|
-
"locationInModule": {
|
|
2095
|
-
"filename": "lib/index.ts",
|
|
2096
|
-
"line": 113
|
|
2097
|
-
},
|
|
2098
1500
|
"targets": {
|
|
2099
1501
|
"dotnet": {
|
|
2100
1502
|
"namespace": "Amazon.CDK.AWS.KinesisFirehose"
|
|
@@ -2108,13 +1510,6 @@
|
|
|
2108
1510
|
}
|
|
2109
1511
|
},
|
|
2110
1512
|
"aws-cdk-lib.aws_kms": {
|
|
2111
|
-
"locationInModule": {
|
|
2112
|
-
"filename": "lib/index.ts",
|
|
2113
|
-
"line": 114
|
|
2114
|
-
},
|
|
2115
|
-
"readme": {
|
|
2116
|
-
"markdown": "# AWS Key Management Service Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nDefine a KMS key:\n\n```ts\nimport { aws_kms as kms } from 'aws-cdk-lib';\n\nnew kms.Key(this, 'MyKey', {\n enableKeyRotation: true\n});\n```\n\nDefine a KMS key with waiting period:\n\nSpecifies the number of days in the waiting period before AWS KMS deletes a CMK that has been removed from a CloudFormation stack.\n\n```ts\nconst key = new kms.Key(this, 'MyKey', {\n pendingWindow: 10 // Default to 30 Days\n});\n```\n\n\nAdd a couple of aliases:\n\n```ts\nconst key = new kms.Key(this, 'MyKey');\nkey.addAlias('alias/foo');\nkey.addAlias('alias/bar');\n```\n\n\nDefine a key with specific key spec and key usage:\n\nValid `keySpec` values depends on `keyUsage` value.\n\n```ts\nconst key = new kms.Key(this, 'MyKey', {\n keySpec: kms.KeySpec.ECC_SECG_P256K1, // Default to SYMMETRIC_DEFAULT\n keyUsage: kms.KeyUsage.SIGN_VERIFY // and ENCRYPT_DECRYPT\n});\n```\n\n## Sharing keys between stacks\n\nTo use a KMS key in a different stack in the same CDK application,\npass the construct to the other stack:\n\n[sharing key between stacks](test/integ.key-sharing.lit.ts)\n\n\n## Importing existing keys\n\n### Import key by ARN\n\nTo use a KMS key that is not defined in this CDK app, but is created through other means, use\n`Key.fromKeyArn(parent, name, ref)`:\n\n```ts\nconst myKeyImported = kms.Key.fromKeyArn(this, 'MyImportedKey', 'arn:aws:...');\n\n// you can do stuff with this imported key.\nmyKeyImported.addAlias('alias/foo');\n```\n\nNote that a call to `.addToResourcePolicy(statement)` on `myKeyImported` will not have\nan affect on the key's policy because it is not owned by your stack. The call\nwill be a no-op.\n\n### Import key by alias\n\nIf a Key has an associated Alias, the Alias can be imported by name and used in place\nof the Key as a reference. 
A common scenario for this is in referencing AWS managed keys.\n\n```ts\nconst myKeyAlias = kms.Alias.fromAliasName(this, 'myKey', 'alias/aws/s3');\nconst trail = new cloudtrail.Trail(this, 'myCloudTrail', {\n sendToCloudWatchLogs: true,\n kmsKey: myKeyAlias\n});\n```\n\nNote that calls to `addToResourcePolicy` and `grant*` methods on `myKeyAlias` will be\nno-ops, and `addAlias` and `aliasTargetKey` will fail, as the imported alias does not\nhave a reference to the underlying KMS Key.\n\n### Lookup key by alias\n\nIf you can't use a KMS key imported by alias (e.g. because you need access to the key id), you can lookup the key with `Key.fromLookup()`.\n\nIn general, the preferred method would be to use `Alias.fromAliasName()` which returns an `IAlias` object which extends `IKey`. However, some services need to have access to the underlying key id. In this case, `Key.fromLookup()` allows to lookup the key id.\n\nThe result of the `Key.fromLookup()` operation will be written to a file\ncalled `cdk.context.json`. You must commit this file to source control so\nthat the lookup values are available in non-privileged environments such\nas CI build steps, and to ensure your template builds are repeatable.\n\nHere's how `Key.fromLookup()` can be used:\n\n```ts\nconst myKeyLookup = kms.Key.fromLookup(this, 'MyKeyLookup', {\n aliasName: 'alias/KeyAlias'\n});\n\nconst role = new iam.Role(this, 'MyRole', {\n assumedBy: new iam.ServicePrincipal('lambda.amazonaws.com'),\n});\nmyKeyLookup.grantEncryptDecrypt(role);\n```\n\nNote that a call to `.addToResourcePolicy(statement)` on `myKeyLookup` will not have\nan affect on the key's policy because it is not owned by your stack. 
The call\nwill be a no-op.\n\n## Key Policies\n\nControlling access and usage of KMS Keys requires the use of key policies (resource-based policies attached to the key);\nthis is in contrast to most other AWS resources where access can be entirely controlled with IAM policies,\nand optionally complemented with resource policies. For more in-depth understanding of KMS key access and policies, see\n\n* https://docs.aws.amazon.com/kms/latest/developerguide/control-access-overview.html\n* https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html\n\nKMS keys can be created to trust IAM policies. This is the default behavior for both the KMS APIs and in\nthe console. This behavior is enabled by the '@aws-cdk/aws-kms:defaultKeyPolicies' feature flag,\nwhich is set for all new projects; for existing projects, this same behavior can be enabled by\npassing the `trustAccountIdentities` property as `true` when creating the key:\n\n```ts\nnew kms.Key(stack, 'MyKey', { trustAccountIdentities: true });\n```\n\nWith either the `@aws-cdk/aws-kms:defaultKeyPolicies` feature flag set,\nor the `trustAccountIdentities` prop set, the Key will be given the following default key policy:\n\n```json\n{\n \"Effect\": \"Allow\",\n \"Principal\": {\"AWS\": \"arn:aws:iam::111122223333:root\"},\n \"Action\": \"kms:*\",\n \"Resource\": \"*\"\n}\n```\n\nThis policy grants full access to the key to the root account user.\nThis enables the root account user -- via IAM policies -- to grant access to other IAM principals.\nWith the above default policy, future permissions can be added to either the key policy or IAM principal policy.\n\n```ts\nconst key = new kms.Key(stack, 'MyKey');\nconst user = new iam.User(stack, 'MyUser');\nkey.grantEncrypt(user); // Adds encrypt permissions to user policy; key policy is unmodified.\n```\n\nAdopting the default KMS key policy (and so trusting account identities)\nsolves many issues around cyclic dependencies between stacks.\nWithout this default key 
policy, future permissions must be added to both the key policy and IAM principal policy,\nwhich can cause cyclic dependencies if the permissions cross stack boundaries.\n(For example, an encrypted bucket in one stack, and Lambda function that accesses it in another.)\n\n### Appending to or replacing the default key policy\n\nThe default key policy can be amended or replaced entirely, depending on your use case and requirements.\nA common addition to the key policy would be to add other key admins that are allowed to administer the key\n(e.g., change permissions, revoke, delete). Additional key admins can be specified at key creation or after\nvia the `grantAdmin` method.\n\n```ts\nconst myTrustedAdminRole = iam.Role.fromRoleArn(stack, 'TrustedRole', 'arn:aws:iam:....');\nconst key = new kms.Key(stack, 'MyKey', {\n admins: [myTrustedAdminRole],\n});\n\nconst secondKey = new kms.Key(stack, 'MyKey2');\nsecondKey.grantAdmin(myTrustedAdminRole);\n```\n\nAlternatively, a custom key policy can be specified, which will replace the default key policy.\n\n> **Note**: In applications without the '@aws-cdk/aws-kms:defaultKeyPolicies' feature flag set\nand with `trustedAccountIdentities` set to false (the default), specifying a policy at key creation _appends_ the\nprovided policy to the default key policy, rather than _replacing_ the default policy.\n\n```ts\nconst myTrustedAdminRole = iam.Role.fromRoleArn(stack, 'TrustedRole', 'arn:aws:iam:....');\n// Creates a limited admin policy and assigns to the account root.\nconst myCustomPolicy = new iam.PolicyDocument({\n statements: [new iam.PolicyStatement({\n actions: [\n 'kms:Create*',\n 'kms:Describe*',\n 'kms:Enable*',\n 'kms:List*',\n 'kms:Put*',\n ],\n principals: [new iam.AccountRootPrincipal()],\n resources: ['*'],\n })],\n});\nconst key = new kms.Key(stack, 'MyKey', {\n policy: myCustomPolicy,\n});\n```\n\n> **Warning:** Replacing the default key policy with one that only grants access to a specific user or role\nruns the 
risk of the key becoming unmanageable if that user or role is deleted.\nIt is highly recommended that the key policy grants access to the account root, rather than specific principals.\nSee https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html for more information.\n"
|
|
2117
|
-
},
|
|
2118
1513
|
"targets": {
|
|
2119
1514
|
"dotnet": {
|
|
2120
1515
|
"namespace": "Amazon.CDK.AWS.KMS"
|
|
@@ -2128,10 +1523,6 @@
|
|
|
2128
1523
|
}
|
|
2129
1524
|
},
|
|
2130
1525
|
"aws-cdk-lib.aws_lakeformation": {
|
|
2131
|
-
"locationInModule": {
|
|
2132
|
-
"filename": "lib/index.ts",
|
|
2133
|
-
"line": 115
|
|
2134
|
-
},
|
|
2135
1526
|
"targets": {
|
|
2136
1527
|
"dotnet": {
|
|
2137
1528
|
"namespace": "Amazon.CDK.AWS.LakeFormation"
|
|
@@ -2145,13 +1536,6 @@
|
|
|
2145
1536
|
}
|
|
2146
1537
|
},
|
|
2147
1538
|
"aws-cdk-lib.aws_lambda": {
|
|
2148
|
-
"locationInModule": {
|
|
2149
|
-
"filename": "lib/index.ts",
|
|
2150
|
-
"line": 116
|
|
2151
|
-
},
|
|
2152
|
-
"readme": {
|
|
2153
|
-
"markdown": "# AWS Lambda Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis construct library allows you to define AWS Lambda Functions.\n\n```ts\nconst fn = new Function(this, 'MyFunction', {\n runtime: Runtime.NODEJS_12_X,\n handler: 'index.handler',\n code: Code.fromAsset(path.join(__dirname, 'lambda-handler')),\n});\n```\n\n## Handler Code\n\nThe `lambda.Code` class includes static convenience methods for various types of\nruntime code.\n\n * `lambda.Code.fromBucket(bucket, key[, objectVersion])` - specify an S3 object\n that contains the archive of your runtime code.\n * `lambda.Code.fromInline(code)` - inline the handle code as a string. This is\n limited to supported runtimes and the code cannot exceed 4KiB.\n * `lambda.Code.fromAsset(path)` - specify a directory or a .zip file in the local\n filesystem which will be zipped and uploaded to S3 before deployment. See also\n [bundling asset code](#bundling-asset-code).\n * `lambda.Code.fromDockerBuild(path, options)` - use the result of a Docker\n build as code. The runtime code is expected to be located at `/asset` in the\n image and will be zipped and uploaded to S3 as an asset.\n\nThe following example shows how to define a Python function and deploy the code\nfrom the local directory `my-lambda-handler` to it:\n\n[Example of Lambda Code from Local Assets](test/integ.assets.lit.ts)\n\nWhen deploying a stack that contains this code, the directory will be zip\narchived and then uploaded to an S3 bucket, then the exact location of the S3\nobjects will be passed when the stack is deployed.\n\nDuring synthesis, the CDK expects to find a directory on disk at the asset\ndirectory specified. Note that we are referencing the asset directory relatively\nto our CDK project directory. This is especially important when we want to share\nthis construct through a library. 
Different programming languages will have\ndifferent techniques for bundling resources into libraries.\n\n## Docker Images\n\nLambda functions allow specifying their handlers within docker images. The docker\nimage can be an image from ECR or a local asset that the CDK will package and load\ninto ECR.\n\nThe following `DockerImageFunction` construct uses a local folder with a\nDockerfile as the asset that will be used as the function handler.\n\n```ts\nnew DockerImageFunction(this, 'AssetFunction', {\n code: DockerImageCode.fromImageAsset(path.join(__dirname, 'docker-handler')),\n});\n```\n\nYou can also specify an image that already exists in ECR as the function handler.\n\n```ts\nimport { aws_ecr as ecr } from 'aws-cdk-lib';\nconst repo = new ecr.Repository(this, 'Repository');\n\nnew DockerImageFunction(this, 'ECRFunction', {\n code: DockerImageCode.fromEcr(repo),\n});\n```\n\n## Execution Role\n\nLambda functions assume an IAM role during execution. In CDK by default, Lambda\nfunctions will use an autogenerated Role if one is not provided.\n\nThe autogenerated Role is automatically given permissions to execute the Lambda\nfunction. To reference the autogenerated Role:\n\n```ts\nconst fn = new Function(this, 'MyFunction', {\n runtime: Runtime.NODEJS_12_X,\n handler: 'index.handler',\n code: Code.fromAsset(path.join(__dirname, 'lambda-handler')),\n});\n\nfn.role // the Role\n```\n\nYou can also provide your own IAM role. Provided IAM roles will not automatically\nbe given permissions to execute the Lambda function. 
To provide a role and grant\nit appropriate permissions:\n\n```ts\nimport { aws_iam as iam } from 'aws-cdk-lib';\nconst myRole = new iam.Role(this, 'My Role', {\n assumedBy: new iam.ServicePrincipal('sns.amazonaws.com'),\n});\nconst fn = new Function(this, 'MyFunction', {\n runtime: Runtime.NODEJS_12_X,\n handler: 'index.handler',\n code: Code.fromAsset(path.join(__dirname, 'lambda-handler')),\n role: myRole // user-provided role\n});\n\nmyRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName(\"service-role/AWSLambdaBasicExecutionRole\"));\nmyRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName(\"service-role/AWSLambdaVPCAccessExecutionRole\")); // only required if your function lives in a VPC\n```\n\n## Resource-based Policies\n\nAWS Lambda supports resource-based policies for controlling access to Lambda\nfunctions and layers on a per-resource basis. In particular, this allows you to\ngive permission to AWS services and other AWS accounts to modify and invoke your\nfunctions. You can also restrict permissions given to AWS services by providing\na source account or ARN (representing the account and identifier of the resource\nthat accesses the function or layer).\n\n```ts fixture=function\nimport { aws_iam as iam } from 'aws-cdk-lib';\nconst principal = new iam.ServicePrincipal('my-service');\n\nfn.grantInvoke(principal);\n\n// Equivalent to:\nfn.addPermission('my-service Invocation', {\n principal: principal,\n});\n```\n\nFor more information, see [Resource-based\npolicies](https://docs.aws.amazon.com/lambda/latest/dg/access-control-resource-based.html)\nin the AWS Lambda Developer Guide.\n\nProviding an unowned principal (such as account principals, generic ARN\nprincipals, service principals, and principals in other accounts) to a call to\n`fn.grantInvoke` will result in a resource-based policy being created. 
If the\nprincipal in question has conditions limiting the source account or ARN of the\noperation (see above), these conditions will be automatically added to the\nresource policy.\n\n```ts fixture=function\nimport { aws_iam as iam } from 'aws-cdk-lib';\nconst servicePrincipal = new iam.ServicePrincipal('my-service');\nconst sourceArn = 'arn:aws:s3:::my-bucket';\nconst sourceAccount = '111122223333';\nconst servicePrincipalWithConditions = servicePrincipal.withConditions({\n ArnLike: {\n 'aws:SourceArn': sourceArn,\n },\n StringEquals: {\n 'aws:SourceAccount': sourceAccount,\n },\n});\n\nfn.grantInvoke(servicePrincipalWithConditions);\n\n// Equivalent to:\nfn.addPermission('my-service Invocation', {\n principal: servicePrincipal,\n sourceArn: sourceArn,\n sourceAccount: sourceAccount,\n});\n```\n\n## Versions\n\nYou can use\n[versions](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)\nto manage the deployment of your AWS Lambda functions. For example, you can\npublish a new version of a function for beta testing without affecting users of\nthe stable production version.\n\nThe function version includes the following information:\n\n* The function code and all associated dependencies.\n* The Lambda runtime that executes the function.\n* All of the function settings, including the environment variables.\n* A unique Amazon Resource Name (ARN) to identify this version of the function.\n\nYou could create a version to your lambda function using the `Version` construct.\n\n```ts\nconst fn = new Function(this, 'MyFunction', ...);\nconst version = new Version(this, 'MyVersion', {\n lambda: fn,\n});\n```\n\nThe major caveat to know here is that a function version must always point to a\nspecific 'version' of the function. 
When the function is modified, the version\nwill continue to point to the 'then version' of the function.\n\nOne way to ensure that the `lambda.Version` always points to the latest version\nof your `lambda.Function` is to set an environment variable which changes at\nleast as often as your code does. This makes sure the function always has the\nlatest code. For instance -\n\n```ts\nconst codeVersion = \"stringOrMethodToGetCodeVersion\";\nconst fn = new lambda.Function(this, 'MyFunction', {\n environment: {\n 'CodeVersionString': codeVersion\n }\n});\n```\n\nThe `fn.latestVersion` property returns a `lambda.IVersion` which represents\nthe `$LATEST` pseudo-version.\n\nHowever, most AWS services require a specific AWS Lambda version,\nand won't allow you to use `$LATEST`. Therefore, you would normally want\nto use `lambda.currentVersion`.\n\nThe `fn.currentVersion` property can be used to obtain a `lambda.Version`\nresource that represents the AWS Lambda function defined in your application.\nAny change to your function's code or configuration will result in the creation\nof a new version resource. You can specify options for this version through the\n`currentVersionOptions` property.\n\nNOTE: The `currentVersion` property is only supported when your AWS Lambda function\nuses either `lambda.Code.fromAsset` or `lambda.Code.fromInline`. Other types\nof code providers (such as `lambda.Code.fromBucket`) require that you define a\n`lambda.Version` resource directly since the CDK is unable to determine if\ntheir contents had changed.\n\n### `currentVersion`: Updated hashing logic\n\nTo produce a new lambda version each time the lambda function is modified, the\n`currentVersion` property under the hood, computes a new logical id based on the\nproperties of the function. 
This informs CloudFormation that a new\n`AWS::Lambda::Version` resource should be created pointing to the updated Lambda\nfunction.\n\nHowever, a bug was introduced in this calculation that caused the logical id to\nchange when it was not required (ex: when the Function's `Tags` property, or\nwhen the `DependsOn` clause was modified). This caused the deployment to fail\nsince the Lambda service does not allow creating duplicate versions.\n\nThis has been fixed in the AWS CDK but *existing* users need to opt-in via a\n[feature flag]. Users who have run `cdk init` since this fix will be opted in,\nby default.\n\nExisting users will need to enable the [feature flag]\n`@aws-cdk/aws-lambda:recognizeVersionProps`. Since CloudFormation does not\nallow duplicate versions, they will also need to make some modification to\ntheir function so that a new version can be created. Any trivial change such as\na whitespace change in the code or a no-op environment variable will suffice.\n\nWhen the new logic is in effect, you may rarely come across the following error:\n`The following properties are not recognized as version properties`. 
This will\noccur, typically when [property overrides] are used, when a new property\nintroduced in `AWS::Lambda::Function` is used that CDK is still unaware of.\n\nTo overcome this error, use the API `Function.classifyVersionProperty()` to\nrecord whether a new version should be generated when this property is changed.\nThis can be typically determined by checking whether the property can be\nmodified using the *[UpdateFunctionConfiguration]* API or not.\n\n[feature flag]: https://docs.aws.amazon.com/cdk/latest/guide/featureflags.html\n[property overrides]: https://docs.aws.amazon.com/cdk/latest/guide/cfn_layer.html#cfn_layer_raw\n[UpdateFunctionConfiguration]: https://docs.aws.amazon.com/lambda/latest/dg/API_UpdateFunctionConfiguration.html\n\n## Aliases\n\nYou can define one or more\n[aliases](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)\nfor your AWS Lambda function. A Lambda alias is like a pointer to a specific\nLambda function version. Users can access the function version using the alias\nARN.\n\nThe `version.addAlias()` method can be used to define an AWS Lambda alias that\npoints to a specific version.\n\nThe following example defines an alias named `live` which will always point to a\nversion that represents the function as defined in your CDK app. When you change\nyour lambda code or configuration, a new resource will be created. 
You can\nspecify options for the current version through the `currentVersionOptions`\nproperty.\n\n```ts\nimport * as cdk from 'aws-cdk-lib';\n\nconst fn = new Function(this, 'MyFunction', {\n currentVersionOptions: {\n removalPolicy: cdk.RemovalPolicy.RETAIN, // retain old versions\n retryAttempts: 1 // async retry attempts\n },\n runtime: Runtime.NODEJS_12_X,\n handler: 'index.handler',\n code: Code.fromAsset(path.join(__dirname, 'lambda-handler')),\n});\n\nfn.currentVersion.addAlias('live');\n```\n\n## Layers\n\nThe `lambda.LayerVersion` class can be used to define Lambda layers and manage\ngranting permissions to other AWS accounts or organizations.\n\n[Example of Lambda Layer usage](test/integ.layer-version.lit.ts)\n\nBy default, updating a layer creates a new layer version, and CloudFormation will delete the old version as part of the stack update.\n\nAlternatively, a removal policy can be used to retain the old version:\n\n```ts\nimport * as cdk from 'aws-cdk-lib';\n\nnew LayerVersion(this, 'MyLayer', {\n removalPolicy: cdk.RemovalPolicy.RETAIN,\n code: Code.fromAsset(path.join(__dirname, 'lambda-handler')),\n});\n```\n\n## Lambda Insights\n\nLambda functions can be configured to use CloudWatch [Lambda Insights](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Lambda-Insights.html)\nwhich provides low-level runtime metrics for a Lambda functions.\n\n```ts\nimport { lambda as lambda } from 'aws-cdk-lib';\n\nnew Function(this, 'MyFunction', {\n insightsVersion: lambda.LambdaInsightsVersion.VERSION_1_0_98_0\n})\n```\n\nIf the version of insights is not yet available in the CDK, you can also provide the ARN directly as so -\n\n```ts\nconst layerArn = 'arn:aws:lambda:us-east-1:580247275435:layer:LambdaInsightsExtension:14';\nnew Function(this, 'MyFunction', {\n insightsVersion: lambda.LambdaInsightsVersion.fromInsightVersionArn(layerArn)\n})\n```\n\n## Event Rule Target\n\nYou can use an AWS Lambda function as a target for an Amazon CloudWatch 
event\nrule:\n\n```ts fixture=function\nimport { aws_events as events } from 'aws-cdk-lib';\nimport { aws_events_targets as targets } from 'aws-cdk-lib';\nconst rule = new events.Rule(this, 'Schedule Rule', {\n schedule: events.Schedule.cron({ minute: '0', hour: '4' }),\n});\nrule.addTarget(new targets.LambdaFunction(fn));\n```\n\n## Event Sources\n\nAWS Lambda supports a [variety of event sources](https://docs.aws.amazon.com/lambda/latest/dg/invoking-lambda-function.html).\n\nIn most cases, it is possible to trigger a function as a result of an event by\nusing one of the `add<Event>Notification` methods on the source construct. For\nexample, the `s3.Bucket` construct has an `onEvent` method which can be used to\ntrigger a Lambda when an event, such as PutObject occurs on an S3 bucket.\n\nAn alternative way to add event sources to a function is to use `function.addEventSource(source)`.\nThis method accepts an `IEventSource` object. The module __@aws-cdk/aws-lambda-event-sources__\nincludes classes for the various event sources supported by AWS Lambda.\n\nFor example, the following code adds an SQS queue as an event source for a function:\n\n```ts fixture=function\nimport { aws_lambda_event_sources as eventsources } from 'aws-cdk-lib';\nimport { aws_sqs as sqs } from 'aws-cdk-lib';\nconst queue = new sqs.Queue(this, 'Queue');\nfn.addEventSource(new eventsources.SqsEventSource(queue));\n```\n\nThe following code adds an S3 bucket notification as an event source:\n\n```ts fixture=function\nimport { aws_lambda_event_sources as eventsources } from 'aws-cdk-lib';\nimport { aws_s3 as s3 } from 'aws-cdk-lib';\nconst bucket = new s3.Bucket(this, 'Bucket');\nfn.addEventSource(new eventsources.S3EventSource(bucket, {\n events: [ s3.EventType.OBJECT_CREATED, s3.EventType.OBJECT_REMOVED ],\n filters: [ { prefix: 'subdir/' } ] // optional\n}));\n```\n\nSee the documentation for the __@aws-cdk/aws-lambda-event-sources__ module for more details.\n\n## Lambda with DLQ\n\nA 
dead-letter queue can be automatically created for a Lambda function by\nsetting the `deadLetterQueueEnabled: true` configuration.\n\n```ts\nconst fn = new Function(this, 'MyFunction', {\n runtime: Runtime.NODEJS_12_X,\n handler: 'index.handler',\n code: Code.fromInline('exports.handler = function(event, ctx, cb) { return cb(null, \"hi\"); }'),\n deadLetterQueueEnabled: true\n});\n```\n\nIt is also possible to provide a dead-letter queue instead of getting a new queue created:\n\n```ts\nimport { aws_sqs as sqs } from 'aws-cdk-lib';\n\nconst dlq = new sqs.Queue(this, 'DLQ');\nconst fn = new Function(this, 'MyFunction', {\n runtime: Runtime.NODEJS_12_X,\n handler: 'index.handler',\n code: Code.fromInline('exports.handler = function(event, ctx, cb) { return cb(null, \"hi\"); }'),\n deadLetterQueue: dlq\n});\n```\n\nSee [the AWS documentation](https://docs.aws.amazon.com/lambda/latest/dg/dlq.html)\nto learn more about AWS Lambdas and DLQs.\n\n## Lambda with X-Ray Tracing\n\n```ts\nconst fn = new Function(this, 'MyFunction', {\n runtime: Runtime.NODEJS_12_X,\n handler: 'index.handler',\n code: Code.fromInline('exports.handler = function(event, ctx, cb) { return cb(null, \"hi\"); }'),\n tracing: Tracing.ACTIVE\n});\n```\n\nSee [the AWS documentation](https://docs.aws.amazon.com/lambda/latest/dg/lambda-x-ray.html)\nto learn more about AWS Lambda's X-Ray support.\n\n## Lambda with Profiling\n\nThe following code configures the lambda function with CodeGuru profiling. 
By default, this creates a new CodeGuru\nprofiling group -\n\n```ts\nimport { aws_lambda as lambda } from 'aws-cdk-lib';\n\nconst fn = new Function(this, 'MyFunction', {\n runtime: Runtime.PYTHON_3_6,\n handler: 'index.handler',\n code: Code.fromAsset('lambda-handler'),\n profiling: true\n});\n```\n\nThe `profilingGroup` property can be used to configure an existing CodeGuru profiler group.\n\nCodeGuru profiling is supported for all Java runtimes and Python3.6+ runtimes.\n\nSee [the AWS documentation](https://docs.aws.amazon.com/codeguru/latest/profiler-ug/setting-up-lambda.html)\nto learn more about AWS Lambda's Profiling support.\n\n## Lambda with Reserved Concurrent Executions\n\n```ts\nconst fn = new Function(this, 'MyFunction', {\n runtime: Runtime.NODEJS_12_X,\n handler: 'index.handler',\n code: Code.fromInline('exports.handler = function(event, ctx, cb) { return cb(null, \"hi\"); }'),\n reservedConcurrentExecutions: 100\n});\n```\n\nSee [the AWS documentation](https://docs.aws.amazon.com/lambda/latest/dg/concurrent-executions.html)\nmanaging concurrency.\n\n## AutoScaling\n\nYou can use Application AutoScaling to automatically configure the provisioned concurrency for your functions. AutoScaling can be set to track utilization or be based on a schedule. 
To configure AutoScaling on a function alias:\n\n```ts fixture=function\nimport { aws_autoscaling as autoscaling } from 'aws-cdk-lib';\nconst alias = new Alias(this, 'Alias', {\n aliasName: 'prod',\n version: fn.latestVersion,\n});\n\n// Create AutoScaling target\nconst as = alias.addAutoScaling({ maxCapacity: 50 })\n\n// Configure Target Tracking\nas.scaleOnUtilization({\n utilizationTarget: 0.5,\n});\n\n// Configure Scheduled Scaling\nas.scaleOnSchedule('ScaleUpInTheMorning', {\n schedule: autoscaling.Schedule.cron({ hour: '8', minute: '0'}),\n minCapacity: 20,\n});\n```\n\n[Example of Lambda AutoScaling usage](test/integ.autoscaling.lit.ts)\n\nSee [the AWS documentation](https://docs.aws.amazon.com/lambda/latest/dg/invocation-scaling.html) on autoscaling lambda functions.\n\n## Log Group\n\nLambda functions automatically create a log group with the name `/aws/lambda/<function-name>` upon first execution with\nlog data set to never expire.\n\nThe `logRetention` property can be used to set a different expiration period.\n\nIt is possible to obtain the function's log group as a `logs.ILogGroup` by calling the `logGroup` property of the\n`Function` construct.\n\nBy default, CDK uses the AWS SDK retry options when creating a log group. 
The `logRetentionRetryOptions` property\nallows you to customize the maximum number of retries and base backoff duration.\n\n*Note* that, if either `logRetention` is set or `logGroup` property is called, a [CloudFormation custom\nresource](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cfn-customresource.html) is added\nto the stack that pre-creates the log group as part of the stack deployment, if it already doesn't exist, and sets the\ncorrect log retention period (never expire, by default).\n\n*Further note* that, if the log group already exists and the `logRetention` is not set, the custom resource will reset\nthe log retention to never expire even if it was configured with a different value.\n\n## FileSystem Access\n\nYou can configure a function to mount an Amazon Elastic File System (Amazon EFS) to a\ndirectory in your runtime environment with the `filesystem` property. To access Amazon EFS\nfrom lambda function, the Amazon EFS access point will be required.\n\nThe following sample allows the lambda function to mount the Amazon EFS access point to `/mnt/msg` in the runtime environment and access the filesystem with the POSIX identity defined in `posixUser`.\n\n```ts\nimport { aws_ec2 as ec2 } from 'aws-cdk-lib';\nimport { aws_efs as efs } from 'aws-cdk-lib';\n\n// create a new VPC\nconst vpc = new ec2.Vpc(this, 'VPC');\n\n// create a new Amazon EFS filesystem\nconst fileSystem = new efs.FileSystem(this, 'Efs', { vpc });\n\n// create a new access point from the filesystem\nconst accessPoint = fileSystem.addAccessPoint('AccessPoint', {\n // set /export/lambda as the root of the access point\n path: '/export/lambda',\n // as /export/lambda does not exist in a new efs filesystem, the efs will create the directory with the following createAcl\n createAcl: {\n ownerUid: '1001',\n ownerGid: '1001',\n permissions: '750',\n },\n // enforce the POSIX identity so lambda function will access with this identity\n posixUser: {\n uid: '1001',\n 
gid: '1001',\n },\n});\n\nconst fn = new Function(this, 'MyLambda', {\n // mount the access point to /mnt/msg in the lambda runtime environment\n filesystem: FileSystem.fromEfsAccessPoint(accessPoint, '/mnt/msg'),\n runtime: Runtime.NODEJS_12_X,\n handler: 'index.handler',\n code: Code.fromAsset(path.join(__dirname, 'lambda-handler')),\n vpc,\n});\n```\n\n\n## Singleton Function\n\nThe `SingletonFunction` construct is a way to guarantee that a lambda function will be guaranteed to be part of the stack,\nonce and only once, irrespective of how many times the construct is declared to be part of the stack. This is guaranteed\nas long as the `uuid` property and the optional `lambdaPurpose` property stay the same whenever they're declared into the\nstack.\n\nA typical use case of this function is when a higher level construct needs to declare a Lambda function as part of it but\nneeds to guarantee that the function is declared once. However, a user of this higher level construct can declare it any\nnumber of times and with different properties. Using `SingletonFunction` here with a fixed `uuid` will guarantee this.\n\nFor example, the `LogRetention` construct requires only one single lambda function for all different log groups whose\nretention it seeks to manage.\n\n## Bundling Asset Code\n\nWhen using `lambda.Code.fromAsset(path)` it is possible to bundle the code by running a\ncommand in a Docker container. The asset path will be mounted at `/asset-input`. The\nDocker container is responsible for putting content at `/asset-output`. The content at\n`/asset-output` will be zipped and used as Lambda code.\n\nExample with Python:\n\n```ts\nnew Function(this, 'Function', {\n code: Code.fromAsset(path.join(__dirname, 'my-python-handler'), {\n bundling: {\n image: Runtime.PYTHON_3_9.bundlingImage,\n command: [\n 'bash', '-c',\n 'pip install -r requirements.txt -t /asset-output && cp -au . 
/asset-output'\n ],\n },\n }),\n runtime: Runtime.PYTHON_3_9,\n handler: 'index.handler',\n});\n```\n\nRuntimes expose a `bundlingImage` property that points to the [AWS SAM](https://github.com/awslabs/aws-sam-cli) build image.\n\nUse `cdk.DockerImage.fromRegistry(image)` to use an existing image or\n`cdk.DockerImage.fromBuild(path)` to build a specific image:\n\n```ts\nimport * as cdk from 'aws-cdk-lib';\n\nnew Function(this, 'Function', {\n code: Code.fromAsset('/path/to/handler', {\n bundling: {\n image: cdk.DockerImage.fromBuild('/path/to/dir/with/DockerFile', {\n buildArgs: {\n ARG1: 'value1',\n },\n }),\n command: ['my', 'cool', 'command'],\n },\n }),\n runtime: Runtime.PYTHON_3_9,\n handler: 'index.handler',\n});\n```\n\n## Language-specific APIs\n\nLanguage-specific higher level constructs are provided in separate modules:\n\n* `@aws-cdk/aws-lambda-nodejs`: [Github](https://github.com/aws/aws-cdk/tree/master/packages/%40aws-cdk/aws-lambda-nodejs) & [CDK Docs](https://docs.aws.amazon.com/cdk/api/latest/docs/aws-lambda-nodejs-readme.html)\n* `@aws-cdk/aws-lambda-python`: [Github](https://github.com/aws/aws-cdk/tree/master/packages/%40aws-cdk/aws-lambda-python) & [CDK Docs](https://docs.aws.amazon.com/cdk/api/latest/docs/aws-lambda-python-readme.html)\n\n## Code Signing\n\nCode signing for AWS Lambda helps to ensure that only trusted code runs in your Lambda functions. 
\nWhen enabled, AWS Lambda checks every code deployment and verifies that the code package is signed by a trusted source.\nFor more information, see [Configuring code signing for AWS Lambda](https://docs.aws.amazon.com/lambda/latest/dg/configuration-codesigning.html).\nThe following code configures a function with code signing.\n\n```typescript\nimport { aws_signer as signer } from 'aws-cdk-lib';\n\nconst signingProfile = new signer.SigningProfile(this, 'SigningProfile', {\n platform: signer.Platform.AWS_LAMBDA_SHA384_ECDSA\n});\n\nconst codeSigningConfig = new CodeSigningConfig(this, 'CodeSigningConfig', {\n signingProfiles: [signingProfile],\n});\n\nnew Function(this, 'Function', {\n codeSigningConfig,\n runtime: Runtime.NODEJS_12_X,\n handler: 'index.handler',\n code: Code.fromAsset(path.join(__dirname, 'lambda-handler')),\n});\n```\n"
|
|
2154
|
-
},
|
|
2155
1539
|
"targets": {
|
|
2156
1540
|
"dotnet": {
|
|
2157
1541
|
"namespace": "Amazon.CDK.AWS.Lambda"
|
|
@@ -2165,13 +1549,6 @@
|
|
|
2165
1549
|
}
|
|
2166
1550
|
},
|
|
2167
1551
|
"aws-cdk-lib.aws_lambda_destinations": {
|
|
2168
|
-
"locationInModule": {
|
|
2169
|
-
"filename": "lib/index.ts",
|
|
2170
|
-
"line": 117
|
|
2171
|
-
},
|
|
2172
|
-
"readme": {
|
|
2173
|
-
"markdown": "# Amazon Lambda Destinations Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis library provides constructs for adding destinations to a Lambda function.\nDestinations can be added by specifying the `onFailure` or `onSuccess` props when creating a function or alias.\n\n## Destinations\n\nThe following destinations are supported\n\n* Lambda function\n* SQS queue\n* SNS topic\n* EventBridge event bus\n\nExample with a SNS topic for successful invocations:\n\n```ts\nimport { aws_lambda as lambda } from 'aws-cdk-lib';\nimport { aws_lambda_destinations as destinations } from 'aws-cdk-lib';\nimport { aws_sns as sns } from 'aws-cdk-lib';\n\nconst myTopic = new sns.Topic(this, 'Topic');\n\nconst myFn = new lambda.Function(this, 'Fn', {\n // other props\n onSuccess: new destinations.SnsDestination(myTopic)\n})\n```\n\nSee also [Configuring Destinations for Asynchronous Invocation](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations).\n\n### Invocation record\n\nWhen a lambda function is configured with a destination, an invocation record is created by the Lambda service\nwhen the lambda function completes. 
The invocation record contains the details of the function, its context, and\nthe request and response payloads.\n\nThe following example shows the format of the invocation record for a successful invocation:\n\n```json\n{\n\t\"version\": \"1.0\",\n\t\"timestamp\": \"2019-11-24T23:08:25.651Z\",\n\t\"requestContext\": {\n\t\t\"requestId\": \"c2a6f2ae-7dbb-4d22-8782-d0485c9877e2\",\n\t\t\"functionArn\": \"arn:aws:lambda:sa-east-1:123456789123:function:event-destinations:$LATEST\",\n\t\t\"condition\": \"Success\",\n\t\t\"approximateInvokeCount\": 1\n\t},\n\t\"requestPayload\": {\n\t\t\"Success\": true\n\t},\n\t\"responseContext\": {\n\t\t\"statusCode\": 200,\n\t\t\"executedVersion\": \"$LATEST\"\n\t},\n\t\"responsePayload\": \"<data returned by the function here>\"\n}\n```\n\nIn case of failure, the record contains the reason and error object:\n\n```json\n{\n \"version\": \"1.0\",\n \"timestamp\": \"2019-11-24T21:52:47.333Z\",\n \"requestContext\": {\n \"requestId\": \"8ea123e4-1db7-4aca-ad10-d9ca1234c1fd\",\n \"functionArn\": \"arn:aws:lambda:sa-east-1:123456678912:function:event-destinations:$LATEST\",\n \"condition\": \"RetriesExhausted\",\n \"approximateInvokeCount\": 3\n },\n \"requestPayload\": {\n \"Success\": false\n },\n \"responseContext\": {\n \"statusCode\": 200,\n \"executedVersion\": \"$LATEST\",\n \"functionError\": \"Handled\"\n },\n \"responsePayload\": {\n \"errorMessage\": \"Failure from event, Success = false, I am failing!\",\n \"errorType\": \"Error\",\n \"stackTrace\": [ \"exports.handler (/var/task/index.js:18:18)\" ]\n }\n}\n```\n\n#### Destination-specific JSON format\n\n* For SNS/SQS (`SnsDestionation`/`SqsDestination`), the invocation record JSON is passed as the `Message` to the destination.\n* For Lambda (`LambdaDestination`), the invocation record JSON is passed as the payload to the function.\n* For EventBridge (`EventBridgeDestination`), the invocation record JSON is passed as the `detail` in the PutEvents call.\nThe value for the 
event field `source` is `lambda`, and the value for the event field `detail-type`\nis either 'Lambda Function Invocation Result - Success' or 'Lambda Function Invocation Result – Failure',\ndepending on whether the lambda function invocation succeeded or failed. The event field `resource`\ncontains the function and destination ARNs. See [AWS Events](https://docs.aws.amazon.com/eventbridge/latest/userguide/aws-events.html)\nfor the different event fields.\n\n### Auto-extract response payload with lambda destination\n\nThe `responseOnly` option of `LambdaDestination` allows to auto-extract the response payload from the\ninvocation record:\n\n```ts\nimport { aws_lambda as lambda } from 'aws-cdk-lib';\nimport { aws_lambda_destinations as destinations } from 'aws-cdk-lib';\n\nconst destinationFn = new lambda.Function(this, 'Destination', {\n // props\n});\n\nconst sourceFn = new lambda.Function(this, 'Source', {\n // other props\n onSuccess: new destinations.LambdaDestination(destinationFn, {\n responseOnly: true // auto-extract\n });\n})\n```\n\nIn the above example, `destinationFn` will be invoked with the payload returned by `sourceFn`\n(`responsePayload` in the invocation record, not the full record).\n\nWhen used with `onFailure`, the destination function is invoked with the error object returned\nby the source function.\n\nUsing the `responseOnly` option allows to easily chain asynchronous Lambda functions without\nhaving to deal with data extraction in the runtime code.\n"
|
|
2174
|
-
},
|
|
2175
1552
|
"targets": {
|
|
2176
1553
|
"dotnet": {
|
|
2177
1554
|
"namespace": "Amazon.CDK.AWS.Lambda.Destinations"
|
|
@@ -2185,13 +1562,6 @@
|
|
|
2185
1562
|
}
|
|
2186
1563
|
},
|
|
2187
1564
|
"aws-cdk-lib.aws_lambda_event_sources": {
|
|
2188
|
-
"locationInModule": {
|
|
2189
|
-
"filename": "lib/index.ts",
|
|
2190
|
-
"line": 118
|
|
2191
|
-
},
|
|
2192
|
-
"readme": {
|
|
2193
|
-
"markdown": "# AWS Lambda Event Sources\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nAn event source mapping is an AWS Lambda resource that reads from an event source and invokes a Lambda function.\nYou can use event source mappings to process items from a stream or queue in services that don't invoke Lambda\nfunctions directly. Lambda provides event source mappings for the following services. Read more about lambda\nevent sources [here](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html).\n\nThis module includes classes that allow using various AWS services as event\nsources for AWS Lambda via the high-level `lambda.addEventSource(source)` API.\n\nNOTE: In most cases, it is also possible to use the resource APIs to invoke an\nAWS Lambda function. This library provides a uniform API for all Lambda event\nsources regardless of the underlying mechanism they use.\n\nThe following code sets up a lambda function with an SQS queue event source -\n\n```ts\nconst fn = new lambda.Function(this, 'MyFunction', { /* ... */ });\n\nconst queue = new sqs.Queue(this, 'MyQueue');\nconst eventSource = fn.addEventSource(new SqsEventSource(queue));\n\nconst eventSourceId = eventSource.eventSourceId;\n```\n\nThe `eventSourceId` property contains the event source id. This will be a\n[token](https://docs.aws.amazon.com/cdk/latest/guide/tokens.html) that will resolve to the final value at the time of\ndeployment.\n\n## SQS\n\nAmazon Simple Queue Service (Amazon SQS) allows you to build asynchronous\nworkflows. For more information about Amazon SQS, see Amazon Simple Queue\nService. You can configure AWS Lambda to poll for these messages as they arrive\nand then pass the event to a Lambda function invocation. 
To view a sample event,\nsee [Amazon SQS Event](https://docs.aws.amazon.com/lambda/latest/dg/eventsources.html#eventsources-sqs).\n\nTo set up Amazon Simple Queue Service as an event source for AWS Lambda, you\nfirst create or update an Amazon SQS queue and select custom values for the\nqueue parameters. The following parameters will impact Amazon SQS's polling\nbehavior:\n\n* __visibilityTimeout__: May impact the period between retries.\n* __receiveMessageWaitTime__: Will determine [long\n poll](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-long-polling.html)\n duration. The default value is 20 seconds.\n* __batchSize__: Determines how many records are buffered before invoking your lambda function.\n* __maxBatchingWindow__: The maximum amount of time to gather records before invoking the lambda. This increases the likelihood of a full batch at the cost of delayed processing.\n* __enabled__: If the SQS event source mapping should be enabled. The default is true.\n\n```ts\nimport { aws_sqs as sqs } from 'aws-cdk-lib';\nimport { SqsEventSource } from 'aws-cdk-lib/aws-lambda-event-sources';\nimport { Duration } from 'aws-cdk-lib';\n\nconst queue = new sqs.Queue(this, 'MyQueue', {\n visibilityTimeout: Duration.seconds(30) // default,\n receiveMessageWaitTime: Duration.seconds(20) // default\n});\n\nlambda.addEventSource(new SqsEventSource(queue, {\n batchSize: 10, // default\n maxBatchingWindow: Duration.minutes(5),\n}));\n```\n\n## S3\n\nYou can write Lambda functions to process S3 bucket events, such as the\nobject-created or object-deleted events. 
For example, when a user uploads a\nphoto to a bucket, you might want Amazon S3 to invoke your Lambda function so\nthat it reads the image and creates a thumbnail for the photo.\n\nYou can use the bucket notification configuration feature in Amazon S3 to\nconfigure the event source mapping, identifying the bucket events that you want\nAmazon S3 to publish and which Lambda function to invoke.\n\n```ts\nimport { aws_s3 as s3 } from 'aws-cdk-lib';\nimport { S3EventSource } from 'aws-cdk-lib/aws-lambda-event-sources';\n\nconst bucket = new s3.Bucket(...);\n\nlambda.addEventSource(new S3EventSource(bucket, {\n events: [ s3.EventType.OBJECT_CREATED, s3.EventType.OBJECT_REMOVED ],\n filters: [ { prefix: 'subdir/' } ] // optional\n}));\n```\n\n## SNS\n\nYou can write Lambda functions to process Amazon Simple Notification Service\nnotifications. When a message is published to an Amazon SNS topic, the service\ncan invoke your Lambda function by passing the message payload as a parameter.\nYour Lambda function code can then process the event, for example publish the\nmessage to other Amazon SNS topics, or send the message to other AWS services.\n\nThis also enables you to trigger a Lambda function in response to Amazon\nCloudWatch alarms and other AWS services that use Amazon SNS.\n\nFor an example event, see [Appendix: Message and JSON\nFormats](https://docs.aws.amazon.com/sns/latest/dg/json-formats.html) and\n[Amazon SNS Sample\nEvent](https://docs.aws.amazon.com/lambda/latest/dg/eventsources.html#eventsources-sns).\nFor an example use case, see [Using AWS Lambda with Amazon SNS from Different\nAccounts](https://docs.aws.amazon.com/lambda/latest/dg/with-sns.html).\n\n```ts\nimport { aws_sns as sns } from 'aws-cdk-lib';\nimport { SnsEventSource } from 'aws-cdk-lib/aws-lambda-event-sources';\n\nconst topic = new sns.Topic(...);\nconst deadLetterQueue = new sqs.Queue(this, 'deadLetterQueue');\n\nlambda.addEventSource(new SnsEventSource(topic, {\n filterPolicy: { ... 
},\n deadLetterQueue: deadLetterQueue\n}));\n```\n\nWhen a user calls the SNS Publish API on a topic that your Lambda function is\nsubscribed to, Amazon SNS will call Lambda to invoke your function\nasynchronously. Lambda will then return a delivery status. If there was an error\ncalling Lambda, Amazon SNS will retry invoking the Lambda function up to three\ntimes. After three tries, if Amazon SNS still could not successfully invoke the\nLambda function, then Amazon SNS will send a delivery status failure message to\nCloudWatch.\n\n## DynamoDB Streams\n\nYou can write Lambda functions to process change events from a DynamoDB Table. An event is emitted to a DynamoDB stream (if configured) whenever a write (Put, Delete, Update)\noperation is performed against the table. See [Using AWS Lambda with Amazon DynamoDB](https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html) for more information about configuring Lambda function event sources with DynamoDB.\n\nTo process events with a Lambda function, first create or update a DynamoDB table and enable a `stream` specification. Then, create a `DynamoEventSource`\nand add it to your Lambda function. The following parameters will impact Amazon DynamoDB's polling behavior:\n\n* __batchSize__: Determines how many records are buffered before invoking your lambda function - could impact your function's memory usage (if too high) and ability to keep up with incoming data velocity (if too low).\n* __bisectBatchOnError__: If a batch encounters an error, this will cause the batch to be split in two and have each new smaller batch retried, allowing the records in error to be isolated.\n* __reportBatchItemFailures__: Allow functions to return partially successful responses for a batch of records.\n* __maxBatchingWindow__: The maximum amount of time to gather records before invoking the lambda. 
This increases the likelihood of a full batch at the cost of delayed processing.\n* __maxRecordAge__: The maximum age of a record that will be sent to the function for processing. Records that exceed the max age will be treated as failures.\n* __onFailure__: In the event a record fails after all retries or if the record age has exceeded the configured value, the record will be sent to SQS queue or SNS topic that is specified here\n* __parallelizationFactor__: The number of batches to concurrently process on each shard.\n* __retryAttempts__: The maximum number of times a record should be retried in the event of failure.\n* __startingPosition__: Will determine where to being consumption, either at the most recent ('LATEST') record or the oldest record ('TRIM_HORIZON'). 'TRIM_HORIZON' will ensure you process all available data, while 'LATEST' will ignore all records that arrived prior to attaching the event source.\n* __tumblingWindow__: The duration in seconds of a processing window when using streams.\n* __enabled__: If the DynamoDB Streams event source mapping should be enabled. The default is true.\n\n```ts\nimport { aws_dynamodb as dynamodb } from 'aws-cdk-lib';\nimport { aws_lambda as lambda } from 'aws-cdk-lib';\nimport { aws_sqs as sqs } from 'aws-cdk-lib';\nimport { DynamoEventSource, SqsDlq } from 'aws-cdk-lib/aws-lambda-event-sources';\n\nconst table = new dynamodb.Table(..., {\n partitionKey: ...,\n stream: dynamodb.StreamViewType.NEW_IMAGE // make sure stream is configured\n});\n\nconst deadLetterQueue = new sqs.Queue(this, 'deadLetterQueue');\n\nconst function = new lambda.Function(...);\nfunction.addEventSource(new DynamoEventSource(table, {\n startingPosition: lambda.StartingPosition.TRIM_HORIZON,\n batchSize: 5,\n bisectBatchOnError: true,\n onFailure: new SqsDlq(deadLetterQueue),\n retryAttempts: 10\n}));\n```\n\n## Kinesis\n\nYou can write Lambda functions to process streaming data in Amazon Kinesis Streams. 
For more information about Amazon Kinesis, see [Amazon Kinesis\nService](https://aws.amazon.com/kinesis/data-streams/). To learn more about configuring Lambda function event sources with kinesis and view a sample event,\nsee [Amazon Kinesis Event](https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html).\n\nTo set up Amazon Kinesis as an event source for AWS Lambda, you\nfirst create or update an Amazon Kinesis stream and select custom values for the\nevent source parameters. The following parameters will impact Amazon Kinesis's polling\nbehavior:\n\n* __batchSize__: Determines how many records are buffered before invoking your lambda function - could impact your function's memory usage (if too high) and ability to keep up with incoming data velocity (if too low).\n* __bisectBatchOnError__: If a batch encounters an error, this will cause the batch to be split in two and have each new smaller batch retried, allowing the records in error to be isolated.\n* __reportBatchItemFailures__: Allow functions to return partially successful responses for a batch of records.\n* __maxBatchingWindow__: The maximum amount of time to gather records before invoking the lambda. This increases the likelihood of a full batch at the cost of possibly delaying processing.\n* __maxRecordAge__: The maximum age of a record that will be sent to the function for processing. Records that exceed the max age will be treated as failures.\n* __onFailure__: In the event a record fails and consumes all retries, the record will be sent to SQS queue or SNS topic that is specified here\n* __parallelizationFactor__: The number of batches to concurrently process on each shard.\n* __retryAttempts__: The maximum number of times a record should be retried in the event of failure.\n* __startingPosition__: Will determine where to being consumption, either at the most recent ('LATEST') record or the oldest record ('TRIM_HORIZON'). 
'TRIM_HORIZON' will ensure you process all available data, while 'LATEST' will ignore all records that arrived prior to attaching the event source.\n* __tumblingWindow__: The duration in seconds of a processing window when using streams.\n* __enabled__: If the DynamoDB Streams event source mapping should be enabled. The default is true.\n\n```ts\nimport { aws_lambda as lambda } from 'aws-cdk-lib';\nimport { aws_kinesis as kinesis } from 'aws-cdk-lib';\nimport { KinesisEventSource } from 'aws-cdk-lib/aws-lambda-event-sources';\n\nconst stream = new kinesis.Stream(this, 'MyStream');\n\nmyFunction.addEventSource(new KinesisEventSource(stream, {\n batchSize: 100, // default\n startingPosition: lambda.StartingPosition.TRIM_HORIZON\n}));\n```\n\n## Kafka\n\nYou can write Lambda functions to process data either from [Amazon MSK](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html) or a [self managed Kafka](https://docs.aws.amazon.com/lambda/latest/dg/kafka-smaa.html) cluster.\n\nThe following code sets up Amazon MSK as an event source for a lambda function. 
Credentials will need to be configured to access the\nMSK cluster, as described in [Username/Password authentication](https://docs.aws.amazon.com/msk/latest/developerguide/msk-password.html).\n\n```ts\nimport { aws_lambda as lambda } from 'aws-cdk-lib';\nimport { aws_lambda as msk } from 'aws-cdk-lib';\nimport { Secret } from 'aws-cdk-lib/aws-secretmanager';\nimport { ManagedKafkaEventSource } from 'aws-cdk-lib/aws-lambda-event-sources';\n\n// Your MSK cluster arn\nconst cluster = 'arn:aws:kafka:us-east-1:0123456789019:cluster/SalesCluster/abcd1234-abcd-cafe-abab-9876543210ab-4';\n\n// The Kafka topic you want to subscribe to\nconst topic = 'some-cool-topic'\n\n// The secret that allows access to your MSK cluster\n// You still have to make sure that it is associated with your cluster as described in the documentation\nconst secret = new Secret(this, 'Secret', { secretName: 'AmazonMSK_KafkaSecret' });\n\nmyFunction.addEventSource(new ManagedKafkaEventSource({\n clusterArn,\n topic: topic,\n secret: secret,\n batchSize: 100, // default\n startingPosition: lambda.StartingPosition.TRIM_HORIZON\n}));\n```\n\nThe following code sets up a self managed Kafka cluster as an event source. Username and password based authentication\nwill need to be set up as described in [Managing access and permissions](https://docs.aws.amazon.com/lambda/latest/dg/smaa-permissions.html#smaa-permissions-add-secret).\n\n```ts\nimport { aws_lambda as lambda } from 'aws-cdk-lib';\nimport { Secret } from 'aws-cdk-lib/aws-secretmanager';\nimport { SelfManagedKafkaEventSource } from 'aws-cdk-lib/aws-lambda-event-sources';\n\n// The list of Kafka brokers\nconst bootstrapServers = ['kafka-broker:9092']\n\n// The Kafka topic you want to subscribe to\nconst topic = 'some-cool-topic'\n\n// The secret that allows access to your self hosted Kafka cluster\nconst secret = new Secret(this, 'Secret', { ... 
});\n\nmyFunction.addEventSource(new SelfManagedKafkaEventSource({\n bootstrapServers: bootstrapServers,\n topic: topic,\n secret: secret,\n batchSize: 100, // default\n startingPosition: lambda.StartingPosition.TRIM_HORIZON\n}));\n```\n\nIf your self managed Kafka cluster is only reachable via VPC also configure `vpc` `vpcSubnets` and `securityGroup`.\n\n## Roadmap\n\nEventually, this module will support all the event sources described under\n[Supported Event\nSources](https://docs.aws.amazon.com/lambda/latest/dg/invoking-lambda-function.html)\nin the AWS Lambda Developer Guide.\n"
|
|
2194
|
-
},
|
|
2195
1565
|
"targets": {
|
|
2196
1566
|
"dotnet": {
|
|
2197
1567
|
"namespace": "Amazon.CDK.AWS.Lambda.EventSources"
|
|
@@ -2205,13 +1575,6 @@
|
|
|
2205
1575
|
}
|
|
2206
1576
|
},
|
|
2207
1577
|
"aws-cdk-lib.aws_lambda_nodejs": {
|
|
2208
|
-
"locationInModule": {
|
|
2209
|
-
"filename": "lib/index.ts",
|
|
2210
|
-
"line": 119
|
|
2211
|
-
},
|
|
2212
|
-
"readme": {
|
|
2213
|
-
"markdown": "# Amazon Lambda Node.js Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis library provides constructs for Node.js Lambda functions.\n\n## Node.js Function\n\nThe `NodejsFunction` construct creates a Lambda function with automatic transpiling and bundling\nof TypeScript or Javascript code. This results in smaller Lambda packages that contain only the\ncode and dependencies needed to run the function.\n\nIt uses [esbuild](https://esbuild.github.io/) under the hood.\n\n## Reference project architecture\n\nThe `NodejsFunction` allows you to define your CDK and runtime dependencies in a single\npackage.json and to collocate your runtime code with your infrastructure code:\n\n```plaintext\n.\n├── lib\n│ ├── my-construct.api.ts # Lambda handler for API\n│ ├── my-construct.auth.ts # Lambda handler for Auth\n│ └── my-construct.ts # CDK construct with two Lambda functions\n├── package-lock.json # single lock file\n├── package.json # CDK and runtime dependencies defined in a single package.json\n└── tsconfig.json\n```\n\nBy default, the construct will use the name of the defining file and the construct's\nid to look up the entry file. 
In `my-construct.ts` above we have:\n\n```ts\n// automatic entry look up\nconst apiHandler = new lambda.NodejsFunction(this, 'api');\nconst authHandler = new lambda.NodejsFunction(this, 'auth');\n```\n\nAlternatively, an entry file and handler can be specified:\n\n```ts\nnew lambda.NodejsFunction(this, 'MyFunction', {\n entry: '/path/to/my/file.ts', // accepts .js, .jsx, .ts and .tsx files\n handler: 'myExportedFunc', // defaults to 'handler'\n});\n```\n\nFor monorepos, the reference architecture becomes:\n\n```plaintext\n.\n├── packages\n│ ├── cool-package\n│ │ ├── lib\n│ │ │ ├── cool-construct.api.ts\n│ │ │ ├── cool-construct.auth.ts\n│ │ │ └── cool-construct.ts\n│ │ ├── package.json # CDK and runtime dependencies for cool-package\n│ │ └── tsconfig.json\n│ └── super-package\n│ ├── lib\n│ │ ├── super-construct.handler.ts\n│ │ └── super-construct.ts\n│ ├── package.json # CDK and runtime dependencies for super-package\n│ └── tsconfig.json\n├── package-lock.json # single lock file\n├── package.json # root dependencies\n└── tsconfig.json\n```\n\n## Customizing the underlying Lambda function\n\nAll properties of `lambda.Function` can be used to customize the underlying `lambda.Function`.\n\nSee also the [AWS Lambda construct library](https://github.com/aws/aws-cdk/tree/master/packages/%40aws-cdk/aws-lambda).\n\nThe `NodejsFunction` construct automatically [reuses existing connections](https://docs.aws.amazon.com/sdk-for-javascript/v2/developer-guide/node-reusing-connections.html)\nwhen working with the AWS SDK for JavaScript. Set the `awsSdkConnectionReuse` prop to `false` to disable it.\n\n## Lock file\n\nThe `NodejsFunction` requires a dependencies lock file (`yarn.lock`, `pnpm-lock.yaml` or\n`package-lock.json`). 
When bundling in a Docker container, the path containing this lock file is\nused as the source (`/asset-input`) for the volume mounted in the container.\n\nBy default, the construct will try to automatically determine your project lock file.\nAlternatively, you can specify the `depsLockFilePath` prop manually. In this\ncase you need to ensure that this path includes `entry` and any module/dependencies\nused by your function. Otherwise bundling will fail.\n\n## Local bundling\n\nIf `esbuild` is available it will be used to bundle your code in your environment. Otherwise,\nbundling will happen in a [Lambda compatible Docker container](https://gallery.ecr.aws/sam/build-nodejs12.x).\n\nFor macOS the recommendend approach is to install `esbuild` as Docker volume performance is really poor.\n\n`esbuild` can be installed with:\n\n```console\n$ npm install --save-dev esbuild@0\n```\n\nOR\n\n```console\n$ yarn add --dev esbuild@0\n```\n\nTo force bundling in a Docker container even if `esbuild` is available in your environment,\nset `bundling.forceDockerBundling` to `true`. This is useful if your function relies on node\nmodules that should be installed (`nodeModules` prop, see [below](#install-modules)) in a Lambda\ncompatible environment. This is usually the case with modules using native dependencies.\n\n## Working with modules\n\n### Externals\n\nBy default, all node modules are bundled except for `aws-sdk`. 
This can be configured by specifying\n`bundling.externalModules`:\n\n```ts\nnew lambda.NodejsFunction(this, 'my-handler', {\n bundling: {\n externalModules: [\n 'aws-sdk', // Use the 'aws-sdk' available in the Lambda runtime\n 'cool-module', // 'cool-module' is already available in a Layer\n ],\n },\n});\n```\n\n### Install modules\n\nBy default, all node modules referenced in your Lambda code will be bundled by `esbuild`.\nUse the `nodeModules` prop under `bundling` to specify a list of modules that should not be\nbundled but instead included in the `node_modules` folder of the Lambda package. This is useful\nwhen working with native dependencies or when `esbuild` fails to bundle a module.\n\n```ts\nnew lambda.NodejsFunction(this, 'my-handler', {\n bundling: {\n nodeModules: ['native-module', 'other-module'],\n },\n});\n```\n\nThe modules listed in `nodeModules` must be present in the `package.json`'s dependencies or\ninstalled. The same version will be used for installation. The lock file (`yarn.lock`,\n`pnpm-lock.yaml` or `package-lock.json`) will be used along with the right installer (`yarn`,\n`pnpm` or `npm`).\n\nWhen working with `nodeModules` using native dependencies, you might want to force bundling in a\nDocker container even if `esbuild` is available in your environment. 
This can be done by setting\n`bundling.forceDockerBundling` to `true`.\n\n## Configuring `esbuild`\n\nThe `NodejsFunction` construct exposes some [esbuild options](https://esbuild.github.io/api/#build-api)\nvia properties under `bundling`:\n\n```ts\nnew lambda.NodejsFunction(this, 'my-handler', {\n bundling: {\n minify: true, // minify code, defaults to false\n sourceMap: true, // include source map, defaults to false\n sourceMapMode: SourceMapMode.INLINE, // defaults to SourceMapMode.DEFAULT\n target: 'es2020', // target environment for the generated JavaScript code\n loader: { // Use the 'dataurl' loader for '.png' files\n '.png': 'dataurl',\n },\n define: { // Replace strings during build time\n 'process.env.API_KEY': JSON.stringify('xxx-xxxx-xxx'),\n 'process.env.PRODUCTION': JSON.stringify(true),\n 'process.env.NUMBER': JSON.stringify(123),\n },\n logLevel: LogLevel.SILENT, // defaults to LogLevel.WARNING\n keepNames: true, // defaults to false\n tsconfig: 'custom-tsconfig.json', // use custom-tsconfig.json instead of default,\n metafile: true, // include meta file, defaults to false\n banner : '/* comments */', // requires esbuild >= 0.9.0, defaults to none\n footer : '/* comments */', // requires esbuild >= 0.9.0, defaults to none\n },\n});\n```\n\n## Command hooks\n\nIt is possible to run additional commands by specifying the `commandHooks` prop:\n\n```ts\nnew lambda.NodejsFunction(this, 'my-handler-with-commands', {\n bundling: {\n commandHooks: {\n // Copy a file so that it will be included in the bundled asset\n afterBundling(inputDir: string, outputDir: string): string[] {\n return [`cp ${inputDir}/my-binary.node ${outputDir}`];\n }\n // ...\n }\n // ...\n }\n});\n```\n\nThe following hooks are available:\n\n- `beforeBundling`: runs before all bundling commands\n- `beforeInstall`: runs before node modules installation\n- `afterBundling`: runs after all bundling commands\n\nThey all receive the directory containing the lock file (`inputDir`) and 
the\ndirectory where the bundled asset will be output (`outputDir`). They must return\nan array of commands to run. Commands are chained with `&&`.\n\nThe commands will run in the environment in which bundling occurs: inside the\ncontainer for Docker bundling or on the host OS for local bundling.\n\n## Customizing Docker bundling\n\nUse `bundling.environment` to define environments variables when `esbuild` runs:\n\n```ts\nnew lambda.NodejsFunction(this, 'my-handler', {\n bundling: {\n environment: {\n NODE_ENV: 'production',\n },\n },\n});\n```\n\nUse `bundling.buildArgs` to pass build arguments when building the Docker bundling image:\n\n```ts\nnew lambda.NodejsFunction(this, 'my-handler', {\n bundling: {\n buildArgs: {\n HTTPS_PROXY: 'https://127.0.0.1:3001',\n },\n }\n});\n```\n\nUse `bundling.dockerImage` to use a custom Docker bundling image:\n\n```ts\nnew lambda.NodejsFunction(this, 'my-handler', {\n bundling: {\n dockerImage: cdk.DockerImage.fromBuild('/path/to/Dockerfile'),\n },\n});\n```\n\nThis image should have `esbuild` installed **globally**. If you plan to use `nodeModules` it\nshould also have `npm`, `yarn` or `pnpm` depending on the lock file you're using.\n\nUse the [default image provided by `@aws-cdk/aws-lambda-nodejs`](https://github.com/aws/aws-cdk/blob/master/packages/%40aws-cdk/aws-lambda-nodejs/lib/Dockerfile)\nas a source of inspiration.\n"
|
|
2214
|
-
},
|
|
2215
1578
|
"targets": {
|
|
2216
1579
|
"dotnet": {
|
|
2217
1580
|
"namespace": "Amazon.CDK.AWS.Lambda.Nodejs"
|
|
@@ -2225,10 +1588,6 @@
|
|
|
2225
1588
|
}
|
|
2226
1589
|
},
|
|
2227
1590
|
"aws-cdk-lib.aws_licensemanager": {
|
|
2228
|
-
"locationInModule": {
|
|
2229
|
-
"filename": "lib/index.ts",
|
|
2230
|
-
"line": 120
|
|
2231
|
-
},
|
|
2232
1591
|
"targets": {
|
|
2233
1592
|
"dotnet": {
|
|
2234
1593
|
"namespace": "Amazon.CDK.AWS.LicenseManager"
|
|
@@ -2242,10 +1601,6 @@
|
|
|
2242
1601
|
}
|
|
2243
1602
|
},
|
|
2244
1603
|
"aws-cdk-lib.aws_location": {
|
|
2245
|
-
"locationInModule": {
|
|
2246
|
-
"filename": "lib/index.ts",
|
|
2247
|
-
"line": 121
|
|
2248
|
-
},
|
|
2249
1604
|
"targets": {
|
|
2250
1605
|
"dotnet": {
|
|
2251
1606
|
"namespace": "Amazon.CDK.AWS.Location"
|
|
@@ -2259,13 +1614,6 @@
|
|
|
2259
1614
|
}
|
|
2260
1615
|
},
|
|
2261
1616
|
"aws-cdk-lib.aws_logs": {
|
|
2262
|
-
"locationInModule": {
|
|
2263
|
-
"filename": "lib/index.ts",
|
|
2264
|
-
"line": 122
|
|
2265
|
-
},
|
|
2266
|
-
"readme": {
|
|
2267
|
-
"markdown": "# Amazon CloudWatch Logs Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis library supplies constructs for working with CloudWatch Logs.\n\n## Log Groups/Streams\n\nThe basic unit of CloudWatch is a *Log Group*. Every log group typically has the\nsame kind of data logged to it, in the same format. If there are multiple\napplications or services logging into the Log Group, each of them creates a new\n*Log Stream*.\n\nEvery log operation creates a \"log event\", which can consist of a simple string\nor a single-line JSON object. JSON objects have the advantage that they afford\nmore filtering abilities (see below).\n\nThe only configurable attribute for log streams is the retention period, which\nconfigures after how much time the events in the log stream expire and are\ndeleted.\n\nThe default retention period if not supplied is 2 years, but it can be set to\none of the values in the `RetentionDays` enum to configure a different\nretention period (including infinite retention).\n\n[retention example](test/example.retention.lit.ts)\n\n## LogRetention\n\nThe `LogRetention` construct is a way to control the retention period of log groups that are created outside of the CDK. The construct is usually\nused on log groups that are auto created by AWS services, such as [AWS\nlambda](https://docs.aws.amazon.com/lambda/latest/dg/monitoring-cloudwatchlogs.html).\n\nThis is implemented using a [CloudFormation custom\nresource](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cfn-customresource.html)\nwhich pre-creates the log group if it doesn't exist, and sets the specified log retention period (never expire, by default).\n\nBy default, the log group will be created in the same region as the stack. The `logGroupRegion` property can be used to configure\nlog groups in other regions. 
This is typically useful when controlling retention for log groups auto-created by global services that\npublish their log group to a specific region, such as AWS Chatbot creating a log group in `us-east-1`.\n\n## Encrypting Log Groups\n\nBy default, log group data is always encrypted in CloudWatch Logs. You have the\noption to encrypt log group data using a AWS KMS customer master key (CMK) should\nyou not wish to use the default AWS encryption. Keep in mind that if you decide to\nencrypt a log group, any service or IAM identity that needs to read the encrypted\nlog streams in the future will require the same CMK to decrypt the data.\n\nHere's a simple example of creating an encrypted Log Group using a KMS CMK.\n\n```ts\nimport { aws_kms as kms } from 'aws-cdk-lib';\n\nnew LogGroup(this, 'LogGroup', {\n encryptionKey: new kms.Key(this, 'Key'),\n});\n```\n\nSee the AWS documentation for more detailed information about [encrypting CloudWatch\nLogs](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html).\n\n## Subscriptions and Destinations\n\nLog events matching a particular filter can be sent to either a Lambda function\nor a Kinesis stream.\n\nIf the Kinesis stream lives in a different account, a `CrossAccountDestination`\nobject needs to be added in the destination account which will act as a proxy\nfor the remote Kinesis stream. This object is automatically created for you\nif you use the CDK Kinesis library.\n\nCreate a `SubscriptionFilter`, initialize it with an appropriate `Pattern` (see\nbelow) and supply the intended destination:\n\n```ts\nconst fn = new lambda.Function(this, 'Lambda', { ... });\nconst logGroup = new LogGroup(this, 'LogGroup', { ... 
});\n\nnew SubscriptionFilter(this, 'Subscription', {\n logGroup,\n destination: new LogsDestinations.LambdaDestination(fn),\n filterPattern: FilterPattern.allTerms(\"ERROR\", \"MainThread\")\n});\n```\n\n## Metric Filters\n\nCloudWatch Logs can extract and emit metrics based on a textual log stream.\nDepending on your needs, this may be a more convenient way of generating metrics\nfor you application than making calls to CloudWatch Metrics yourself.\n\nA `MetricFilter` either emits a fixed number every time it sees a log event\nmatching a particular pattern (see below), or extracts a number from the log\nevent and uses that as the metric value.\n\nExample:\n\n[metricfilter example](test/integ.metricfilter.lit.ts)\n\nRemember that if you want to use a value from the log event as the metric value,\nyou must mention it in your pattern somewhere.\n\nA very simple MetricFilter can be created by using the `logGroup.extractMetric()`\nhelper function:\n\n```ts\nlogGroup.extractMetric('$.jsonField', 'Namespace', 'MetricName');\n```\n\nWill extract the value of `jsonField` wherever it occurs in JSON-structed\nlog records in the LogGroup, and emit them to CloudWatch Metrics under\nthe name `Namespace/MetricName`.\n\n### Exposing Metric on a Metric Filter\n\nYou can expose a metric on a metric filter by calling the `MetricFilter.metric()` API.\nThis has a default of `statistic = 'avg'` if the statistic is not set in the `props`.\n\n```ts\nconst mf = new MetricFilter(this, 'MetricFilter', {\n logGroup,\n metricNamespace: 'MyApp',\n metricName: 'Latency',\n filterPattern: FilterPattern.exists('$.latency'),\n metricValue: '$.latency',\n});\n\n//expose a metric from the metric filter\nconst metric = mf.metric();\n\n//you can use the metric to create a new alarm\nnew Alarm(this, 'alarm from metric filter', {\n metric,\n threshold: 100,\n evaluationPeriods: 2,\n});\n```\n\n## Patterns\n\nPatterns describe which log events match a subscription or metric filter. 
There\nare three types of patterns:\n\n* Text patterns\n* JSON patterns\n* Space-delimited table patterns\n\nAll patterns are constructed by using static functions on the `FilterPattern`\nclass.\n\nIn addition to the patterns above, the following special patterns exist:\n\n* `FilterPattern.allEvents()`: matches all log events.\n* `FilterPattern.literal(string)`: if you already know what pattern expression to\n use, this function takes a string and will use that as the log pattern. For\n more information, see the [Filter and Pattern\n Syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html).\n\n### Text Patterns\n\nText patterns match if the literal strings appear in the text form of the log\nline.\n\n* `FilterPattern.allTerms(term, term, ...)`: matches if all of the given terms\n (substrings) appear in the log event.\n* `FilterPattern.anyTerm(term, term, ...)`: matches if all of the given terms\n (substrings) appear in the log event.\n* `FilterPattern.anyGroup([term, term, ...], [term, term, ...], ...)`: matches if\n all of the terms in any of the groups (specified as arrays) matches. This is\n an OR match.\n\n\nExamples:\n\n```ts\n// Search for lines that contain both \"ERROR\" and \"MainThread\"\nconst pattern1 = FilterPattern.allTerms('ERROR', 'MainThread');\n\n// Search for lines that either contain both \"ERROR\" and \"MainThread\", or\n// both \"WARN\" and \"Deadlock\".\nconst pattern2 = FilterPattern.anyGroup(\n ['ERROR', 'MainThread'],\n ['WARN', 'Deadlock'],\n );\n```\n\n## JSON Patterns\n\nJSON patterns apply if the log event is the JSON representation of an object\n(without any other characters, so it cannot include a prefix such as timestamp\nor log level). 
JSON patterns can make comparisons on the values inside the\nfields.\n\n* **Strings**: the comparison operators allowed for strings are `=` and `!=`.\n String values can start or end with a `*` wildcard.\n* **Numbers**: the comparison operators allowed for numbers are `=`, `!=`,\n `<`, `<=`, `>`, `>=`.\n\nFields in the JSON structure are identified by identifier the complete object as `$`\nand then descending into it, such as `$.field` or `$.list[0].field`.\n\n* `FilterPattern.stringValue(field, comparison, string)`: matches if the given\n field compares as indicated with the given string value.\n* `FilterPattern.numberValue(field, comparison, number)`: matches if the given\n field compares as indicated with the given numerical value.\n* `FilterPattern.isNull(field)`: matches if the given field exists and has the\n value `null`.\n* `FilterPattern.notExists(field)`: matches if the given field is not in the JSON\n structure.\n* `FilterPattern.exists(field)`: matches if the given field is in the JSON\n structure.\n* `FilterPattern.booleanValue(field, boolean)`: matches if the given field\n is exactly the given boolean value.\n* `FilterPattern.all(jsonPattern, jsonPattern, ...)`: matches if all of the\n given JSON patterns match. This makes an AND combination of the given\n patterns.\n* `FilterPattern.any(jsonPattern, jsonPattern, ...)`: matches if any of the\n given JSON patterns match. 
This makes an OR combination of the given\n patterns.\n\n\nExample:\n\n```ts\n// Search for all events where the component field is equal to\n// \"HttpServer\" and either error is true or the latency is higher\n// than 1000.\nconst pattern = FilterPattern.all(\n FilterPattern.stringValue('$.component', '=', 'HttpServer'),\n FilterPattern.any(\n FilterPattern.booleanValue('$.error', true),\n FilterPattern.numberValue('$.latency', '>', 1000)\n ));\n```\n\n## Space-delimited table patterns\n\nIf the log events are rows of a space-delimited table, this pattern can be used\nto identify the columns in that structure and add conditions on any of them. The\ncanonical example where you would apply this type of pattern is Apache server\nlogs.\n\nText that is surrounded by `\"...\"` quotes or `[...]` square brackets will\nbe treated as one column.\n\n* `FilterPattern.spaceDelimited(column, column, ...)`: construct a\n `SpaceDelimitedTextPattern` object with the indicated columns. The columns\n map one-by-one the columns found in the log event. 
The string `\"...\"` may\n be used to specify an arbitrary number of unnamed columns anywhere in the\n name list (but may only be specified once).\n\nAfter constructing a `SpaceDelimitedTextPattern`, you can use the following\ntwo members to add restrictions:\n\n* `pattern.whereString(field, comparison, string)`: add a string condition.\n The rules are the same as for JSON patterns.\n* `pattern.whereNumber(field, comparison, number)`: add a numerical condition.\n The rules are the same as for JSON patterns.\n\nMultiple restrictions can be added on the same column; they must all apply.\n\nExample:\n\n```ts\n// Search for all events where the component is \"HttpServer\" and the\n// result code is not equal to 200.\nconst pattern = FilterPattern.spaceDelimited('time', 'component', '...', 'result_code', 'latency')\n .whereString('component', '=', 'HttpServer')\n .whereNumber('result_code', '!=', 200);\n```\n\n## Notes\n\nBe aware that Log Group ARNs will always have the string `:*` appended to\nthem, to match the behavior of [the CloudFormation `AWS::Logs::LogGroup`\nresource](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-loggroup.html#aws-resource-logs-loggroup-return-values).\n"
|
|
2268
|
-
},
|
|
2269
1617
|
"targets": {
|
|
2270
1618
|
"dotnet": {
|
|
2271
1619
|
"namespace": "Amazon.CDK.AWS.Logs"
|
|
@@ -2279,13 +1627,6 @@
|
|
|
2279
1627
|
}
|
|
2280
1628
|
},
|
|
2281
1629
|
"aws-cdk-lib.aws_logs_destinations": {
|
|
2282
|
-
"locationInModule": {
|
|
2283
|
-
"filename": "lib/index.ts",
|
|
2284
|
-
"line": 123
|
|
2285
|
-
},
|
|
2286
|
-
"readme": {
|
|
2287
|
-
"markdown": "# CDK Construct Libray for AWS XXX\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nA short description here.\n"
|
|
2288
|
-
},
|
|
2289
1630
|
"targets": {
|
|
2290
1631
|
"dotnet": {
|
|
2291
1632
|
"namespace": "Amazon.CDK.AWS.Logs.Destinations"
|
|
@@ -2299,10 +1640,6 @@
|
|
|
2299
1640
|
}
|
|
2300
1641
|
},
|
|
2301
1642
|
"aws-cdk-lib.aws_lookoutequipment": {
|
|
2302
|
-
"locationInModule": {
|
|
2303
|
-
"filename": "lib/index.ts",
|
|
2304
|
-
"line": 124
|
|
2305
|
-
},
|
|
2306
1643
|
"targets": {
|
|
2307
1644
|
"dotnet": {
|
|
2308
1645
|
"namespace": "Amazon.CDK.AWS.LookoutEquipment"
|
|
@@ -2316,10 +1653,6 @@
|
|
|
2316
1653
|
}
|
|
2317
1654
|
},
|
|
2318
1655
|
"aws-cdk-lib.aws_lookoutmetrics": {
|
|
2319
|
-
"locationInModule": {
|
|
2320
|
-
"filename": "lib/index.ts",
|
|
2321
|
-
"line": 125
|
|
2322
|
-
},
|
|
2323
1656
|
"targets": {
|
|
2324
1657
|
"dotnet": {
|
|
2325
1658
|
"namespace": "Amazon.CDK.AWS.LookoutMetrics"
|
|
@@ -2333,10 +1666,6 @@
|
|
|
2333
1666
|
}
|
|
2334
1667
|
},
|
|
2335
1668
|
"aws-cdk-lib.aws_lookoutvision": {
|
|
2336
|
-
"locationInModule": {
|
|
2337
|
-
"filename": "lib/index.ts",
|
|
2338
|
-
"line": 126
|
|
2339
|
-
},
|
|
2340
1669
|
"targets": {
|
|
2341
1670
|
"dotnet": {
|
|
2342
1671
|
"namespace": "Amazon.CDK.AWS.LookoutVision"
|
|
@@ -2350,10 +1679,6 @@
|
|
|
2350
1679
|
}
|
|
2351
1680
|
},
|
|
2352
1681
|
"aws-cdk-lib.aws_macie": {
|
|
2353
|
-
"locationInModule": {
|
|
2354
|
-
"filename": "lib/index.ts",
|
|
2355
|
-
"line": 127
|
|
2356
|
-
},
|
|
2357
1682
|
"targets": {
|
|
2358
1683
|
"dotnet": {
|
|
2359
1684
|
"namespace": "Amazon.CDK.AWS.Macie"
|
|
@@ -2367,10 +1692,6 @@
|
|
|
2367
1692
|
}
|
|
2368
1693
|
},
|
|
2369
1694
|
"aws-cdk-lib.aws_managedblockchain": {
|
|
2370
|
-
"locationInModule": {
|
|
2371
|
-
"filename": "lib/index.ts",
|
|
2372
|
-
"line": 128
|
|
2373
|
-
},
|
|
2374
1695
|
"targets": {
|
|
2375
1696
|
"dotnet": {
|
|
2376
1697
|
"namespace": "Amazon.CDK.AWS.ManagedBlockchain"
|
|
@@ -2384,10 +1705,6 @@
|
|
|
2384
1705
|
}
|
|
2385
1706
|
},
|
|
2386
1707
|
"aws-cdk-lib.aws_mediaconnect": {
|
|
2387
|
-
"locationInModule": {
|
|
2388
|
-
"filename": "lib/index.ts",
|
|
2389
|
-
"line": 129
|
|
2390
|
-
},
|
|
2391
1708
|
"targets": {
|
|
2392
1709
|
"dotnet": {
|
|
2393
1710
|
"namespace": "Amazon.CDK.AWS.MediaConnect"
|
|
@@ -2401,10 +1718,6 @@
|
|
|
2401
1718
|
}
|
|
2402
1719
|
},
|
|
2403
1720
|
"aws-cdk-lib.aws_mediaconvert": {
|
|
2404
|
-
"locationInModule": {
|
|
2405
|
-
"filename": "lib/index.ts",
|
|
2406
|
-
"line": 130
|
|
2407
|
-
},
|
|
2408
1721
|
"targets": {
|
|
2409
1722
|
"dotnet": {
|
|
2410
1723
|
"namespace": "Amazon.CDK.AWS.MediaConvert"
|
|
@@ -2418,10 +1731,6 @@
|
|
|
2418
1731
|
}
|
|
2419
1732
|
},
|
|
2420
1733
|
"aws-cdk-lib.aws_medialive": {
|
|
2421
|
-
"locationInModule": {
|
|
2422
|
-
"filename": "lib/index.ts",
|
|
2423
|
-
"line": 131
|
|
2424
|
-
},
|
|
2425
1734
|
"targets": {
|
|
2426
1735
|
"dotnet": {
|
|
2427
1736
|
"namespace": "Amazon.CDK.AWS.MediaLive"
|
|
@@ -2435,10 +1744,6 @@
|
|
|
2435
1744
|
}
|
|
2436
1745
|
},
|
|
2437
1746
|
"aws-cdk-lib.aws_mediapackage": {
|
|
2438
|
-
"locationInModule": {
|
|
2439
|
-
"filename": "lib/index.ts",
|
|
2440
|
-
"line": 132
|
|
2441
|
-
},
|
|
2442
1747
|
"targets": {
|
|
2443
1748
|
"dotnet": {
|
|
2444
1749
|
"namespace": "Amazon.CDK.AWS.MediaPackage"
|
|
@@ -2452,10 +1757,6 @@
|
|
|
2452
1757
|
}
|
|
2453
1758
|
},
|
|
2454
1759
|
"aws-cdk-lib.aws_mediastore": {
|
|
2455
|
-
"locationInModule": {
|
|
2456
|
-
"filename": "lib/index.ts",
|
|
2457
|
-
"line": 133
|
|
2458
|
-
},
|
|
2459
1760
|
"targets": {
|
|
2460
1761
|
"dotnet": {
|
|
2461
1762
|
"namespace": "Amazon.CDK.AWS.MediaStore"
|
|
@@ -2469,10 +1770,6 @@
|
|
|
2469
1770
|
}
|
|
2470
1771
|
},
|
|
2471
1772
|
"aws-cdk-lib.aws_msk": {
|
|
2472
|
-
"locationInModule": {
|
|
2473
|
-
"filename": "lib/index.ts",
|
|
2474
|
-
"line": 134
|
|
2475
|
-
},
|
|
2476
1773
|
"targets": {
|
|
2477
1774
|
"dotnet": {
|
|
2478
1775
|
"namespace": "Amazon.CDK.AWS.Msk"
|
|
@@ -2486,10 +1783,6 @@
|
|
|
2486
1783
|
}
|
|
2487
1784
|
},
|
|
2488
1785
|
"aws-cdk-lib.aws_mwaa": {
|
|
2489
|
-
"locationInModule": {
|
|
2490
|
-
"filename": "lib/index.ts",
|
|
2491
|
-
"line": 135
|
|
2492
|
-
},
|
|
2493
1786
|
"targets": {
|
|
2494
1787
|
"dotnet": {
|
|
2495
1788
|
"namespace": "Amazon.CDK.AWS.MWAA"
|
|
@@ -2503,10 +1796,6 @@
|
|
|
2503
1796
|
}
|
|
2504
1797
|
},
|
|
2505
1798
|
"aws-cdk-lib.aws_neptune": {
|
|
2506
|
-
"locationInModule": {
|
|
2507
|
-
"filename": "lib/index.ts",
|
|
2508
|
-
"line": 136
|
|
2509
|
-
},
|
|
2510
1799
|
"targets": {
|
|
2511
1800
|
"dotnet": {
|
|
2512
1801
|
"namespace": "Amazon.CDK.AWS.Neptune"
|
|
@@ -2520,10 +1809,6 @@
|
|
|
2520
1809
|
}
|
|
2521
1810
|
},
|
|
2522
1811
|
"aws-cdk-lib.aws_networkfirewall": {
|
|
2523
|
-
"locationInModule": {
|
|
2524
|
-
"filename": "lib/index.ts",
|
|
2525
|
-
"line": 137
|
|
2526
|
-
},
|
|
2527
1812
|
"targets": {
|
|
2528
1813
|
"dotnet": {
|
|
2529
1814
|
"namespace": "Amazon.CDK.AWS.NetworkFirewall"
|
|
@@ -2537,10 +1822,6 @@
|
|
|
2537
1822
|
}
|
|
2538
1823
|
},
|
|
2539
1824
|
"aws-cdk-lib.aws_networkmanager": {
|
|
2540
|
-
"locationInModule": {
|
|
2541
|
-
"filename": "lib/index.ts",
|
|
2542
|
-
"line": 138
|
|
2543
|
-
},
|
|
2544
1825
|
"targets": {
|
|
2545
1826
|
"dotnet": {
|
|
2546
1827
|
"namespace": "Amazon.CDK.AWS.NetworkManager"
|
|
@@ -2554,10 +1835,6 @@
|
|
|
2554
1835
|
}
|
|
2555
1836
|
},
|
|
2556
1837
|
"aws-cdk-lib.aws_nimblestudio": {
|
|
2557
|
-
"locationInModule": {
|
|
2558
|
-
"filename": "lib/index.ts",
|
|
2559
|
-
"line": 139
|
|
2560
|
-
},
|
|
2561
1838
|
"targets": {
|
|
2562
1839
|
"dotnet": {
|
|
2563
1840
|
"namespace": "Amazon.CDK.AWS.NimbleStudio"
|
|
@@ -2571,10 +1848,6 @@
|
|
|
2571
1848
|
}
|
|
2572
1849
|
},
|
|
2573
1850
|
"aws-cdk-lib.aws_opsworks": {
|
|
2574
|
-
"locationInModule": {
|
|
2575
|
-
"filename": "lib/index.ts",
|
|
2576
|
-
"line": 140
|
|
2577
|
-
},
|
|
2578
1851
|
"targets": {
|
|
2579
1852
|
"dotnet": {
|
|
2580
1853
|
"namespace": "Amazon.CDK.AWS.OpsWorks"
|
|
@@ -2588,10 +1861,6 @@
|
|
|
2588
1861
|
}
|
|
2589
1862
|
},
|
|
2590
1863
|
"aws-cdk-lib.aws_opsworkscm": {
|
|
2591
|
-
"locationInModule": {
|
|
2592
|
-
"filename": "lib/index.ts",
|
|
2593
|
-
"line": 141
|
|
2594
|
-
},
|
|
2595
1864
|
"targets": {
|
|
2596
1865
|
"dotnet": {
|
|
2597
1866
|
"namespace": "Amazon.CDK.AWS.OpsWorksCM"
|
|
@@ -2605,10 +1874,6 @@
|
|
|
2605
1874
|
}
|
|
2606
1875
|
},
|
|
2607
1876
|
"aws-cdk-lib.aws_pinpoint": {
|
|
2608
|
-
"locationInModule": {
|
|
2609
|
-
"filename": "lib/index.ts",
|
|
2610
|
-
"line": 142
|
|
2611
|
-
},
|
|
2612
1877
|
"targets": {
|
|
2613
1878
|
"dotnet": {
|
|
2614
1879
|
"namespace": "Amazon.CDK.AWS.Pinpoint"
|
|
@@ -2622,10 +1887,6 @@
|
|
|
2622
1887
|
}
|
|
2623
1888
|
},
|
|
2624
1889
|
"aws-cdk-lib.aws_pinpointemail": {
|
|
2625
|
-
"locationInModule": {
|
|
2626
|
-
"filename": "lib/index.ts",
|
|
2627
|
-
"line": 143
|
|
2628
|
-
},
|
|
2629
1890
|
"targets": {
|
|
2630
1891
|
"dotnet": {
|
|
2631
1892
|
"namespace": "Amazon.CDK.AWS.PinpointEmail"
|
|
@@ -2639,10 +1900,6 @@
|
|
|
2639
1900
|
}
|
|
2640
1901
|
},
|
|
2641
1902
|
"aws-cdk-lib.aws_qldb": {
|
|
2642
|
-
"locationInModule": {
|
|
2643
|
-
"filename": "lib/index.ts",
|
|
2644
|
-
"line": 144
|
|
2645
|
-
},
|
|
2646
1903
|
"targets": {
|
|
2647
1904
|
"dotnet": {
|
|
2648
1905
|
"namespace": "Amazon.CDK.AWS.QLDB"
|
|
@@ -2656,10 +1913,6 @@
|
|
|
2656
1913
|
}
|
|
2657
1914
|
},
|
|
2658
1915
|
"aws-cdk-lib.aws_quicksight": {
|
|
2659
|
-
"locationInModule": {
|
|
2660
|
-
"filename": "lib/index.ts",
|
|
2661
|
-
"line": 145
|
|
2662
|
-
},
|
|
2663
1916
|
"targets": {
|
|
2664
1917
|
"dotnet": {
|
|
2665
1918
|
"namespace": "Amazon.CDK.AWS.QuickSight"
|
|
@@ -2673,10 +1926,6 @@
|
|
|
2673
1926
|
}
|
|
2674
1927
|
},
|
|
2675
1928
|
"aws-cdk-lib.aws_ram": {
|
|
2676
|
-
"locationInModule": {
|
|
2677
|
-
"filename": "lib/index.ts",
|
|
2678
|
-
"line": 146
|
|
2679
|
-
},
|
|
2680
1929
|
"targets": {
|
|
2681
1930
|
"dotnet": {
|
|
2682
1931
|
"namespace": "Amazon.CDK.AWS.RAM"
|
|
@@ -2690,13 +1939,6 @@
|
|
|
2690
1939
|
}
|
|
2691
1940
|
},
|
|
2692
1941
|
"aws-cdk-lib.aws_rds": {
|
|
2693
|
-
"locationInModule": {
|
|
2694
|
-
"filename": "lib/index.ts",
|
|
2695
|
-
"line": 147
|
|
2696
|
-
},
|
|
2697
|
-
"readme": {
|
|
2698
|
-
"markdown": "# Amazon Relational Database Service Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\n\n```ts\nimport { aws_rds as rds } from 'aws-cdk-lib';\n```\n\n## Starting a clustered database\n\nTo set up a clustered database (like Aurora), define a `DatabaseCluster`. You must\nalways launch a database in a VPC. Use the `vpcSubnets` attribute to control whether\nyour instances will be launched privately or publicly:\n\n```ts\nconst cluster = new rds.DatabaseCluster(this, 'Database', {\n engine: rds.DatabaseClusterEngine.auroraMysql({ version: rds.AuroraMysqlEngineVersion.VER_2_08_1 }),\n credentials: rds.Credentials.fromGeneratedSecret('clusteradmin'), // Optional - will default to 'admin' username and generated password\n instanceProps: {\n // optional , defaults to t3.medium\n instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL),\n vpcSubnets: {\n subnetType: ec2.SubnetType.PRIVATE,\n },\n vpc,\n },\n});\n```\n\nIf there isn't a constant for the exact version you want to use,\nall of the `Version` classes have a static `of` method that can be used to create an arbitrary version.\n\n```ts\nconst customEngineVersion = rds.AuroraMysqlEngineVersion.of('5.7.mysql_aurora.2.08.1');\n```\n\nBy default, the master password will be generated and stored in AWS Secrets Manager with auto-generated description.\n\nYour cluster will be empty by default. To add a default database upon construction, specify the\n`defaultDatabaseName` attribute.\n\nUse `DatabaseClusterFromSnapshot` to create a cluster from a snapshot:\n\n```ts\nnew rds.DatabaseClusterFromSnapshot(stack, 'Database', {\n engine: rds.DatabaseClusterEngine.aurora({ version: rds.AuroraEngineVersion.VER_1_22_2 }),\n instanceProps: {\n vpc,\n },\n snapshotIdentifier: 'mySnapshot',\n});\n```\n\n## Starting an instance database\n\nTo set up a instance database, define a `DatabaseInstance`. 
You must\nalways launch a database in a VPC. Use the `vpcSubnets` attribute to control whether\nyour instances will be launched privately or publicly:\n\n```ts\nconst instance = new rds.DatabaseInstance(this, 'Instance', {\n engine: rds.DatabaseInstanceEngine.oracleSe2({ version: rds.OracleEngineVersion.VER_19_0_0_0_2020_04_R1 }),\n // optional, defaults to m5.large\n instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3, ec2.InstanceSize.SMALL),\n credentials: rds.Credentials.fromGeneratedSecret('syscdk'), // Optional - will default to 'admin' username and generated password\n vpc,\n vpcSubnets: {\n subnetType: ec2.SubnetType.PRIVATE\n }\n});\n```\n\nIf there isn't a constant for the exact engine version you want to use,\nall of the `Version` classes have a static `of` method that can be used to create an arbitrary version.\n\n```ts\nconst customEngineVersion = rds.OracleEngineVersion.of('19.0.0.0.ru-2020-04.rur-2020-04.r1', '19');\n```\n\nBy default, the master password will be generated and stored in AWS Secrets Manager.\n\nTo use the storage auto scaling option of RDS you can specify the maximum allocated storage.\nThis is the upper limit to which RDS can automatically scale the storage. 
More info can be found\n[here](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIOPS.StorageTypes.html#USER_PIOPS.Autoscaling)\nExample for max storage configuration:\n\n```ts\nconst instance = new rds.DatabaseInstance(this, 'Instance', {\n engine: rds.DatabaseInstanceEngine.postgres({ version: rds.PostgresEngineVersion.VER_12_3 }),\n // optional, defaults to m5.large\n instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL),\n vpc,\n maxAllocatedStorage: 200,\n});\n```\n\nUse `DatabaseInstanceFromSnapshot` and `DatabaseInstanceReadReplica` to create an instance from snapshot or\na source database respectively:\n\n```ts\nnew rds.DatabaseInstanceFromSnapshot(stack, 'Instance', {\n snapshotIdentifier: 'my-snapshot',\n engine: rds.DatabaseInstanceEngine.postgres({ version: rds.PostgresEngineVersion.VER_12_3 }),\n // optional, defaults to m5.large\n instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.LARGE),\n vpc,\n});\n\nnew rds.DatabaseInstanceReadReplica(stack, 'ReadReplica', {\n sourceDatabaseInstance: sourceInstance,\n instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.LARGE),\n vpc,\n});\n```\n\nCreating a \"production\" Oracle database instance with option and parameter groups:\n\n[example of setting up a production oracle instance](test/integ.instance.lit.ts)\n\n## Setting Public Accessibility\n\nYou can set public accessibility for the database instance or cluster using the `publiclyAccessible` property.\nIf you specify `true`, it creates an instance with a publicly resolvable DNS name, which resolves to a public IP address.\nIf you specify `false`, it creates an internal instance with a DNS name that resolves to a private IP address.\nThe default value depends on `vpcSubnets`.\nIt will be `true` if `vpcSubnets` is `subnetType: SubnetType.PUBLIC`, `false` otherwise.\n\n```ts\n// Setting public accessibility for DB instance\nnew rds.DatabaseInstance(stack, 
'Instance', {\n engine: rds.DatabaseInstanceEngine.mysql({\n version: rds.MysqlEngineVersion.VER_8_0_19,\n }),\n vpc,\n vpcSubnets: {\n subnetType: ec2.SubnetType.PRIVATE,\n },\n publiclyAccessible: true,\n});\n\n// Setting public accessibility for DB cluster\nnew rds.DatabaseCluster(stack, 'DatabaseCluster', {\n engine: DatabaseClusterEngine.AURORA,\n instanceProps: {\n vpc,\n vpcSubnets: {\n subnetType: ec2.SubnetType.PRIVATE,\n },\n publiclyAccessible: true,\n copyTagsToSnapshot: true, // whether to save the cluster tags when creating the snapshot. Default is 'true'\n },\n});\n```\n\n## Instance events\n\nTo define Amazon CloudWatch event rules for database instances, use the `onEvent`\nmethod:\n\n```ts\nconst rule = instance.onEvent('InstanceEvent', { target: new targets.LambdaFunction(fn) });\n```\n\n## Login credentials\n\nBy default, database instances and clusters will have `admin` user with an auto-generated password.\nAn alternative username (and password) may be specified for the admin user instead of the default.\n\nThe following examples use a `DatabaseInstance`, but the same usage is applicable to `DatabaseCluster`.\n\n```ts\nconst engine = rds.DatabaseInstanceEngine.postgres({ version: rds.PostgresEngineVersion.VER_12_3 });\nnew rds.DatabaseInstance(this, 'InstanceWithUsername', {\n engine,\n vpc,\n credentials: rds.Credentials.fromGeneratedSecret('postgres'), // Creates an admin user of postgres with a generated password\n});\n\nnew rds.DatabaseInstance(this, 'InstanceWithUsernameAndPassword', {\n engine,\n vpc,\n credentials: rds.Credentials.fromPassword('postgres', SecretValue.ssmSecure('/dbPassword', '1')), // Use password from SSM\n});\n\nconst mySecret = secretsmanager.Secret.fromSecretName(this, 'DBSecret', 'myDBLoginInfo');\nnew rds.DatabaseInstance(this, 'InstanceWithSecretLogin', {\n engine,\n vpc,\n credentials: rds.Credentials.fromSecret(mySecret), // Get both username and password from existing secret\n});\n```\n\n## Connecting\n\nTo 
control who can access the cluster or instance, use the `.connections` attribute. RDS databases have\na default port, so you don't need to specify the port:\n\n```ts\ncluster.connections.allowFromAnyIpv4('Open to the world');\n```\n\nThe endpoints to access your database cluster will be available as the `.clusterEndpoint` and `.readerEndpoint`\nattributes:\n\n```ts\nconst writeAddress = cluster.clusterEndpoint.socketAddress; // \"HOSTNAME:PORT\"\n```\n\nFor an instance database:\n\n```ts\nconst address = instance.instanceEndpoint.socketAddress; // \"HOSTNAME:PORT\"\n```\n\n## Rotating credentials\n\nWhen the master password is generated and stored in AWS Secrets Manager, it can be rotated automatically:\n\n```ts\ninstance.addRotationSingleUser({\n automaticallyAfter: cdk.Duration.days(7), // defaults to 30 days\n excludeCharacters: '!@#$%^&*', // defaults to the set \" %+~`#$&*()|[]{}:;<>?!'/@\\\"\\\\\"\n});\n```\n\n[example of setting up master password rotation for a cluster](test/integ.cluster-rotation.lit.ts)\n\nThe multi user rotation scheme is also available:\n\n```ts\ninstance.addRotationMultiUser('MyUser', {\n secret: myImportedSecret, // This secret must have the `masterarn` key\n});\n```\n\nIt's also possible to create user credentials together with the instance/cluster and add rotation:\n\n```ts\nconst myUserSecret = new rds.DatabaseSecret(this, 'MyUserSecret', {\n username: 'myuser',\n secretName: 'my-user-secret', // optional, defaults to a CloudFormation-generated name\n masterSecret: instance.secret,\n excludeCharacters: '{}[]()\\'\"/\\\\', // defaults to the set \" %+~`#$&*()|[]{}:;<>?!'/@\\\"\\\\\"\n});\nconst myUserSecretAttached = myUserSecret.attach(instance); // Adds DB connections information in the secret\n\ninstance.addRotationMultiUser('MyUser', { // Add rotation using the multi user scheme\n secret: myUserSecretAttached,\n});\n```\n\n**Note**: This user must be created manually in the database using the master credentials.\nThe rotation 
will start as soon as this user exists.\n\nSee also [@aws-cdk/aws-secretsmanager](https://github.com/aws/aws-cdk/blob/master/packages/%40aws-cdk/aws-secretsmanager/README.md) for credentials rotation of existing clusters/instances.\n\n## IAM Authentication\n\nYou can also authenticate to a database instance using AWS Identity and Access Management (IAM) database authentication;\nSee <https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html> for more information\nand a list of supported versions and limitations.\n\nThe following example shows enabling IAM authentication for a database instance and granting connection access to an IAM role.\n\n```ts\nconst instance = new rds.DatabaseInstance(stack, 'Instance', {\n engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }),\n vpc,\n iamAuthentication: true, // Optional - will be automatically set if you call grantConnect().\n});\nconst role = new Role(stack, 'DBRole', { assumedBy: new AccountPrincipal(stack.account) });\ninstance.grantConnect(role); // Grant the role connection access to the DB.\n```\n\nThe following example shows granting connection access for RDS Proxy to an IAM role.\n\n```ts\nconst cluster = new rds.DatabaseCluster(stack, 'Database', {\n engine: rds.DatabaseClusterEngine.AURORA,\n instanceProps: { vpc },\n});\n\nconst proxy = new rds.DatabaseProxy(stack, 'Proxy', {\n proxyTarget: rds.ProxyTarget.fromCluster(cluster),\n secrets: [cluster.secret!],\n vpc,\n});\n\nconst role = new Role(stack, 'DBProxyRole', { assumedBy: new AccountPrincipal(stack.account) });\nproxy.grantConnect(role, 'admin'); // Grant the role connection access to the DB Proxy for database user 'admin'.\n```\n\n**Note**: In addition to the setup above, a database user will need to be created to support IAM auth.\nSee <https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.DBAccounts.html> for setup instructions.\n\n## Kerberos Authentication\n\nYou can 
also authenticate using Kerberos to a database instance using AWS Managed Microsoft AD for authentication;\nSee <https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/kerberos-authentication.html> for more information\nand a list of supported versions and limitations.\n\nThe following example shows enabling domain support for a database instance and creating an IAM role to access\nDirectory Services.\n\n```ts\nconst role = new iam.Role(stack, 'RDSDirectoryServicesRole', {\n assumedBy: new iam.ServicePrincipal('rds.amazonaws.com'),\n managedPolicies: [\n iam.ManagedPolicy.fromAwsManagedPolicyName('service-role/AmazonRDSDirectoryServiceAccess'),\n ],\n});\nconst instance = new rds.DatabaseInstance(stack, 'Instance', {\n engine: rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0_19 }),\n vpc,\n domain: 'd-????????', // The ID of the domain for the instance to join.\n domainRole: role, // Optional - will be create automatically if not provided.\n});\n```\n\n**Note**: In addition to the setup above, you need to make sure that the database instance has network connectivity\nto the domain controllers. 
This includes enabling cross-VPC traffic if in a different VPC and setting up the\nappropriate security groups/network ACL to allow traffic between the database instance and domain controllers.\nOnce configured, see <https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/kerberos-authentication.html> for details\non configuring users for each available database engine.\n\n## Metrics\n\nDatabase instances and clusters both expose metrics (`cloudwatch.Metric`):\n\n```ts\n// The number of database connections in use (average over 5 minutes)\nconst dbConnections = instance.metricDatabaseConnections();\n\n// Average CPU utilization over 5 minutes\nconst cpuUtilization = cluster.metricCPUUtilization();\n\n// The average amount of time taken per disk I/O operation (average over 1 minute)\nconst readLatency = instance.metric('ReadLatency', { statistic: 'Average', periodSec: 60 });\n```\n\n## Enabling S3 integration\n\nData in S3 buckets can be imported to and exported from certain database engines using SQL queries. To enable this\nfunctionality, set the `s3ImportBuckets` and `s3ExportBuckets` properties for import and export respectively. 
When\nconfigured, the CDK automatically creates and configures IAM roles as required.\nAdditionally, the `s3ImportRole` and `s3ExportRole` properties can be used to set this role directly.\n\nYou can read more about loading data to (or from) S3 here:\n\n* Aurora MySQL - [import](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Integrating.LoadFromS3.html)\n and [export](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Integrating.SaveIntoS3.html).\n* Aurora PostgreSQL - [import](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraPostgreSQL.Migrating.html#USER_PostgreSQL.S3Import)\n and [export](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/postgresql-s3-export.html).\n* Microsoft SQL Server - [import and export](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/SQLServer.Procedural.Importing.html)\n* PostgreSQL - [import](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/PostgreSQL.Procedural.Importing.html)\n and [export](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/postgresql-s3-export.html)\n* Oracle - [import and export](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-s3-integration.html)\n\nThe following snippet sets up a database cluster with different S3 buckets where the data is imported and exported -\n\n```ts\nimport { aws_s3 as s3 } from 'aws-cdk-lib';\n\nconst importBucket = new s3.Bucket(this, 'importbucket');\nconst exportBucket = new s3.Bucket(this, 'exportbucket');\nnew rds.DatabaseCluster(this, 'dbcluster', {\n // ...\n s3ImportBuckets: [importBucket],\n s3ExportBuckets: [exportBucket],\n});\n```\n\n## Creating a Database Proxy\n\nAmazon RDS Proxy sits between your application and your relational database to efficiently manage\nconnections to the database and improve scalability of the application. 
Learn more about at [Amazon RDS Proxy](https://aws.amazon.com/rds/proxy/)\n\nThe following code configures an RDS Proxy for a `DatabaseInstance`.\n\n```ts\nimport * as cdk from 'aws-cdk-lib';\nimport { aws_ec2 as ec2 } from 'aws-cdk-lib';\nimport { aws_rds as rds } from 'aws-cdk-lib';\nimport { aws_secretsmanager as secrets } from 'aws-cdk-lib';\n\nconst vpc: ec2.IVpc = ...;\nconst securityGroup: ec2.ISecurityGroup = ...;\nconst secrets: secrets.ISecret[] = [...];\nconst dbInstance: rds.IDatabaseInstance = ...;\n\nconst proxy = dbInstance.addProxy('proxy', {\n connectionBorrowTimeout: cdk.Duration.seconds(30),\n maxConnectionsPercent: 50,\n secrets,\n vpc,\n});\n```\n\n## Exporting Logs\n\nYou can publish database logs to Amazon CloudWatch Logs. With CloudWatch Logs, you can perform real-time analysis of the log data,\nstore the data in highly durable storage, and manage the data with the CloudWatch Logs Agent. This is available for both database\ninstances and clusters; the types of logs available depend on the database type and engine being used.\n\n```ts\n// Exporting logs from a cluster\nconst cluster = new rds.DatabaseCluster(this, 'Database', {\n engine: rds.DatabaseClusterEngine.aurora({\n version: rds.AuroraEngineVersion.VER_1_17_9, // different version class for each engine type\n },\n // ...\n cloudwatchLogsExports: ['error', 'general', 'slowquery', 'audit'], // Export all available MySQL-based logs\n cloudwatchLogsRetention: logs.RetentionDays.THREE_MONTHS, // Optional - default is to never expire logs\n cloudwatchLogsRetentionRole: myLogsPublishingRole, // Optional - a role will be created if not provided\n // ...\n});\n\n// Exporting logs from an instance\nconst instance = new rds.DatabaseInstance(this, 'Instance', {\n engine: rds.DatabaseInstanceEngine.postgres({\n version: rds.PostgresEngineVersion.VER_12_3,\n }),\n // ...\n cloudwatchLogsExports: ['postgresql'], // Export the PostgreSQL logs\n // ...\n});\n```\n\n## Option Groups\n\nSome DB engines 
offer additional features that make it easier to manage data and databases, and to provide additional security for your database.\nAmazon RDS uses option groups to enable and configure these features. An option group can specify features, called options,\nthat are available for a particular Amazon RDS DB instance.\n\n```ts\nconst vpc: ec2.IVpc = ...;\nconst securityGroup: ec2.ISecurityGroup = ...;\nnew rds.OptionGroup(stack, 'Options', {\n engine: rds.DatabaseInstanceEngine.oracleSe2({\n version: rds.OracleEngineVersion.VER_19,\n }),\n configurations: [\n {\n name: 'OEM',\n port: 5500,\n vpc,\n securityGroups: [securityGroup], // Optional - a default group will be created if not provided.\n },\n ],\n});\n```\n\n## Serverless\n\n[Amazon Aurora Serverless](https://aws.amazon.com/rds/aurora/serverless/) is an on-demand, auto-scaling configuration for Amazon\nAurora. The database will automatically start up, shut down, and scale capacity\nup or down based on your application's needs. It enables you to run your database\nin the cloud without managing any database instances.\n\nThe following example initializes an Aurora Serverless PostgreSql cluster.\nAurora Serverless clusters can specify scaling properties which will be used to\nautomatically scale the database cluster seamlessly based on the workload.\n\n```ts\nimport { aws_ec2 as ec2 } from 'aws-cdk-lib';\nimport { aws_rds as rds } from 'aws-cdk-lib';\n\nconst vpc = new ec2.Vpc(this, 'myrdsvpc');\n\nconst cluster = new rds.ServerlessCluster(this, 'AnotherCluster', {\n engine: rds.DatabaseClusterEngine.AURORA_POSTGRESQL,\n parameterGroup: rds.ParameterGroup.fromParameterGroupName(this, 'ParameterGroup', 'default.aurora-postgresql10'),\n vpc,\n scaling: {\n autoPause: Duration.minutes(10), // default is to pause after 5 minutes of idle time\n minCapacity: rds.AuroraCapacityUnit.ACU_8, // default is 2 Aurora capacity units (ACUs)\n maxCapacity: rds.AuroraCapacityUnit.ACU_32, // default is 16 Aurora capacity units 
(ACUs)\n }\n});\n```\n\nAurora Serverless Clusters do not support the following features:\n\n* Loading data from an Amazon S3 bucket\n* Saving data to an Amazon S3 bucket\n* Invoking an AWS Lambda function with an Aurora MySQL native function\n* Aurora replicas\n* Backtracking\n* Multi-master clusters\n* Database cloning\n* IAM database cloning\n* IAM database authentication\n* Restoring a snapshot from MySQL DB instance\n* Performance Insights\n* RDS Proxy\n\nRead more about the [limitations of Aurora Serverless](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations)\n\nLearn more about using Amazon Aurora Serverless by reading the [documentation](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html)\n\n### Data API\n\nYou can access your Aurora Serverless DB cluster using the built-in Data API. The Data API doesn't require a persistent connection to the DB cluster. Instead, it provides a secure HTTP endpoint and integration with AWS SDKs.\n\nThe following example shows granting Data API access to a Lamba function.\n\n```ts\nimport { aws_ec2 as ec2 } from 'aws-cdk-lib';\nimport { aws_lambda as lambda } from 'aws-cdk-lib';\nimport { aws_rds as rds } from 'aws-cdk-lib';\n\nconst vpc = new ec2.Vpc(this, 'MyVPC');\n\nconst cluster = new rds.ServerlessCluster(this, 'AnotherCluster', {\n engine: rds.DatabaseClusterEngine.AURORA_MYSQL,\n vpc,\n enableDataApi: true, // Optional - will be automatically set if you call grantDataApiAccess()\n});\n\nconst fn = new lambda.Function(this, 'MyFunction', {\n runtime: lambda.Runtime.NODEJS_12_X,\n handler: 'index.handler',\n code: lambda.Code.fromAsset(path.join(__dirname, 'lambda-handler')),\n environment: {\n CLUSTER_ARN: cluster.clusterArn,\n SECRET_ARN: cluster.secret.secretArn,\n },\n});\ncluster.grantDataApiAccess(fn)\n```\n\n**Note**: To invoke the Data API, the resource will need to read the secret associated with the cluster.\n\nTo 
learn more about using the Data API, see the [documentation](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html).\n"
|
|
2699
|
-
},
|
|
2700
1942
|
"targets": {
|
|
2701
1943
|
"dotnet": {
|
|
2702
1944
|
"namespace": "Amazon.CDK.AWS.RDS"
|
|
@@ -2710,10 +1952,6 @@
|
|
|
2710
1952
|
}
|
|
2711
1953
|
},
|
|
2712
1954
|
"aws-cdk-lib.aws_redshift": {
|
|
2713
|
-
"locationInModule": {
|
|
2714
|
-
"filename": "lib/index.ts",
|
|
2715
|
-
"line": 148
|
|
2716
|
-
},
|
|
2717
1955
|
"targets": {
|
|
2718
1956
|
"dotnet": {
|
|
2719
1957
|
"namespace": "Amazon.CDK.AWS.Redshift"
|
|
@@ -2727,10 +1965,6 @@
|
|
|
2727
1965
|
}
|
|
2728
1966
|
},
|
|
2729
1967
|
"aws-cdk-lib.aws_resourcegroups": {
|
|
2730
|
-
"locationInModule": {
|
|
2731
|
-
"filename": "lib/index.ts",
|
|
2732
|
-
"line": 149
|
|
2733
|
-
},
|
|
2734
1968
|
"targets": {
|
|
2735
1969
|
"dotnet": {
|
|
2736
1970
|
"namespace": "Amazon.CDK.AWS.ResourceGroups"
|
|
@@ -2744,10 +1978,6 @@
|
|
|
2744
1978
|
}
|
|
2745
1979
|
},
|
|
2746
1980
|
"aws-cdk-lib.aws_robomaker": {
|
|
2747
|
-
"locationInModule": {
|
|
2748
|
-
"filename": "lib/index.ts",
|
|
2749
|
-
"line": 150
|
|
2750
|
-
},
|
|
2751
1981
|
"targets": {
|
|
2752
1982
|
"dotnet": {
|
|
2753
1983
|
"namespace": "Amazon.CDK.AWS.RoboMaker"
|
|
@@ -2761,13 +1991,6 @@
|
|
|
2761
1991
|
}
|
|
2762
1992
|
},
|
|
2763
1993
|
"aws-cdk-lib.aws_route53": {
|
|
2764
|
-
"locationInModule": {
|
|
2765
|
-
"filename": "lib/index.ts",
|
|
2766
|
-
"line": 151
|
|
2767
|
-
},
|
|
2768
|
-
"readme": {
|
|
2769
|
-
"markdown": "# Amazon Route53 Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nTo add a public hosted zone:\n\n```ts\nimport { aws_route53 as route53 } from 'aws-cdk-lib';\n\nnew route53.PublicHostedZone(this, 'HostedZone', {\n zoneName: 'fully.qualified.domain.com'\n});\n```\n\nTo add a private hosted zone, use `PrivateHostedZone`. Note that\n`enableDnsHostnames` and `enableDnsSupport` must have been enabled for the\nVPC you're configuring for private hosted zones.\n\n```ts\nimport { aws_ec2 as ec2 } from 'aws-cdk-lib';\nimport { aws_route53 as route53 } from 'aws-cdk-lib';\n\nconst vpc = new ec2.Vpc(this, 'VPC');\n\nconst zone = new route53.PrivateHostedZone(this, 'HostedZone', {\n zoneName: 'fully.qualified.domain.com',\n vpc // At least one VPC has to be added to a Private Hosted Zone.\n});\n```\n\nAdditional VPCs can be added with `zone.addVpc()`.\n\n## Adding Records\n\nTo add a TXT record to your zone:\n\n```ts\nimport { aws_route53 as route53 } from 'aws-cdk-lib';\n\nnew route53.TxtRecord(this, 'TXTRecord', {\n zone: myZone,\n recordName: '_foo', // If the name ends with a \".\", it will be used as-is;\n // if it ends with a \".\" followed by the zone name, a trailing \".\" will be added automatically;\n // otherwise, a \".\", the zone name, and a trailing \".\" will be added automatically.\n // Defaults to zone root if not specified.\n values: [ // Will be quoted for you, and \" will be escaped automatically.\n 'Bar!',\n 'Baz?'\n ],\n ttl: Duration.minutes(90), // Optional - default is 30 minutes\n});\n```\n\nTo add a NS record to your zone:\n\n```ts\nimport { aws_route53 as route53 } from 'aws-cdk-lib';\n\nnew route53.NsRecord(this, 'NSRecord', {\n zone: myZone,\n recordName: 'foo', \n values: [ \n 'ns-1.awsdns.co.uk.',\n 'ns-2.awsdns.com.'\n ],\n ttl: Duration.minutes(90), // Optional - default is 30 minutes\n});\n```\n\nTo add a DS record to your zone:\n\n```ts\nimport { aws_route53 as route53 } 
from 'aws-cdk-lib';\n\nnew route53.DsRecord(this, 'DSRecord', {\n zone: myZone,\n recordName: 'foo',\n values: [\n '12345 3 1 123456789abcdef67890123456789abcdef67890',\n ],\n ttl: Duration.minutes(90), // Optional - default is 30 minutes\n});\n```\n\nTo add an A record to your zone:\n\n```ts\nimport { aws_route53 as route53 } from 'aws-cdk-lib';\n\nnew route53.ARecord(this, 'ARecord', {\n zone: myZone,\n target: route53.RecordTarget.fromIpAddresses('1.2.3.4', '5.6.7.8')\n});\n```\n\nTo add an A record for an EC2 instance with an Elastic IP (EIP) to your zone:\n\n```ts\nimport { aws_ec2 as ec2 } from 'aws-cdk-lib';\nimport { aws_route53 as route53 } from 'aws-cdk-lib';\n\nconst instance = new ec2.Instance(this, 'Instance', {\n // ...\n});\n\nconst elasticIp = new ec2.CfnEIP(this, 'EIP', {\n domain: 'vpc',\n instanceId: instance.instanceId\n});\n\nnew route53.ARecord(this, 'ARecord', {\n zone: myZone,\n target: route53.RecordTarget.fromIpAddresses(elasticIp.ref)\n});\n```\n\nTo add an AAAA record pointing to a CloudFront distribution:\n\n```ts\nimport { aws_route53 as route53 } from 'aws-cdk-lib';\nimport { aws_route53_targets as targets } from 'aws-cdk-lib';\n\nnew route53.AaaaRecord(this, 'Alias', {\n zone: myZone,\n target: route53.RecordTarget.fromAlias(new targets.CloudFrontTarget(distribution))\n});\n```\n\nConstructs are available for A, AAAA, CAA, CNAME, MX, NS, SRV and TXT records.\n\nUse the `CaaAmazonRecord` construct to easily restrict certificate authorities\nallowed to issue certificates for a domain to Amazon only.\n\nTo add a NS record to a HostedZone in different account you can do the following:\n\nIn the account containing the parent hosted zone:\n\n```ts\nimport { aws_route53 as route53 } from 'aws-cdk-lib';\n\nconst parentZone = new route53.PublicHostedZone(this, 'HostedZone', {\n zoneName: 'someexample.com',\n crossAccountZoneDelegationPrincipal: new iam.AccountPrincipal('12345678901'),\n crossAccountZoneDelegationRoleName: 
'MyDelegationRole',\n});\n```\n\nIn the account containing the child zone to be delegated:\n\n```ts\nimport { aws_iam as iam } from 'aws-cdk-lib';\nimport { aws_route53 as route53 } from 'aws-cdk-lib';\n\nconst subZone = new route53.PublicHostedZone(this, 'SubZone', {\n zoneName: 'sub.someexample.com'\n});\n\n// import the delegation role by constructing the roleArn\nconst delegationRoleArn = Stack.of(this).formatArn({\n region: '', // IAM is global in each partition\n service: 'iam',\n account: 'parent-account-id',\n resource: 'role',\n resourceName: 'MyDelegationRole',\n});\nconst delegationRole = iam.Role.fromRoleArn(this, 'DelegationRole', delegationRoleArn);\n\n// create the record\nnew route53.CrossAccountZoneDelegationRecord(this, 'delegate', {\n delegatedZone: subZone,\n parentHostedZoneName: 'someexample.com', // or you can use parentHostedZoneId\n delegationRole,\n});\n```\n\n## Imports\n\nIf you don't know the ID of the Hosted Zone to import, you can use the \n`HostedZone.fromLookup`:\n\n```ts\nHostedZone.fromLookup(this, 'MyZone', {\n domainName: 'example.com'\n});\n```\n\n`HostedZone.fromLookup` requires an environment to be configured. Check\nout the [documentation](https://docs.aws.amazon.com/cdk/latest/guide/environments.html) for more documentation and examples. 
CDK \nautomatically looks into your `~/.aws/config` file for the `[default]` profile.\nIf you want to specify a different account run `cdk deploy --profile [profile]`.\n\n```ts\nnew MyDevStack(app, 'dev', { \n env: { \n account: process.env.CDK_DEFAULT_ACCOUNT, \n region: process.env.CDK_DEFAULT_REGION \n}});\n```\n\nIf you know the ID and Name of a Hosted Zone, you can import it directly:\n\n```ts\nconst zone = HostedZone.fromHostedZoneAttributes(this, 'MyZone', {\n zoneName: 'example.com',\n hostedZoneId: 'ZOJJZC49E0EPZ',\n});\n```\n\nAlternatively, use the `HostedZone.fromHostedZoneId` to import hosted zones if\nyou know the ID and the retrieval for the `zoneName` is undesirable.\n\n```ts\nconst zone = HostedZone.fromHostedZoneId(this, 'MyZone', 'ZOJJZC49E0EPZ');\n```\n\n## VPC Endpoint Service Private DNS\n\nWhen you create a VPC endpoint service, AWS generates endpoint-specific DNS hostnames that consumers use to communicate with the service.\nFor example, vpce-1234-abcdev-us-east-1.vpce-svc-123345.us-east-1.vpce.amazonaws.com.\nBy default, your consumers access the service with that DNS name.\nThis can cause problems with HTTPS traffic because the DNS will not match the backend certificate:\n\n```console\ncurl: (60) SSL: no alternative certificate subject name matches target host name 'vpce-abcdefghijklmnopq-rstuvwx.vpce-svc-abcdefghijklmnopq.us-east-1.vpce.amazonaws.com'\n```\n\nEffectively, the endpoint appears untrustworthy. 
To mitigate this, clients have to create an alias for this DNS name in Route53.\n\nPrivate DNS for an endpoint service lets you configure a private DNS name so consumers can\naccess the service using an existing DNS name without creating this Route53 DNS alias\nThis DNS name can also be guaranteed to match up with the backend certificate.\n\nBefore consumers can use the private DNS name, you must verify that you have control of the domain/subdomain.\n\nAssuming your account has ownership of the particular domain/subdomain,\nthis construct sets up the private DNS configuration on the endpoint service,\ncreates all the necessary Route53 entries, and verifies domain ownership.\n\n```ts\nimport { Stack } from 'aws-cdk-lib';\nimport { Vpc, VpcEndpointService } from 'aws-cdk-lib/aws-ec2';\nimport { NetworkLoadBalancer } from 'aws-cdk-lib/aws-elasticloadbalancingv2';\nimport { PublicHostedZone } from 'aws-cdk-lib/aws-route53';\n\nstack = new Stack();\nvpc = new Vpc(stack, 'VPC');\nnlb = new NetworkLoadBalancer(stack, 'NLB', {\n vpc,\n});\nvpces = new VpcEndpointService(stack, 'VPCES', {\n vpcEndpointServiceLoadBalancers: [nlb],\n});\n// You must use a public hosted zone so domain ownership can be verified\nzone = new PublicHostedZone(stack, 'PHZ', {\n zoneName: 'aws-cdk.dev',\n});\nnew VpcEndpointServiceDomainName(stack, 'EndpointDomain', {\n endpointService: vpces,\n domainName: 'my-stuff.aws-cdk.dev',\n publicHostedZone: zone,\n});\n```\n"
|
|
2770
|
-
},
|
|
2771
1994
|
"targets": {
|
|
2772
1995
|
"dotnet": {
|
|
2773
1996
|
"namespace": "Amazon.CDK.AWS.Route53"
|
|
@@ -2781,13 +2004,6 @@
|
|
|
2781
2004
|
}
|
|
2782
2005
|
},
|
|
2783
2006
|
"aws-cdk-lib.aws_route53_patterns": {
|
|
2784
|
-
"locationInModule": {
|
|
2785
|
-
"filename": "lib/index.ts",
|
|
2786
|
-
"line": 152
|
|
2787
|
-
},
|
|
2788
|
-
"readme": {
|
|
2789
|
-
"markdown": "# CDK Construct library for higher-level Route 53 Constructs\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis library provides higher-level Amazon Route 53 constructs which follow common\narchitectural patterns.\n\n## HTTPS Redirect\n\nIf you want to speed up delivery of your web content, you can use Amazon CloudFront,\nthe AWS content delivery network (CDN). CloudFront can deliver your entire website\n—including dynamic, static, streaming, and interactive content—by using a global\nnetwork of edge locations. Requests for your content are automatically routed to the\nedge location that gives your users the lowest latency.\n\nThis construct allows creating a redirect from domainA to domainB using Amazon\nCloudFront and Amazon S3. You can specify multiple domains to be redirected.\n[Learn more](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-cloudfront-distribution.html) about routing traffic to a CloudFront web distribution.\n\nThe `HttpsRedirect` constructs creates:\n\n* Amazon CloudFront distribution - makes website available from data centres\n around the world\n* Amazon S3 bucket - empty bucket used for website hosting redirect (`websiteRedirect`) capabilities.\n* Amazon Route 53 A/AAAA Alias records - routes traffic to the CloudFront distribution\n* AWS Certificate Manager certificate - SSL/TLS certificate used by\n CloudFront for your domain\n\n⚠️ The stack/construct can be used in any region for configuring an HTTPS redirect.\nThe certificate created in Amazon Certificate Manager (ACM) will be in US East (N. Virginia)\nregion. If you use an existing certificate, the AWS region of the certificate\nmust be in US East (N. 
Virginia).\n\nThe following example creates an HTTPS redirect from `foo.example.com` to `bar.example.com`\nAs an existing certificate is not provided, one will be created in `us-east-1` by the CDK.\n\n ```ts\n new HttpsRedirect(stack, 'Redirect', {\n recordNames: ['foo.example.com'],\n targetDomain: 'bar.example.com',\n zone: HostedZone.fromHostedZoneAttributes(stack, 'HostedZone', {\n hostedZoneId: 'ID',\n zoneName: 'example.com',\n })\n });\n ```\n"
|
|
2790
|
-
},
|
|
2791
2007
|
"targets": {
|
|
2792
2008
|
"dotnet": {
|
|
2793
2009
|
"namespace": "Amazon.CDK.AWS.Route53.Patterns"
|
|
@@ -2801,13 +2017,6 @@
|
|
|
2801
2017
|
}
|
|
2802
2018
|
},
|
|
2803
2019
|
"aws-cdk-lib.aws_route53_targets": {
|
|
2804
|
-
"locationInModule": {
|
|
2805
|
-
"filename": "lib/index.ts",
|
|
2806
|
-
"line": 153
|
|
2807
|
-
},
|
|
2808
|
-
"readme": {
|
|
2809
|
-
"markdown": "# Route53 Alias Record Targets for the CDK Route53 Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis library contains Route53 Alias Record targets for:\n\n* API Gateway custom domains\n\n ```ts\n new route53.ARecord(this, 'AliasRecord', {\n zone,\n target: route53.RecordTarget.fromAlias(new alias.ApiGateway(restApi)),\n // or - route53.RecordTarget.fromAlias(new alias.ApiGatewayDomain(domainName)),\n });\n ```\n\n* API Gateway V2 custom domains\n\n ```ts\n\n new route53.ARecord(this, 'AliasRecord', {\n zone,\n target: route53.RecordTarget.fromAlias(new alias.ApiGatewayv2DomainProperties(domainName.regionalDomainName, domainName.regionalHostedZoneId)),\n });\n ```\n\n* CloudFront distributions\n\n ```ts\n new route53.ARecord(this, 'AliasRecord', {\n zone,\n target: route53.RecordTarget.fromAlias(new alias.CloudFrontTarget(distribution)),\n });\n ```\n\n* ELBv2 load balancers\n\n ```ts\n new route53.ARecord(this, 'AliasRecord', {\n zone,\n target: route53.RecordTarget.fromAlias(new alias.LoadBalancerTarget(elbv2)),\n // or - route53.RecordTarget.fromAlias(new alias.ApiGatewayDomain(domainName)),\n });\n ```\n\n* Classic load balancers\n\n ```ts\n new route53.ARecord(this, 'AliasRecord', {\n zone,\n target: route53.RecordTarget.fromAlias(new alias.ClassicLoadBalancerTarget(elb)),\n // or - route53.RecordTarget.fromAlias(new alias.ApiGatewayDomain(domainName)),\n });\n ```\n\n**Important:** Based on [AWS documentation](https://aws.amazon.com/de/premiumsupport/knowledge-center/alias-resource-record-set-route53-cli/), all alias record in Route 53 that points to a Elastic Load Balancer will always include *dualstack* for the DNSName to resolve IPv4/IPv6 addresses (without *dualstack* IPv6 will not resolve).\n\nFor example, if the Amazon-provided DNS for the load balancer is `ALB-xxxxxxx.us-west-2.elb.amazonaws.com`, CDK will create alias target in Route 53 will be 
`dualstack.ALB-xxxxxxx.us-west-2.elb.amazonaws.com`.\n\n* GlobalAccelerator\n\n ```ts\n new route53.ARecord(stack, 'AliasRecord', {\n zone,\n target: route53.RecordTarget.fromAlias(new targets.GlobalAcceleratorTarget(accelerator)),\n // or - route53.RecordTarget.fromAlias(new targets.GlobalAcceleratorDomainTarget('xyz.awsglobalaccelerator.com')),\n });\n ```\n\n**Important:** If you use GlobalAcceleratorDomainTarget, passing a string rather than an instance of IAccelerator, ensure that the string is a valid domain name of an existing Global Accelerator instance.\nSee [the documentation on DNS addressing](https://docs.aws.amazon.com/global-accelerator/latest/dg/dns-addressing-custom-domains.dns-addressing.html) with Global Accelerator for more info.\n\n* InterfaceVpcEndpoints\n\n**Important:** Based on the CFN docs for VPCEndpoints - [see here](attrDnsEntries) - the attributes returned for DnsEntries in CloudFormation is a combination of the hosted zone ID and the DNS name. The entries are ordered as follows: regional public DNS, zonal public DNS, private DNS, and wildcard DNS. 
This order is not enforced for AWS Marketplace services, and therefore this CDK construct is ONLY guaranteed to work with non-marketplace services.\n\n ```ts\n new route53.ARecord(stack, \"AliasRecord\", {\n zone,\n target: route53.RecordTarget.fromAlias(new alias.InterfaceVpcEndpointTarget(interfaceVpcEndpoint))\n });\n ```\n\n* S3 Bucket Website:\n\n**Important:** The Bucket name must strictly match the full DNS name.\nSee [the Developer Guide](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/getting-started.html) for more info.\n\n ```ts\n const [recordName, domainName] = ['www', 'example.com'];\n\n const bucketWebsite = new Bucket(this, 'BucketWebsite', {\n bucketName: [recordName, domainName].join('.'), // www.example.com\n publicReadAccess: true,\n websiteIndexDocument: 'index.html',\n });\n\n const zone = HostedZone.fromLookup(this, 'Zone', {domainName}); // example.com\n\n new route53.ARecord(this, 'AliasRecord', {\n zone,\n recordName, // www\n target: route53.RecordTarget.fromAlias(new alias.BucketWebsiteTarget(bucket)),\n });\n ```\n\n* User pool domain\n\n ```ts\n new route53.ARecord(this, 'AliasRecord', {\n zone,\n target: route53.RecordTarget.fromAlias(new alias.UserPoolDomainTarget(domain)),\n });\n ```\n\n* Route 53 record\n\n ```ts\n new route53.ARecord(this, 'AliasRecord', {\n zone,\n target: route53.RecordTarget.fromAlias(new targets.Route53RecordTarget(record)),\n });\n ```\n\nSee the documentation of `@aws-cdk/aws-route53` for more information.\n"
|
|
2810
|
-
},
|
|
2811
2020
|
"targets": {
|
|
2812
2021
|
"dotnet": {
|
|
2813
2022
|
"namespace": "Amazon.CDK.AWS.Route53.Targets"
|
|
@@ -2821,10 +2030,6 @@
|
|
|
2821
2030
|
}
|
|
2822
2031
|
},
|
|
2823
2032
|
"aws-cdk-lib.aws_route53recoverycontrol": {
|
|
2824
|
-
"locationInModule": {
|
|
2825
|
-
"filename": "lib/index.ts",
|
|
2826
|
-
"line": 154
|
|
2827
|
-
},
|
|
2828
2033
|
"targets": {
|
|
2829
2034
|
"dotnet": {
|
|
2830
2035
|
"namespace": "Amazon.CDK.AWS.Route53RecoveryControl"
|
|
@@ -2838,10 +2043,6 @@
|
|
|
2838
2043
|
}
|
|
2839
2044
|
},
|
|
2840
2045
|
"aws-cdk-lib.aws_route53recoveryreadiness": {
|
|
2841
|
-
"locationInModule": {
|
|
2842
|
-
"filename": "lib/index.ts",
|
|
2843
|
-
"line": 155
|
|
2844
|
-
},
|
|
2845
2046
|
"targets": {
|
|
2846
2047
|
"dotnet": {
|
|
2847
2048
|
"namespace": "Amazon.CDK.AWS.Route53RecoveryReadiness"
|
|
@@ -2855,10 +2056,6 @@
|
|
|
2855
2056
|
}
|
|
2856
2057
|
},
|
|
2857
2058
|
"aws-cdk-lib.aws_route53resolver": {
|
|
2858
|
-
"locationInModule": {
|
|
2859
|
-
"filename": "lib/index.ts",
|
|
2860
|
-
"line": 156
|
|
2861
|
-
},
|
|
2862
2059
|
"targets": {
|
|
2863
2060
|
"dotnet": {
|
|
2864
2061
|
"namespace": "Amazon.CDK.AWS.Route53Resolver"
|
|
@@ -2872,13 +2069,6 @@
|
|
|
2872
2069
|
}
|
|
2873
2070
|
},
|
|
2874
2071
|
"aws-cdk-lib.aws_s3": {
|
|
2875
|
-
"locationInModule": {
|
|
2876
|
-
"filename": "lib/index.ts",
|
|
2877
|
-
"line": 157
|
|
2878
|
-
},
|
|
2879
|
-
"readme": {
|
|
2880
|
-
"markdown": "# Amazon S3 Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nDefine an unencrypted S3 bucket.\n\n```ts\nnew Bucket(this, 'MyFirstBucket');\n```\n\n`Bucket` constructs expose the following deploy-time attributes:\n\n * `bucketArn` - the ARN of the bucket (i.e. `arn:aws:s3:::bucket_name`)\n * `bucketName` - the name of the bucket (i.e. `bucket_name`)\n * `bucketWebsiteUrl` - the Website URL of the bucket (i.e.\n `http://bucket_name.s3-website-us-west-1.amazonaws.com`)\n * `bucketDomainName` - the URL of the bucket (i.e. `bucket_name.s3.amazonaws.com`)\n * `bucketDualStackDomainName` - the dual-stack URL of the bucket (i.e.\n `bucket_name.s3.dualstack.eu-west-1.amazonaws.com`)\n * `bucketRegionalDomainName` - the regional URL of the bucket (i.e.\n `bucket_name.s3.eu-west-1.amazonaws.com`)\n * `arnForObjects(pattern)` - the ARN of an object or objects within the bucket (i.e.\n `arn:aws:s3:::bucket_name/exampleobject.png` or\n `arn:aws:s3:::bucket_name/Development/*`)\n * `urlForObject(key)` - the HTTP URL of an object within the bucket (i.e.\n `https://s3.cn-north-1.amazonaws.com.cn/china-bucket/mykey`)\n * `virtualHostedUrlForObject(key)` - the virtual-hosted style HTTP URL of an object\n within the bucket (i.e. 
`https://china-bucket-s3.cn-north-1.amazonaws.com.cn/mykey`)\n * `s3UrlForObject(key)` - the S3 URL of an object within the bucket (i.e.\n `s3://bucket/mykey`)\n\n## Encryption\n\nDefine a KMS-encrypted bucket:\n\n```ts\nconst bucket = new Bucket(this, 'MyEncryptedBucket', {\n encryption: BucketEncryption.KMS\n});\n\n// you can access the encryption key:\nassert(bucket.encryptionKey instanceof kms.Key);\n```\n\nYou can also supply your own key:\n\n```ts\nconst myKmsKey = new kms.Key(this, 'MyKey');\n\nconst bucket = new Bucket(this, 'MyEncryptedBucket', {\n encryption: BucketEncryption.KMS,\n encryptionKey: myKmsKey\n});\n\nassert(bucket.encryptionKey === myKmsKey);\n```\n\nEnable KMS-SSE encryption via [S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html):\n\n```ts\nconst bucket = new Bucket(this, 'MyEncryptedBucket', {\n encryption: BucketEncryption.KMS,\n bucketKeyEnabled: true\n});\n\nassert(bucket.bucketKeyEnabled === true);\n```\n\nUse `BucketEncryption.ManagedKms` to use the S3 master KMS key:\n\n```ts\nconst bucket = new Bucket(this, 'Buck', {\n encryption: BucketEncryption.KMS_MANAGED\n});\n\nassert(bucket.encryptionKey == null);\n```\n\n## Permissions\n\nA bucket policy will be automatically created for the bucket upon the first call to\n`addToResourcePolicy(statement)`:\n\n```ts\nconst bucket = new Bucket(this, 'MyBucket');\nconst result = bucket.addToResourcePolicy(new iam.PolicyStatement({\n actions: ['s3:GetObject'],\n resources: [bucket.arnForObjects('file.txt')],\n principals: [new iam.AccountRootPrincipal()],\n}));\n```\n\nIf you try to add a policy statement to an existing bucket, this method will \nnot do anything:\n\n```ts\nconst bucket = Bucket.fromBucketName(this, 'existingBucket', 'bucket-name');\n\n// Nothing will change here\nconst result = bucket.addToResourcePolicy(new iam.PolicyStatement({\n ...\n}));\n```\n\nThat's because it's not possible to tell whether the bucket \nalready has a policy attached, let alone 
to re-use that policy to add more \nstatements to it. We recommend that you always check the result of the call:\n\n```ts\nconst result = bucket.addToResourcePolicy(...)\nif (!result.statementAdded) {\n // Uh-oh! Someone probably made a mistake here.\n}\n```\n\nThe bucket policy can be directly accessed after creation to add statements or\nadjust the removal policy.\n\n```ts\nbucket.policy?.applyRemovalPolicy(RemovalPolicy.RETAIN);\n```\n\nMost of the time, you won't have to manipulate the bucket policy directly.\nInstead, buckets have \"grant\" methods called to give prepackaged sets of permissions\nto other resources. For example:\n\n```ts\nconst lambda = new lambda.Function(this, 'Lambda', { /* ... */ });\n\nconst bucket = new Bucket(this, 'MyBucket');\nbucket.grantReadWrite(lambda);\n```\n\nWill give the Lambda's execution role permissions to read and write\nfrom the bucket.\n\n## AWS Foundational Security Best Practices\n\n### Enforcing SSL\n\nTo require all requests use Secure Socket Layer (SSL):\n\n```ts\nconst bucket = new Bucket(this, 'Bucket', {\n enforceSSL: true\n});\n```\n\n## Sharing buckets between stacks\n\nTo use a bucket in a different stack in the same CDK application, pass the object to the other stack:\n\n[sharing bucket between stacks](test/integ.bucket-sharing.lit.ts)\n\n## Importing existing buckets\n\nTo import an existing bucket into your CDK application, use the `Bucket.fromBucketAttributes`\nfactory method. 
This method accepts `BucketAttributes` which describes the properties of an already\nexisting bucket:\n\n```ts\nconst bucket = Bucket.fromBucketAttributes(this, 'ImportedBucket', {\n bucketArn: 'arn:aws:s3:::my-bucket'\n});\n\n// now you can just call methods on the bucket\nbucket.addEventNotification(EventType.OBJECT_CREATED, ...);\n```\n\nAlternatively, short-hand factories are available as `Bucket.fromBucketName` and\n`Bucket.fromBucketArn`, which will derive all bucket attributes from the bucket\nname or ARN respectively:\n\n```ts\nconst byName = Bucket.fromBucketName(this, 'BucketByName', 'my-bucket');\nconst byArn = Bucket.fromBucketArn(this, 'BucketByArn', 'arn:aws:s3:::my-bucket');\n```\n\nThe bucket's region defaults to the current stack's region, but can also be explicitly set in cases where one of the bucket's\nregional properties needs to contain the correct values.\n\n```ts\nconst myCrossRegionBucket = Bucket.fromBucketAttributes(this, 'CrossRegionImport', {\n bucketArn: 'arn:aws:s3:::my-bucket',\n region: 'us-east-1',\n});\n// myCrossRegionBucket.bucketRegionalDomainName === 'my-bucket.s3.us-east-1.amazonaws.com'\n```\n\n## Bucket Notifications\n\nThe Amazon S3 notification feature enables you to receive notifications when\ncertain events happen in your bucket as described under [S3 Bucket\nNotifications] of the S3 Developer Guide.\n\nTo subscribe for bucket notifications, use the `bucket.addEventNotification` method. 
The\n`bucket.addObjectCreatedNotification` and `bucket.addObjectRemovedNotification` can also be used for\nthese common use cases.\n\nThe following example will subscribe an SNS topic to be notified of all `s3:ObjectCreated:*` events:\n\n```ts\nimport { aws_s3_notifications as s3n } from 'aws-cdk-lib';\n\nconst topic = new sns.Topic(this, 'MyTopic');\nbucket.addEventNotification(s3.EventType.OBJECT_CREATED, new s3n.SnsDestination(topic));\n```\n\nThis call will also ensure that the topic policy can accept notifications for\nthis specific bucket.\n\nSupported S3 notification targets are exposed by the `@aws-cdk/aws-s3-notifications` package.\n\nIt is also possible to specify S3 object key filters when subscribing. The\nfollowing example will notify `myQueue` when objects prefixed with `foo/` and\nhave the `.jpg` suffix are removed from the bucket.\n\n```ts\nbucket.addEventNotification(s3.EventType.OBJECT_REMOVED,\n new s3n.SqsDestination(myQueue),\n { prefix: 'foo/', suffix: '.jpg' });\n```\n\nAdding notifications on existing buckets:\n\n```ts\nconst bucket = Bucket.fromBucketAttributes(this, 'ImportedBucket', {\n bucketArn: 'arn:aws:s3:::my-bucket'\n});\nbucket.addEventNotification(s3.EventType.OBJECT_CREATED, new s3n.SnsDestination(topic));\n```\n\n[S3 Bucket Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html\n\n\n## Block Public Access\n\nUse `blockPublicAccess` to specify [block public access settings] on the bucket.\n\nEnable all block public access settings:\n\n```ts\nconst bucket = new Bucket(this, 'MyBlockedBucket', {\n blockPublicAccess: BlockPublicAccess.BLOCK_ALL\n});\n```\n\nBlock and ignore public ACLs:\n\n```ts\nconst bucket = new Bucket(this, 'MyBlockedBucket', {\n blockPublicAccess: BlockPublicAccess.BLOCK_ACLS\n});\n```\n\nAlternatively, specify the settings manually:\n\n```ts\nconst bucket = new Bucket(this, 'MyBlockedBucket', {\n blockPublicAccess: new BlockPublicAccess({ blockPublicPolicy: true 
})\n});\n```\n\nWhen `blockPublicPolicy` is set to `true`, `grantPublicRead()` throws an error.\n\n[block public access settings]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html\n\n## Logging configuration\n\nUse `serverAccessLogsBucket` to describe where server access logs are to be stored.\n\n```ts\nconst accessLogsBucket = new Bucket(this, 'AccessLogsBucket');\n\nconst bucket = new Bucket(this, 'MyBucket', {\n serverAccessLogsBucket: accessLogsBucket,\n});\n```\n\nIt's also possible to specify a prefix for Amazon S3 to assign to all log object keys.\n\n```ts\nconst bucket = new Bucket(this, 'MyBucket', {\n serverAccessLogsBucket: accessLogsBucket,\n serverAccessLogsPrefix: 'logs'\n});\n```\n\n[S3 Server access logging]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerLogs.html\n\n## S3 Inventory\n\nAn [inventory](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) contains a list of the objects in the source bucket and metadata for each object. The inventory lists are stored in the destination bucket as a CSV file compressed with GZIP, as an Apache optimized row columnar (ORC) file compressed with ZLIB, or as an Apache Parquet (Parquet) file compressed with Snappy.\n\nYou can configure multiple inventory lists for a bucket. 
You can configure what object metadata to include in the inventory, whether to list all object versions or only current versions, where to store the inventory list file output, and whether to generate the inventory on a daily or weekly basis.\n\n```ts\nconst inventoryBucket = new s3.Bucket(this, 'InventoryBucket');\n\nconst dataBucket = new s3.Bucket(this, 'DataBucket', {\n inventories: [\n {\n frequency: s3.InventoryFrequency.DAILY,\n includeObjectVersions: s3.InventoryObjectVersion.CURRENT,\n destination: {\n bucket: inventoryBucket,\n },\n },\n {\n frequency: s3.InventoryFrequency.WEEKLY,\n includeObjectVersions: s3.InventoryObjectVersion.ALL,\n destination: {\n bucket: inventoryBucket,\n prefix: 'with-all-versions',\n },\n }\n ]\n});\n```\n\nIf the destination bucket is created as part of the same CDK application, the necessary permissions will be automatically added to the bucket policy.\nHowever, if you use an imported bucket (i.e `Bucket.fromXXX()`), you'll have to make sure it contains the following policy document:\n\n```json\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Sid\": \"InventoryAndAnalyticsExamplePolicy\",\n \"Effect\": \"Allow\",\n \"Principal\": { \"Service\": \"s3.amazonaws.com\" },\n \"Action\": \"s3:PutObject\",\n \"Resource\": [\"arn:aws:s3:::destinationBucket/*\"]\n }\n ]\n}\n```\n\n## Website redirection\n\nYou can use the two following properties to specify the bucket [redirection policy]. 
Please note that these methods cannot both be applied to the same bucket.\n\n[redirection policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects\n\n### Static redirection\n\nYou can statically redirect a to a given Bucket URL or any other host name with `websiteRedirect`:\n\n```ts\nconst bucket = new Bucket(this, 'MyRedirectedBucket', {\n websiteRedirect: { hostName: 'www.example.com' }\n});\n```\n\n### Routing rules\n\nAlternatively, you can also define multiple `websiteRoutingRules`, to define complex, conditional redirections:\n\n```ts\nconst bucket = new Bucket(this, 'MyRedirectedBucket', {\n websiteRoutingRules: [{\n hostName: 'www.example.com',\n httpRedirectCode: '302',\n protocol: RedirectProtocol.HTTPS,\n replaceKey: ReplaceKey.prefixWith('test/'),\n condition: {\n httpErrorCodeReturnedEquals: '200',\n keyPrefixEquals: 'prefix',\n }\n }]\n});\n```\n\n## Filling the bucket as part of deployment\n\nTo put files into a bucket as part of a deployment (for example, to host a\nwebsite), see the `@aws-cdk/aws-s3-deployment` package, which provides a\nresource that can do just that.\n\n## The URL for objects\n\nS3 provides two types of URLs for accessing objects via HTTP(S). Path-Style and\n[Virtual Hosted-Style](https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html)\nURL. 
Path-Style is a classic way and will be\n[deprecated](https://aws.amazon.com/jp/blogs/aws/amazon-s3-path-deprecation-plan-the-rest-of-the-story).\nWe recommend to use Virtual Hosted-Style URL for newly made bucket.\n\nYou can generate both of them.\n\n```ts\nbucket.urlForObject('objectname'); // Path-Style URL\nbucket.virtualHostedUrlForObject('objectname'); // Virtual Hosted-Style URL\nbucket.virtualHostedUrlForObject('objectname', { regional: false }); // Virtual Hosted-Style URL but non-regional\n```\n\n### Object Ownership\n\nYou can use the two following properties to specify the bucket [object Ownership].\n\n[object Ownership]: https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html\n\n#### Object writer\n\nThe Uploading account will own the object.\n\n```ts\nnew s3.Bucket(this, 'MyBucket', {\n objectOwnership: s3.ObjectOwnership.OBJECT_WRITER,\n});\n```\n\n#### Bucket owner preferred\n\nThe bucket owner will own the object if the object is uploaded with the bucket-owner-full-control canned ACL. Without this setting and canned ACL, the object is uploaded and remains owned by the uploading account.\n\n```ts\nnew s3.Bucket(this, 'MyBucket', {\n objectOwnership: s3.ObjectOwnership.BUCKET_OWNER_PREFERRED,\n});\n```\n\n### Bucket deletion\n\nWhen a bucket is removed from a stack (or the stack is deleted), the S3\nbucket will be removed according to its removal policy (which by default will\nsimply orphan the bucket and leave it in your AWS account). If the removal\npolicy is set to `RemovalPolicy.DESTROY`, the bucket will be deleted as long\nas it does not contain any objects.\n\nTo override this and force all objects to get deleted during bucket deletion,\nenable the`autoDeleteObjects` option.\n\n```ts\nconst bucket = new Bucket(this, 'MyTempFileBucket', {\n removalPolicy: RemovalPolicy.DESTROY,\n autoDeleteObjects: true,\n});\n```\n"
|
|
2881
|
-
},
|
|
2882
2072
|
"targets": {
|
|
2883
2073
|
"dotnet": {
|
|
2884
2074
|
"namespace": "Amazon.CDK.AWS.S3"
|
|
@@ -2892,13 +2082,6 @@
|
|
|
2892
2082
|
}
|
|
2893
2083
|
},
|
|
2894
2084
|
"aws-cdk-lib.aws_s3_assets": {
|
|
2895
|
-
"locationInModule": {
|
|
2896
|
-
"filename": "lib/index.ts",
|
|
2897
|
-
"line": 158
|
|
2898
|
-
},
|
|
2899
|
-
"readme": {
|
|
2900
|
-
"markdown": "# AWS CDK Assets\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nAssets are local files or directories which are needed by a CDK app. A common\nexample is a directory which contains the handler code for a Lambda function,\nbut assets can represent any artifact that is needed for the app's operation.\n\nWhen deploying a CDK app that includes constructs with assets, the CDK toolkit\nwill first upload all the assets to S3, and only then deploy the stacks. The S3\nlocations of the uploaded assets will be passed in as CloudFormation Parameters\nto the relevant stacks.\n\nThe following JavaScript example defines an directory asset which is archived as\na .zip file and uploaded to S3 during deployment.\n\n[Example of a ZipDirectoryAsset](./test/integ.assets.directory.lit.ts)\n\nThe following JavaScript example defines a file asset, which is uploaded as-is\nto an S3 bucket during deployment.\n\n[Example of a FileAsset](./test/integ.assets.file.lit.ts)\n\n## Attributes\n\n`Asset` constructs expose the following deploy-time attributes:\n\n * `s3BucketName` - the name of the assets S3 bucket.\n * `s3ObjectKey` - the S3 object key of the asset file (whether it's a file or a zip archive)\n * `s3ObjectUrl` - the S3 object URL of the asset (i.e. s3://mybucket/mykey.zip)\n * `httpUrl` - the S3 HTTP URL of the asset (i.e. https://s3.us-east-1.amazonaws.com/mybucket/mykey.zip)\n\nIn the following example, the various asset attributes are exported as stack outputs:\n\n[Example of referencing an asset](./test/integ.assets.refs.lit.ts)\n\n## Permissions\n\nIAM roles, users or groups which need to be able to read assets in runtime will should be\ngranted IAM permissions. 
To do that use the `asset.grantRead(principal)` method:\n\nThe following examples grants an IAM group read permissions on an asset:\n\n[Example of granting read access to an asset](./test/integ.assets.permissions.lit.ts)\n\n## How does it work\n\nWhen an asset is defined in a construct, a construct metadata entry\n`aws:cdk:asset` is emitted with instructions on where to find the asset and what\ntype of packaging to perform (`zip` or `file`). Furthermore, the synthesized\nCloudFormation template will also include two CloudFormation parameters: one for\nthe asset's bucket and one for the asset S3 key. Those parameters are used to\nreference the deploy-time values of the asset (using `{ Ref: \"Param\" }`).\n\nThen, when the stack is deployed, the toolkit will package the asset (i.e. zip\nthe directory), calculate an MD5 hash of the contents and will render an S3 key\nfor this asset within the toolkit's asset store. If the file doesn't exist in\nthe asset store, it is uploaded during deployment.\n\n> The toolkit's asset store is an S3 bucket created by the toolkit for each\n environment the toolkit operates in (environment = account + region).\n\nNow, when the toolkit deploys the stack, it will set the relevant CloudFormation\nParameters to point to the actual bucket and key for each asset.\n\n## Asset Bundling\n\nWhen defining an asset, you can use the `bundling` option to specify a command\nto run inside a docker container. The command can read the contents of the asset\nsource from `/asset-input` and is expected to write files under `/asset-output`\n(directories mapped inside the container). 
The files under `/asset-output` will\nbe zipped and uploaded to S3 as the asset.\n\nThe following example uses custom asset bundling to convert a markdown file to html:\n\n[Example of using asset bundling](./test/integ.assets.bundling.lit.ts).\n\nThe bundling docker image (`image`) can either come from a registry (`DockerImage.fromRegistry`)\nor it can be built from a `Dockerfile` located inside your project (`DockerImage.fromBuild`).\n\nYou can set the `CDK_DOCKER` environment variable in order to provide a custom\ndocker program to execute. This may sometime be needed when building in\nenvironments where the standard docker cannot be executed (see\nhttps://github.com/aws/aws-cdk/issues/8460 for details).\n\nUse `local` to specify a local bundling provider. The provider implements a\nmethod `tryBundle()` which should return `true` if local bundling was performed.\nIf `false` is returned, docker bundling will be done:\n\n```ts\nnew assets.Asset(this, 'BundledAsset', {\n path: '/path/to/asset',\n bundling: {\n local: {\n tryBundle(outputDir: string, options: BundlingOptions) {\n if (canRunLocally) {\n // perform local bundling here\n return true;\n }\n return false;\n },\n },\n // Docker bundling fallback\n image: DockerImage.fromRegistry('alpine'),\n entrypoint: ['/bin/sh', '-c'],\n command: ['bundle'],\n },\n});\n```\n\nAlthough optional, it's recommended to provide a local bundling method which can\ngreatly improve performance.\n\nIf the bundling output contains a single archive file (zip or jar) it will be\nuploaded to S3 as-is and will not be zipped. Otherwise the contents of the\noutput directory will be zipped and the zip file will be uploaded to S3. 
This\nis the default behavior for `bundling.outputType` (`BundlingOutput.AUTO_DISCOVER`).\n\nUse `BundlingOutput.NOT_ARCHIVED` if the bundling output must always be zipped:\n\n```ts\nconst asset = new assets.Asset(this, 'BundledAsset', {\n path: '/path/to/asset',\n bundling: {\n image: DockerImage.fromRegistry('alpine'),\n command: ['command-that-produces-an-archive.sh'],\n outputType: BundlingOutput.NOT_ARCHIVED, // Bundling output will be zipped even though it produces a single archive file.\n },\n});\n```\n\nUse `BundlingOutput.ARCHIVED` if the bundling output contains a single archive file and\nyou don't want it to be zipped.\n\n## CloudFormation Resource Metadata\n\n> NOTE: This section is relevant for authors of AWS Resource Constructs.\n\nIn certain situations, it is desirable for tools to be able to know that a certain CloudFormation\nresource is using a local asset. For example, SAM CLI can be used to invoke AWS Lambda functions\nlocally for debugging purposes.\n\nTo enable such use cases, external tools will consult a set of metadata entries on AWS CloudFormation\nresources:\n\n* `aws:asset:path` points to the local path of the asset.\n* `aws:asset:property` is the name of the resource property where the asset is used\n\nUsing these two metadata entries, tools will be able to identify that assets are used\nby a certain resource, and enable advanced local experiences.\n\nTo add these metadata entries to a resource, use the\n`asset.addResourceMetadata(resource, property)` method.\n\nSee https://github.com/aws/aws-cdk/issues/1432 for more details\n"
|
|
2901
|
-
},
|
|
2902
2085
|
"targets": {
|
|
2903
2086
|
"dotnet": {
|
|
2904
2087
|
"namespace": "Amazon.CDK.AWS.S3.Assets"
|
|
@@ -2912,13 +2095,6 @@
|
|
|
2912
2095
|
}
|
|
2913
2096
|
},
|
|
2914
2097
|
"aws-cdk-lib.aws_s3_deployment": {
|
|
2915
|
-
"locationInModule": {
|
|
2916
|
-
"filename": "lib/index.ts",
|
|
2917
|
-
"line": 159
|
|
2918
|
-
},
|
|
2919
|
-
"readme": {
|
|
2920
|
-
"markdown": "# AWS S3 Deployment Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\n> __Status: Experimental__\n\nThis library allows populating an S3 bucket with the contents of .zip files\nfrom other S3 buckets or from local disk.\n\nThe following example defines a publicly accessible S3 bucket with web hosting\nenabled and populates it from a local directory on disk.\n\n```ts\nconst websiteBucket = new s3.Bucket(this, 'WebsiteBucket', {\n websiteIndexDocument: 'index.html',\n publicReadAccess: true\n});\n\nnew s3deploy.BucketDeployment(this, 'DeployWebsite', {\n sources: [s3deploy.Source.asset('./website-dist')],\n destinationBucket: websiteBucket,\n destinationKeyPrefix: 'web/static' // optional prefix in destination bucket\n});\n```\n\nThis is what happens under the hood:\n\n1. When this stack is deployed (either via `cdk deploy` or via CI/CD), the\n contents of the local `website-dist` directory will be archived and uploaded\n to an intermediary assets bucket. If there is more than one source, they will\n be individually uploaded.\n2. The `BucketDeployment` construct synthesizes a custom CloudFormation resource\n of type `Custom::CDKBucketDeployment` into the template. The source bucket/key\n is set to point to the assets bucket.\n3. The custom resource downloads the .zip archive, extracts it and issues `aws\n s3 sync --delete` against the destination bucket (in this case\n `websiteBucket`). 
If there is more than one source, the sources will be\n downloaded and merged pre-deployment at this step.\n\n\n## Supported sources\n\nThe following source types are supported for bucket deployments:\n\n - Local .zip file: `s3deploy.Source.asset('/path/to/local/file.zip')`\n - Local directory: `s3deploy.Source.asset('/path/to/local/directory')`\n - Another bucket: `s3deploy.Source.bucket(bucket, zipObjectKey)`\n\nTo create a source from a single file, you can pass `AssetOptions` to exclude\nall but a single file:\n\n - Single file: `s3deploy.Source.asset('/path/to/local/directory', { exclude: ['**', '!onlyThisFile.txt'] })`\n\n**IMPORTANT** The `aws-s3-deployment` module is only intended to be used with\nzip files from trusted sources. Directories bundled by the CDK CLI (by using\n`Source.asset()` on a directory) are safe. If you are using `Source.asset()` or\n`Source.bucket()` to reference an existing zip file, make sure you trust the\nfile you are referencing. Zips from untrusted sources might be able to execute\narbitrary code in the Lambda Function used by this module, and use its permissions\nto read or write unexpected files in the S3 bucket.\n\n## Retain on Delete\n\nBy default, the contents of the destination bucket will **not** be deleted when the\n`BucketDeployment` resource is removed from the stack or when the destination is\nchanged. 
You can use the option `retainOnDelete: false` to disable this behavior,\nin which case the contents will be deleted.\n\nConfiguring this has a few implications you should be aware of:\n\n- **Logical ID Changes**\n\n Changing the logical ID of the `BucketDeployment` construct, without changing the destination\n (for example due to refactoring, or intentional ID change) **will result in the deletion of the objects**.\n This is because CloudFormation will first create the new resource, which will have no affect,\n followed by a deletion of the old resource, which will cause a deletion of the objects,\n since the destination hasn't changed, and `retainOnDelete` is `false`.\n\n- **Destination Changes**\n\n When the destination bucket or prefix is changed, all files in the previous destination will **first** be\n deleted and then uploaded to the new destination location. This could have availability implications\n on your users.\n\n### General Recommendations\n\n#### Shared Bucket\n\nIf the destination bucket **is not** dedicated to the specific `BucketDeployment` construct (i.e shared by other entities),\nwe recommend to always configure the `destinationKeyPrefix` property. This will prevent the deployment from\naccidentally deleting data that wasn't uploaded by it.\n\n#### Dedicated Bucket\n\nIf the destination bucket **is** dedicated, it might be reasonable to skip the prefix configuration,\nin which case, we recommend to remove `retainOnDelete: false`, and instead, configure the\n[`autoDeleteObjects`](https://docs.aws.amazon.com/cdk/api/latest/docs/aws-s3-readme.html#bucket-deletion)\nproperty on the destination bucket. This will avoid the logical ID problem mentioned above.\n\n## Prune\n\nBy default, files in the destination bucket that don't exist in the source will be deleted\nwhen the `BucketDeployment` resource is created or updated. 
You can use the option `prune: false` to disable\nthis behavior, in which case the files will not be deleted.\n\n```ts\nnew s3deploy.BucketDeployment(this, 'DeployMeWithoutDeletingFilesOnDestination', {\n sources: [s3deploy.Source.asset(path.join(__dirname, 'my-website'))],\n destinationBucket,\n prune: false,\n});\n```\n\nThis option also enables you to specify multiple bucket deployments for the same destination bucket & prefix,\neach with its own characteristics. For example, you can set different cache-control headers\nbased on file extensions:\n\n```ts\nnew BucketDeployment(this, 'BucketDeployment', {\n sources: [Source.asset('./website', { exclude: ['index.html'] })],\n destinationBucket: bucket,\n cacheControl: [CacheControl.fromString('max-age=31536000,public,immutable')],\n prune: false,\n});\n\nnew BucketDeployment(this, 'HTMLBucketDeployment', {\n sources: [Source.asset('./website', { exclude: ['*', '!index.html'] })],\n destinationBucket: bucket,\n cacheControl: [CacheControl.fromString('max-age=0,no-cache,no-store,must-revalidate')],\n prune: false,\n});\n```\n\n## Exclude and Include Filters\n\nThere are two points at which filters are evaluated in a deployment: asset bundling and the actual deployment. If you simply want to exclude files in the asset bundling process, you should leverage the `exclude` property of `AssetOptions` when defining your source:\n\n```ts\nnew BucketDeployment(this, 'HTMLBucketDeployment', {\n sources: [Source.asset('./website', { exclude: ['*', '!index.html'] })],\n destinationBucket: bucket,\n});\n```\n\nIf you want to specify filters to be used in the deployment process, you can use the `exclude` and `include` filters on `BucketDeployment`. If excluded, these files will not be deployed to the destination bucket. 
In addition, if the file already exists in the destination bucket, it will not be deleted if you are using the `prune` option:\n\n```ts\nnew s3deploy.BucketDeployment(this, 'DeployButExcludeSpecificFiles', {\n sources: [s3deploy.Source.asset(path.join(__dirname, 'my-website'))],\n destinationBucket,\n exclude: ['*.txt']\n});\n```\n\nThese filters follow the same format that is used for the AWS CLI. See the CLI documentation for information on [Using Include and Exclude Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/index.html#use-of-exclude-and-include-filters).\n\n## Objects metadata\n\nYou can specify metadata to be set on all the objects in your deployment.\nThere are 2 types of metadata in S3: system-defined metadata and user-defined metadata.\nSystem-defined metadata have a special purpose, for example cache-control defines how long to keep an object cached.\nUser-defined metadata are not used by S3 and keys always begin with `x-amz-meta-` (this prefix is added automatically).\n\nSystem defined metadata keys include the following:\n\n- cache-control (`--cache-control` in `aws s3 sync`)\n- content-disposition (`--content-disposition` in `aws s3 sync`)\n- content-encoding (`--content-encoding` in `aws s3 sync`)\n- content-language (`--content-language` in `aws s3 sync`)\n- content-type (`--content-type` in `aws s3 sync`)\n- expires (`--expires` in `aws s3 sync`)\n- x-amz-storage-class (`--storage-class` in `aws s3 sync`)\n- x-amz-website-redirect-location (`--website-redirect` in `aws s3 sync`)\n- x-amz-server-side-encryption (`--sse` in `aws s3 sync`)\n- x-amz-server-side-encryption-aws-kms-key-id (`--sse-kms-key-id` in `aws s3 sync`)\n- x-amz-server-side-encryption-customer-algorithm (`--sse-c-copy-source` in `aws s3 sync`)\n- x-amz-acl (`--acl` in `aws s3 sync`)\n\nYou can find more information about system defined metadata keys in\n[S3 PutObject documentation](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)\nand [`aws s3 
sync` documentation](https://docs.aws.amazon.com/cli/latest/reference/s3/sync.html).\n\n```ts\nconst websiteBucket = new s3.Bucket(this, 'WebsiteBucket', {\n websiteIndexDocument: 'index.html',\n publicReadAccess: true\n});\n\nnew s3deploy.BucketDeployment(this, 'DeployWebsite', {\n sources: [s3deploy.Source.asset('./website-dist')],\n destinationBucket: websiteBucket,\n destinationKeyPrefix: 'web/static', // optional prefix in destination bucket\n metadata: { A: \"1\", b: \"2\" }, // user-defined metadata\n\n // system-defined metadata\n contentType: \"text/html\",\n contentLanguage: \"en\",\n storageClass: StorageClass.INTELLIGENT_TIERING,\n serverSideEncryption: ServerSideEncryption.AES_256,\n cacheControl: [CacheControl.setPublic(), CacheControl.maxAge(cdk.Duration.hours(1))],\n accessControl: s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,\n});\n```\n\n## CloudFront Invalidation\n\nYou can provide a CloudFront distribution and optional paths to invalidate after the bucket deployment finishes.\n\n```ts\nimport { aws_cloudfront as cloudfront } from 'aws-cdk-lib';\nimport { aws_cloudfront_origins as origins } from 'aws-cdk-lib';\n\nconst bucket = new s3.Bucket(this, 'Destination');\n\n// Handles buckets whether or not they are configured for website hosting.\nconst distribution = new cloudfront.Distribution(this, 'Distribution', {\n defaultBehavior: { origin: new origins.S3Origin(bucket) },\n});\n\nnew s3deploy.BucketDeployment(this, 'DeployWithInvalidation', {\n sources: [s3deploy.Source.asset('./website-dist')],\n destinationBucket: bucket,\n distribution,\n distributionPaths: ['/images/*.png'],\n});\n```\n\n## Memory Limit\n\nThe default memory limit for the deployment resource is 128MiB. 
If you need to\ncopy larger files, you can use the `memoryLimit` configuration to specify the\nsize of the AWS Lambda resource handler.\n\n> NOTE: a new AWS Lambda handler will be created in your stack for each memory\n> limit configuration.\n\n## Notes\n\n- This library uses an AWS CloudFormation custom resource which about 10MiB in\n size. The code of this resource is bundled with this library.\n- AWS Lambda execution time is limited to 15min. This limits the amount of data\n which can be deployed into the bucket by this timeout.\n- When the `BucketDeployment` is removed from the stack, the contents are retained\n in the destination bucket ([#952](https://github.com/aws/aws-cdk/issues/952)).\n- Bucket deployment _only happens_ during stack create/update. This means that\n if you wish to update the contents of the destination, you will need to\n change the source s3 key (or bucket), so that the resource will be updated.\n This is inline with best practices. If you use local disk assets, this will\n happen automatically whenever you modify the asset, since the S3 key is based\n on a hash of the asset contents.\n\n## Development\n\nThe custom resource is implemented in Python 3.6 in order to be able to leverage\nthe AWS CLI for \"aws s3 sync\". The code is under [`lib/lambda`](https://github.com/aws/aws-cdk/tree/master/packages/%40aws-cdk/aws-s3-deployment/lib/lambda) and\nunit tests are under [`test/lambda`](https://github.com/aws/aws-cdk/tree/master/packages/%40aws-cdk/aws-s3-deployment/test/lambda).\n\nThis package requires Python 3.6 during build time in order to create the custom\nresource Lambda bundle and test it. It also relies on a few bash scripts, so\nmight be tricky to build on Windows.\n\n## Roadmap\n\n - [ ] Support \"blue/green\" deployments ([#954](https://github.com/aws/aws-cdk/issues/954))\n"
|
|
2921
|
-
},
|
|
2922
2098
|
"targets": {
|
|
2923
2099
|
"dotnet": {
|
|
2924
2100
|
"namespace": "Amazon.CDK.AWS.S3.Deployment"
|
|
@@ -2932,13 +2108,6 @@
|
|
|
2932
2108
|
}
|
|
2933
2109
|
},
|
|
2934
2110
|
"aws-cdk-lib.aws_s3_notifications": {
|
|
2935
|
-
"locationInModule": {
|
|
2936
|
-
"filename": "lib/index.ts",
|
|
2937
|
-
"line": 160
|
|
2938
|
-
},
|
|
2939
|
-
"readme": {
|
|
2940
|
-
"markdown": "# S3 Bucket Notifications Destinations\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis module includes integration classes for using Topics, Queues or Lambdas\nas S3 Notification Destinations.\n\n## Example\n\nThe following example shows how to send a notification to an SNS\ntopic when an object is created in an S3 bucket:\n\n```ts\nimport { aws_s3_notifications as s3n } from 'aws-cdk-lib';\n\nconst bucket = new s3.Bucket(stack, 'Bucket');\nconst topic = new sns.Topic(stack, 'Topic');\n\nbucket.addEventNotification(s3.EventType.OBJECT_CREATED_PUT, new s3n.SnsDestination(topic));\n```\n"
|
|
2941
|
-
},
|
|
2942
2111
|
"targets": {
|
|
2943
2112
|
"dotnet": {
|
|
2944
2113
|
"namespace": "Amazon.CDK.AWS.S3.Notifications"
|
|
@@ -2952,10 +2121,6 @@
|
|
|
2952
2121
|
}
|
|
2953
2122
|
},
|
|
2954
2123
|
"aws-cdk-lib.aws_s3objectlambda": {
|
|
2955
|
-
"locationInModule": {
|
|
2956
|
-
"filename": "lib/index.ts",
|
|
2957
|
-
"line": 161
|
|
2958
|
-
},
|
|
2959
2124
|
"targets": {
|
|
2960
2125
|
"dotnet": {
|
|
2961
2126
|
"namespace": "Amazon.CDK.AWS.S3ObjectLambda"
|
|
@@ -2969,10 +2134,6 @@
|
|
|
2969
2134
|
}
|
|
2970
2135
|
},
|
|
2971
2136
|
"aws-cdk-lib.aws_s3outposts": {
|
|
2972
|
-
"locationInModule": {
|
|
2973
|
-
"filename": "lib/index.ts",
|
|
2974
|
-
"line": 162
|
|
2975
|
-
},
|
|
2976
2137
|
"targets": {
|
|
2977
2138
|
"dotnet": {
|
|
2978
2139
|
"namespace": "Amazon.CDK.AWS.S3Outposts"
|
|
@@ -2986,10 +2147,6 @@
|
|
|
2986
2147
|
}
|
|
2987
2148
|
},
|
|
2988
2149
|
"aws-cdk-lib.aws_sagemaker": {
|
|
2989
|
-
"locationInModule": {
|
|
2990
|
-
"filename": "lib/index.ts",
|
|
2991
|
-
"line": 163
|
|
2992
|
-
},
|
|
2993
2150
|
"targets": {
|
|
2994
2151
|
"dotnet": {
|
|
2995
2152
|
"namespace": "Amazon.CDK.AWS.Sagemaker"
|
|
@@ -3003,10 +2160,6 @@
|
|
|
3003
2160
|
}
|
|
3004
2161
|
},
|
|
3005
2162
|
"aws-cdk-lib.aws_sam": {
|
|
3006
|
-
"locationInModule": {
|
|
3007
|
-
"filename": "lib/index.ts",
|
|
3008
|
-
"line": 164
|
|
3009
|
-
},
|
|
3010
2163
|
"targets": {
|
|
3011
2164
|
"dotnet": {
|
|
3012
2165
|
"namespace": "Amazon.CDK.AWS.SAM"
|
|
@@ -3020,10 +2173,6 @@
|
|
|
3020
2173
|
}
|
|
3021
2174
|
},
|
|
3022
2175
|
"aws-cdk-lib.aws_sdb": {
|
|
3023
|
-
"locationInModule": {
|
|
3024
|
-
"filename": "lib/index.ts",
|
|
3025
|
-
"line": 165
|
|
3026
|
-
},
|
|
3027
2176
|
"targets": {
|
|
3028
2177
|
"dotnet": {
|
|
3029
2178
|
"namespace": "Amazon.CDK.AWS.SDB"
|
|
@@ -3037,13 +2186,6 @@
|
|
|
3037
2186
|
}
|
|
3038
2187
|
},
|
|
3039
2188
|
"aws-cdk-lib.aws_secretsmanager": {
|
|
3040
|
-
"locationInModule": {
|
|
3041
|
-
"filename": "lib/index.ts",
|
|
3042
|
-
"line": 166
|
|
3043
|
-
},
|
|
3044
|
-
"readme": {
|
|
3045
|
-
"markdown": "# AWS Secrets Manager Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\n\n```ts\nimport { aws_secretsmanager as secretsmanager } from 'aws-cdk-lib';\n```\n\n## Create a new Secret in a Stack\n\nIn order to have SecretsManager generate a new secret value automatically,\nyou can get started with the following:\n\n[example of creating a secret](test/integ.secret.lit.ts)\n\nThe `Secret` construct does not allow specifying the `SecretString` property\nof the `AWS::SecretsManager::Secret` resource (as this will almost always\nlead to the secret being surfaced in plain text and possibly committed to\nyour source control).\n\nIf you need to use a pre-existing secret, the recommended way is to manually\nprovision the secret in *AWS SecretsManager* and use the `Secret.fromSecretArn`\nor `Secret.fromSecretAttributes` method to make it available in your CDK Application:\n\n```ts\nconst secret = secretsmanager.Secret.fromSecretAttributes(scope, 'ImportedSecret', {\n secretArn: 'arn:aws:secretsmanager:<region>:<account-id-number>:secret:<secret-name>-<random-6-characters>',\n // If the secret is encrypted using a KMS-hosted CMK, either import or reference that key:\n encryptionKey,\n});\n```\n\nSecretsManager secret values can only be used in select set of properties. For the\nlist of properties, see [the CloudFormation Dynamic References documentation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/dynamic-references.html).\n\nA secret can set `RemovalPolicy`. If it set to `RETAIN`, that removing a secret will fail.\n\n## Grant permission to use the secret to a role\n\nYou must grant permission to a resource for that resource to be allowed to\nuse a secret. 
This can be achieved with the `Secret.grantRead` and/or `Secret.grantUpdate`\n method, depending on your need:\n\n```ts\nconst role = new iam.Role(stack, 'SomeRole', { assumedBy: new iam.AccountRootPrincipal() });\nconst secret = new secretsmanager.Secret(stack, 'Secret');\nsecret.grantRead(role);\nsecret.grantWrite(role);\n```\n\nIf, as in the following example, your secret was created with a KMS key:\n\n```ts\nconst key = new kms.Key(stack, 'KMS');\nconst secret = new secretsmanager.Secret(stack, 'Secret', { encryptionKey: key });\nsecret.grantRead(role);\nsecret.grantWrite(role);\n```\n\nthen `Secret.grantRead` and `Secret.grantWrite` will also grant the role the\nrelevant encrypt and decrypt permissions to the KMS key through the\nSecretsManager service principal.\n\nThe principal is automatically added to Secret resource policy and KMS Key policy for cross account access:\n\n```ts\nconst otherAccount = new iam.AccountPrincipal('1234');\nconst key = new kms.Key(stack, 'KMS');\nconst secret = new secretsmanager.Secret(stack, 'Secret', { encryptionKey: key });\nsecret.grantRead(otherAccount);\n```\n\n## Rotating a Secret\n\n### Using a Custom Lambda Function\n\nA rotation schedule can be added to a Secret using a custom Lambda function:\n\n```ts\nconst fn = new lambda.Function(...);\nconst secret = new secretsmanager.Secret(this, 'Secret');\n\nsecret.addRotationSchedule('RotationSchedule', {\n rotationLambda: fn,\n automaticallyAfter: Duration.days(15)\n});\n```\n\nNote: The required permissions for Lambda to call SecretsManager and the other way round are automatically granted based on [AWS Documentation](https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets-required-permissions.html) as long as the Lambda is not imported.\n\nSee [Overview of the Lambda Rotation Function](https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets-lambda-function-overview.html) on how to implement a Lambda Rotation Function.\n\n### Using a 
Hosted Lambda Function\n\nUse the `hostedRotation` prop to rotate a secret with a hosted Lambda function:\n\n```ts\nconst secret = new secretsmanager.Secret(this, 'Secret');\n\nsecret.addRotationSchedule('RotationSchedule', {\n hostedRotation: secretsmanager.HostedRotation.mysqlSingleUser(),\n});\n```\n\nHosted rotation is available for secrets representing credentials for MySQL, PostgreSQL, Oracle,\nMariaDB, SQLServer, Redshift and MongoDB (both for the single and multi user schemes).\n\nWhen deployed in a VPC, the hosted rotation implements `ec2.IConnectable`:\n\n```ts\nconst myHostedRotation = secretsmanager.HostedRotation.mysqlSingleUser({ vpc: myVpc });\nsecret.addRotationSchedule('RotationSchedule', { hostedRotation: myHostedRotation });\ndbConnections.allowDefaultPortFrom(hostedRotation);\n```\n\nSee also [Automating secret creation in AWS CloudFormation](https://docs.aws.amazon.com/secretsmanager/latest/userguide/integrating_cloudformation.html).\n\n## Rotating database credentials\n\nDefine a `SecretRotation` to rotate database credentials:\n\n```ts\nnew secretsmanager.SecretRotation(this, 'SecretRotation', {\n application: secretsmanager.SecretRotationApplication.MYSQL_ROTATION_SINGLE_USER, // MySQL single user scheme\n secret: mySecret,\n target: myDatabase, // a Connectable\n vpc: myVpc, // The VPC where the secret rotation application will be deployed\n excludeCharacters: ' %+:;{}', // characters to never use when generating new passwords;\n // by default, no characters are excluded,\n // which might cause problems with some services, like DMS\n});\n```\n\nThe secret must be a JSON string with the following format:\n\n```json\n{\n \"engine\": \"<required: database engine>\",\n \"host\": \"<required: instance host name>\",\n \"username\": \"<required: username>\",\n \"password\": \"<required: password>\",\n \"dbname\": \"<optional: database name>\",\n \"port\": \"<optional: if not specified, default port will be used>\",\n \"masterarn\": \"<required for 
multi user rotation: the arn of the master secret which will be used to create users/change passwords>\"\n}\n```\n\nFor the multi user scheme, a `masterSecret` must be specified:\n\n```ts\nnew secretsmanager.SecretRotation(stack, 'SecretRotation', {\n application: secretsmanager.SecretRotationApplication.MYSQL_ROTATION_MULTI_USER,\n secret: myUserSecret, // The secret that will be rotated\n masterSecret: myMasterSecret, // The secret used for the rotation\n target: myDatabase,\n vpc: myVpc,\n});\n```\n\nSee also [aws-rds](https://github.com/aws/aws-cdk/blob/master/packages/%40aws-cdk/aws-rds/README.md) where\ncredentials generation and rotation is integrated.\n\n## Importing Secrets\n\nExisting secrets can be imported by ARN, name, and other attributes (including the KMS key used to encrypt the secret).\nSecrets imported by name should use the short-form of the name (without the SecretsManager-provided suffx);\nthe secret name must exist in the same account and region as the stack.\nImporting by name makes it easier to reference secrets created in different regions, each with their own suffix and ARN.\n\n```ts\nimport { aws_kms as kms } from 'aws-cdk-lib';\n\nconst secretCompleteArn = 'arn:aws:secretsmanager:eu-west-1:111111111111:secret:MySecret-f3gDy9';\nconst secretPartialArn = 'arn:aws:secretsmanager:eu-west-1:111111111111:secret:MySecret'; // No Secrets Manager suffix\nconst encryptionKey = kms.Key.fromKeyArn(stack, 'MyEncKey', 'arn:aws:kms:eu-west-1:111111111111:key/21c4b39b-fde2-4273-9ac0-d9bb5c0d0030');\nconst mySecretFromCompleteArn = secretsmanager.Secret.fromSecretCompleteArn(stack, 'SecretFromCompleteArn', secretCompleteArn);\nconst mySecretFromPartialArn = secretsmanager.Secret.fromSecretPartialArn(stack, 'SecretFromPartialArn', secretPartialArn);\nconst mySecretFromName = secretsmanager.Secret.fromSecretNameV2(stack, 'SecretFromName', 'MySecret')\nconst mySecretFromAttrs = secretsmanager.Secret.fromSecretAttributes(stack, 'SecretFromAttributes', {\n 
secretCompleteArn,\n encryptionKey,\n});\n```\n\n## Replicating secrets\n\nSecrets can be replicated to multiple regions by specifying `replicaRegions`:\n\n```ts\nnew secretsmanager.Secret(this, 'Secret', {\n replicaRegions: [\n {\n region: 'eu-west-1',\n },\n {\n region: 'eu-central-1',\n encryptionKey: myKey,\n }\n ]\n});\n```\n\nAlternatively, use `addReplicaRegion()`:\n\n```ts\nconst secret = new secretsmanager.Secret(this, 'Secret');\nsecret.addReplicaRegion('eu-west-1');\n```\n"
|
|
3046
|
-
},
|
|
3047
2189
|
"targets": {
|
|
3048
2190
|
"dotnet": {
|
|
3049
2191
|
"namespace": "Amazon.CDK.AWS.SecretsManager"
|
|
@@ -3057,10 +2199,6 @@
|
|
|
3057
2199
|
}
|
|
3058
2200
|
},
|
|
3059
2201
|
"aws-cdk-lib.aws_securityhub": {
|
|
3060
|
-
"locationInModule": {
|
|
3061
|
-
"filename": "lib/index.ts",
|
|
3062
|
-
"line": 167
|
|
3063
|
-
},
|
|
3064
2202
|
"targets": {
|
|
3065
2203
|
"dotnet": {
|
|
3066
2204
|
"namespace": "Amazon.CDK.AWS.SecurityHub"
|
|
@@ -3074,10 +2212,6 @@
|
|
|
3074
2212
|
}
|
|
3075
2213
|
},
|
|
3076
2214
|
"aws-cdk-lib.aws_servicecatalog": {
|
|
3077
|
-
"locationInModule": {
|
|
3078
|
-
"filename": "lib/index.ts",
|
|
3079
|
-
"line": 168
|
|
3080
|
-
},
|
|
3081
2215
|
"targets": {
|
|
3082
2216
|
"dotnet": {
|
|
3083
2217
|
"namespace": "Amazon.CDK.AWS.Servicecatalog"
|
|
@@ -3091,10 +2225,6 @@
|
|
|
3091
2225
|
}
|
|
3092
2226
|
},
|
|
3093
2227
|
"aws-cdk-lib.aws_servicecatalogappregistry": {
|
|
3094
|
-
"locationInModule": {
|
|
3095
|
-
"filename": "lib/index.ts",
|
|
3096
|
-
"line": 169
|
|
3097
|
-
},
|
|
3098
2228
|
"targets": {
|
|
3099
2229
|
"dotnet": {
|
|
3100
2230
|
"namespace": "Amazon.CDK.AWS.Servicecatalogappregistry"
|
|
@@ -3108,13 +2238,6 @@
|
|
|
3108
2238
|
}
|
|
3109
2239
|
},
|
|
3110
2240
|
"aws-cdk-lib.aws_servicediscovery": {
|
|
3111
|
-
"locationInModule": {
|
|
3112
|
-
"filename": "lib/index.ts",
|
|
3113
|
-
"line": 170
|
|
3114
|
-
},
|
|
3115
|
-
"readme": {
|
|
3116
|
-
"markdown": "# Amazon ECS Service Discovery Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk) project.\n\nThis package contains constructs for working with **AWS Cloud Map**\n\nAWS Cloud Map is a fully managed service that you can use to create and\nmaintain a map of the backend services and resources that your applications\ndepend on.\n\nFor further information on AWS Cloud Map,\nsee the [AWS Cloud Map documentation](https://docs.aws.amazon.com/cloud-map)\n\n## HTTP Namespace Example\n\nThe following example creates an AWS Cloud Map namespace that\nsupports API calls, creates a service in that namespace, and\nregisters an instance to it:\n\n[Creating a Cloud Map service within an HTTP namespace](test/integ.service-with-http-namespace.lit.ts)\n\n## Private DNS Namespace Example\n\nThe following example creates an AWS Cloud Map namespace that\nsupports both API calls and DNS queries within a vpc, creates a\nservice in that namespace, and registers a loadbalancer as an\ninstance:\n\n[Creating a Cloud Map service within a Private DNS namespace](test/integ.service-with-private-dns-namespace.lit.ts)\n\n## Public DNS Namespace Example\n\nThe following example creates an AWS Cloud Map namespace that\nsupports both API calls and public DNS queries, creates a service in\nthat namespace, and registers an IP instance:\n\n[Creating a Cloud Map service within a Public namespace](test/integ.service-with-public-dns-namespace.lit.ts)\n\nFor DNS namespaces, you can also register instances to services with CNAME records:\n\n[Creating a Cloud Map service within a Public namespace](test/integ.service-with-cname-record.lit.ts)\n"
|
|
3117
|
-
},
|
|
3118
2241
|
"targets": {
|
|
3119
2242
|
"dotnet": {
|
|
3120
2243
|
"namespace": "Amazon.CDK.AWS.ServiceDiscovery"
|
|
@@ -3128,13 +2251,6 @@
|
|
|
3128
2251
|
}
|
|
3129
2252
|
},
|
|
3130
2253
|
"aws-cdk-lib.aws_ses": {
|
|
3131
|
-
"locationInModule": {
|
|
3132
|
-
"filename": "lib/index.ts",
|
|
3133
|
-
"line": 171
|
|
3134
|
-
},
|
|
3135
|
-
"readme": {
|
|
3136
|
-
"markdown": "# Amazon Simple Email Service Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk) project.\n\n## Email receiving\n\nCreate a receipt rule set with rules and actions (actions can be found in the\n`@aws-cdk/aws-ses-actions` package):\n\n```ts\nimport { aws_s3 as s3 } from 'aws-cdk-lib';\nimport { aws_ses as ses } from 'aws-cdk-lib';\nimport { aws_ses_actions as actions } from 'aws-cdk-lib';\nimport { aws_sns as sns } from 'aws-cdk-lib';\n\nconst bucket = new s3.Bucket(stack, 'Bucket');\nconst topic = new sns.Topic(stack, 'Topic');\n\nnew ses.ReceiptRuleSet(stack, 'RuleSet', {\n rules: [\n {\n recipients: ['hello@aws.com'],\n actions: [\n new actions.AddHeader({\n name: 'X-Special-Header',\n value: 'aws'\n }),\n new actions.S3({\n bucket,\n objectKeyPrefix: 'emails/',\n topic\n })\n ],\n },\n {\n recipients: ['aws.com'],\n actions: [\n new actions.Sns({\n topic\n })\n ]\n }\n ]\n});\n```\n\nAlternatively, rules can be added to a rule set:\n\n```ts\nconst ruleSet = new ses.ReceiptRuleSet(this, 'RuleSet'):\n\nconst awsRule = ruleSet.addRule('Aws', {\n recipients: ['aws.com']\n});\n```\n\nAnd actions to rules:\n\n```ts\nawsRule.addAction(new actions.Sns({\n topic\n}));\n```\n\nWhen using `addRule`, the new rule is added after the last added rule unless `after` is specified.\n\n### Drop spams\n\nA rule to drop spam can be added by setting `dropSpam` to `true`:\n\n```ts\nnew ses.ReceiptRuleSet(this, 'RuleSet', {\n dropSpam: true\n});\n```\n\nThis will add a rule at the top of the rule set with a Lambda action that stops processing messages that have at least one spam indicator. 
See [Lambda Function Examples](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-lambda-example-functions.html).\n\n\n## Receipt filter\n\nCreate a receipt filter:\n\n```ts\nnew ses.ReceiptFilter(this, 'Filter', {\n ip: '1.2.3.4/16' // Will be blocked\n})\n```\n\nAn allow list filter is also available:\n\n```ts\nnew ses.AllowListReceiptFilter(this, 'AllowList', {\n ips: [\n '10.0.0.0/16',\n '1.2.3.4/16',\n ]\n});\n```\n\nThis will first create a block all filter and then create allow filters for the listed ip addresses.\n"
|
|
3137
|
-
},
|
|
3138
2254
|
"targets": {
|
|
3139
2255
|
"dotnet": {
|
|
3140
2256
|
"namespace": "Amazon.CDK.AWS.SES"
|
|
@@ -3148,13 +2264,6 @@
|
|
|
3148
2264
|
}
|
|
3149
2265
|
},
|
|
3150
2266
|
"aws-cdk-lib.aws_ses_actions": {
|
|
3151
|
-
"locationInModule": {
|
|
3152
|
-
"filename": "lib/index.ts",
|
|
3153
|
-
"line": 172
|
|
3154
|
-
},
|
|
3155
|
-
"readme": {
|
|
3156
|
-
"markdown": "# Amazon Simple Email Service Actions Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis module contains integration classes to add action to SES email receiving rules.\nInstances of these classes should be passed to the `rule.addAction()` method.\n\nCurrently supported are:\n\n* [Add header](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-add-header.html)\n* [Bounce](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-bounce.html)\n* [Lambda](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-lambda.html)\n* [S3](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-s3.html)\n* [SNS](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-sns.html)\n* [Stop](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-stop.html)\n\nSee the README of `@aws-cdk/aws-ses` for more information.\n"
|
|
3157
|
-
},
|
|
3158
2267
|
"targets": {
|
|
3159
2268
|
"dotnet": {
|
|
3160
2269
|
"namespace": "Amazon.CDK.AWS.SES.Actions"
|
|
@@ -3168,13 +2277,6 @@
|
|
|
3168
2277
|
}
|
|
3169
2278
|
},
|
|
3170
2279
|
"aws-cdk-lib.aws_signer": {
|
|
3171
|
-
"locationInModule": {
|
|
3172
|
-
"filename": "lib/index.ts",
|
|
3173
|
-
"line": 173
|
|
3174
|
-
},
|
|
3175
|
-
"readme": {
|
|
3176
|
-
"markdown": "# AWS::Signer Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nAWS Signer is a fully managed code-signing service to ensure the trust and integrity of your code. Organizations validate code against\na digital signature to confirm that the code is unaltered and from a trusted publisher. For more information, see [What Is AWS\nSigner?](https://docs.aws.amazon.com/signer/latest/developerguide/Welcome.html)\n\n## Table of Contents\n\n- [Signing Platform](#signing-platform)\n- [Signing Profile](#signing-profile)\n\n## Signing Platform\n\nA signing platform is a predefined set of instructions that specifies the signature format and signing algorithms that AWS Signer should use\nto sign a zip file. For more information go to [Signing Platforms in AWS Signer](https://docs.aws.amazon.com/signer/latest/developerguide/gs-platform.html).\n\nAWS Signer provides a pre-defined set of signing platforms. They are available in the CDK as -\n\n```ts\nPlatform.AWS_IOT_DEVICE_MANAGEMENT_SHA256_ECDSA\nPlatform.AWS_LAMBDA_SHA384_ECDSA\nPlatform.AMAZON_FREE_RTOS_TI_CC3220SF\nPlatform.AMAZON_FREE_RTOS_DEFAULT\n```\n\n## Signing Profile\n\nA signing profile is a code-signing template that can be used to pre-define the signature specifications for a signing job.\nA signing profile includes a signing platform to designate the file type to be signed, the signature format, and the signature algorithms.\nFor more information, visit [Signing Profiles in AWS Signer](https://docs.aws.amazon.com/signer/latest/developerguide/gs-profile.html).\n\nThe following code sets up a signing profile for signing lambda code bundles -\n\n```ts\nimport { aws_signer as signer } from 'aws-cdk-lib';\n\nconst signingProfile = new signer.SigningProfile(this, 'SigningProfile', { \n platform: signer.Platform.AWS_LAMBDA_SHA384_ECDSA,\n} );\n```\n\nA signing profile is valid by default for 135 months. 
This can be modified by specifying the `signatureValidityPeriod` property.\n"
|
|
3177
|
-
},
|
|
3178
2280
|
"targets": {
|
|
3179
2281
|
"dotnet": {
|
|
3180
2282
|
"namespace": "Amazon.CDK.AWS.Signer"
|
|
@@ -3188,13 +2290,6 @@
|
|
|
3188
2290
|
}
|
|
3189
2291
|
},
|
|
3190
2292
|
"aws-cdk-lib.aws_sns": {
|
|
3191
|
-
"locationInModule": {
|
|
3192
|
-
"filename": "lib/index.ts",
|
|
3193
|
-
"line": 174
|
|
3194
|
-
},
|
|
3195
|
-
"readme": {
|
|
3196
|
-
"markdown": "# Amazon Simple Notification Service Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nAdd an SNS Topic to your stack:\n\n```ts\nimport { aws_sns as sns } from 'aws-cdk-lib';\n\nconst topic = new sns.Topic(this, 'Topic', {\n displayName: 'Customer subscription topic'\n});\n```\n\nAdd a FIFO SNS topic with content-based de-duplication to your stack:\n\n```ts\nimport { aws_sns as sns } from 'aws-cdk-lib';\n\nconst topic = new sns.Topic(this, 'Topic', {\n contentBasedDeduplication: true,\n displayName: 'Customer subscription topic',\n fifo: true,\n topicName: 'customerTopic',\n});\n```\n\nNote that FIFO topics require a topic name to be provided. The required `.fifo` suffix will be automatically added to the topic name if it is not explicitly provided.\n\n## Subscriptions\n\nVarious subscriptions can be added to the topic by calling the\n`.addSubscription(...)` method on the topic. It accepts a *subscription* object,\ndefault implementations of which can be found in the\n`@aws-cdk/aws-sns-subscriptions` package:\n\nAdd an HTTPS Subscription to your topic:\n\n```ts\nimport { aws_sns_subscriptions as subs } from 'aws-cdk-lib';\n\nconst myTopic = new sns.Topic(this, 'MyTopic');\n\nmyTopic.addSubscription(new subs.UrlSubscription('https://foobar.com/'));\n```\n\nSubscribe a queue to the topic:\n\n```ts\nmyTopic.addSubscription(new subs.SqsSubscription(queue));\n```\n\nNote that subscriptions of queues in different accounts need to be manually confirmed by\nreading the initial message from the queue and visiting the link found in it.\n\n### Filter policy\n\nA filter policy can be specified when subscribing an endpoint to a topic.\n\nExample with a Lambda subscription:\n\n```ts\nconst myTopic = new sns.Topic(this, 'MyTopic');\nconst fn = new lambda.Function(this, 'Function', ...);\n\n// Lambda should receive only message matching the following conditions on attributes:\n// color: 'red' or 'orange' or begins 
with 'bl'\n// size: anything but 'small' or 'medium'\n// price: between 100 and 200 or greater than 300\n// store: attribute must be present\ntopic.addSubscription(new subs.LambdaSubscription(fn, {\n filterPolicy: {\n color: sns.SubscriptionFilter.stringFilter({\n allowlist: ['red', 'orange'],\n matchPrefixes: ['bl']\n }),\n size: sns.SubscriptionFilter.stringFilter({\n denylist: ['small', 'medium'],\n }),\n price: sns.SubscriptionFilter.numericFilter({\n between: { start: 100, stop: 200 },\n greaterThan: 300\n }),\n store: sns.SubscriptionFilter.existsFilter(),\n }\n}));\n```\n\n## DLQ setup for SNS Subscription\n\nCDK can attach provided Queue as DLQ for your SNS subscription.\nSee the [SNS DLQ configuration docs](https://docs.aws.amazon.com/sns/latest/dg/sns-configure-dead-letter-queue.html) for more information about this feature.\n\nExample of usage with user provided DLQ.\n\n```ts\nconst topic = new sns.Topic(stack, 'Topic');\nconst dlQueue = new Queue(stack, 'DeadLetterQueue', {\n queueName: 'MySubscription_DLQ',\n retentionPeriod: cdk.Duration.days(14),\n});\n\nnew sns.Subscription(stack, 'Subscription', {\n endpoint: 'endpoint',\n protocol: sns.SubscriptionProtocol.LAMBDA,\n topic,\n deadLetterQueue: dlQueue,\n});\n```\n\n## CloudWatch Event Rule Target\n\nSNS topics can be used as targets for CloudWatch event rules.\n\nUse the `@aws-cdk/aws-events-targets.SnsTopic`:\n\n```ts\nimport { aws_events_targets as targets } from 'aws-cdk-lib';\n\ncodeCommitRepository.onCommit(new targets.SnsTopic(myTopic));\n```\n\nThis will result in adding a target to the event rule and will also modify the\ntopic resource policy to allow CloudWatch events to publish to the topic.\n\n## Topic Policy\n\nA topic policy is automatically created when `addToResourcePolicy` is called, if\none doesn't already exist. 
Using `addToResourcePolicy` is the simplest way to\nadd policies, but a `TopicPolicy` can also be created manually.\n\n```ts\nconst topic = new sns.Topic(stack, 'Topic');\nconst topicPolicy = new sns.TopicPolicy(stack, 'TopicPolicy', {\n topics: [topic],\n});\n\ntopicPolicy.document.addStatements(new iam.PolicyStatement({\n actions: [\"sns:Subscribe\"],\n principals: [new iam.AnyPrincipal()],\n resources: [topic.topicArn],\n}));\n```\n\nA policy document can also be passed on `TopicPolicy` construction\n\n```ts\nconst topic = new sns.Topic(stack, 'Topic');\nconst policyDocument = new iam.PolicyDocument({\n assignSids: true,\n statements: [\n new iam.PolicyStatement({\n actions: [\"sns:Subscribe\"],\n principals: [new iam.AnyPrincipal()],\n resources: [topic.topicArn]\n }),\n ],\n});\n\nconst topicPolicy = new sns.TopicPolicy(this, 'Policy', {\n topics: [topic],\n policyDocument,\n});\n```\n"
|
|
3197
|
-
},
|
|
3198
2293
|
"targets": {
|
|
3199
2294
|
"dotnet": {
|
|
3200
2295
|
"namespace": "Amazon.CDK.AWS.SNS"
|
|
@@ -3208,13 +2303,6 @@
|
|
|
3208
2303
|
}
|
|
3209
2304
|
},
|
|
3210
2305
|
"aws-cdk-lib.aws_sns_subscriptions": {
|
|
3211
|
-
"locationInModule": {
|
|
3212
|
-
"filename": "lib/index.ts",
|
|
3213
|
-
"line": 175
|
|
3214
|
-
},
|
|
3215
|
-
"readme": {
|
|
3216
|
-
"markdown": "# CDK Construct Library for Amazon Simple Notification Service Subscriptions\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis library provides constructs for adding subscriptions to an Amazon SNS topic.\nSubscriptions can be added by calling the `.addSubscription(...)` method on the topic.\n\n## Subscriptions\n\nSubscriptions can be added to the following endpoints:\n\n* HTTPS\n* Amazon SQS\n* AWS Lambda\n* Email\n* SMS\n\nSubscriptions to Amazon SQS and AWS Lambda can be added on topics across regions.\n\nCreate an Amazon SNS Topic to add subscriptions.\n\n```ts\nimport { aws_sns as sns } from 'aws-cdk-lib';\n\nconst myTopic = new sns.Topic(this, 'MyTopic');\n```\n\n### HTTPS\n\nAdd an HTTP or HTTPS Subscription to your topic:\n\n```ts\nimport { aws_sns_subscriptions as subscriptions } from 'aws-cdk-lib';\n\nmyTopic.addSubscription(new subscriptions.UrlSubscription('https://foobar.com/'));\n```\n\nThe URL being subscribed can also be [tokens](https://docs.aws.amazon.com/cdk/latest/guide/tokens.html), that resolve\nto a URL during deployment. A typical use case is when the URL is passed in as a [CloudFormation\nparameter](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html). 
The\nfollowing code defines a CloudFormation parameter and uses it in a URL subscription.\n\n```ts\nconst url = new CfnParameter(this, 'url-param');\nmyTopic.addSubscription(new subscriptions.UrlSubscription(url.valueAsString()));\n```\n\n### Amazon SQS\n\nSubscribe a queue to your topic:\n\n```ts\nimport { aws_sqs as sqs } from 'aws-cdk-lib';\nimport { aws_sns_subscriptions as subscriptions } from 'aws-cdk-lib';\n\nconst myQueue = new sqs.Queue(this, 'MyQueue');\n\nmyTopic.addSubscription(new subscriptions.SqsSubscription(queue));\n```\n\nKMS key permissions will automatically be granted to SNS when a subscription is made to\nan encrypted queue.\n\nNote that subscriptions of queues in different accounts need to be manually confirmed by\nreading the initial message from the queue and visiting the link found in it.\n\n### AWS Lambda\n\nSubscribe an AWS Lambda function to your topic:\n\n```ts\nimport { aws_lambda as lambda } from 'aws-cdk-lib';\nimport { aws_sns_subscriptions as subscriptions } from 'aws-cdk-lib';\n\nconst myFunction = new lambda.Function(this, 'Echo', {\n handler: 'index.handler',\n runtime: lambda.Runtime.NODEJS_12_X,\n code: lambda.Code.fromInline(`exports.handler = ${handler.toString()}`)\n});\n\nmyTopic.addSubscription(new subscriptions.LambdaSubscription(myFunction));\n```\n\n### Email\n\nSubscribe an email address to your topic:\n\n```ts\nimport { aws_sns_subscriptions as subscriptions } from 'aws-cdk-lib';\n\nmyTopic.addSubscription(new subscriptions.EmailSubscription('foo@bar.com'));\n```\n\nThe email being subscribed can also be [tokens](https://docs.aws.amazon.com/cdk/latest/guide/tokens.html), that resolve\nto an email address during deployment. A typical use case is when the email address is passed in as a [CloudFormation\nparameter](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html). 
The\nfollowing code defines a CloudFormation parameter and uses it in an email subscription.\n\n```ts\nconst emailAddress = new CfnParameter(this, 'email-param');\nmyTopic.addSubscription(new subscriptions.EmailSubscription(emailAddress.valueAsString()));\n```\n\nNote that email subscriptions require confirmation by visiting the link sent to the\nemail address.\n\n### SMS\n\nSubscribe an sms number to your topic:\n\n```ts\nimport { aws_sns_subscriptions as subscriptions } from 'aws-cdk-lib';\n\nmyTopic.addSubscription(new subscriptions.SmsSubscription('+15551231234'));\n```\n\nThe number being subscribed can also be [tokens](https://docs.aws.amazon.com/cdk/latest/guide/tokens.html), that resolve\nto a number during deployment. A typical use case is when the number is passed in as a [CloudFormation\nparameter](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html). The\nfollowing code defines a CloudFormation parameter and uses it in an sms subscription.\n\n```ts\nconst smsNumber = new CfnParameter(this, 'sms-param');\nmyTopic.addSubscription(new subscriptions.SmsSubscription(smsNumber.valueAsString()));\n```\n"
|
|
3217
|
-
},
|
|
3218
2306
|
"targets": {
|
|
3219
2307
|
"dotnet": {
|
|
3220
2308
|
"namespace": "Amazon.CDK.AWS.SNS.Subscriptions"
|
|
@@ -3228,13 +2316,6 @@
|
|
|
3228
2316
|
}
|
|
3229
2317
|
},
|
|
3230
2318
|
"aws-cdk-lib.aws_sqs": {
|
|
3231
|
-
"locationInModule": {
|
|
3232
|
-
"filename": "lib/index.ts",
|
|
3233
|
-
"line": 176
|
|
3234
|
-
},
|
|
3235
|
-
"readme": {
|
|
3236
|
-
"markdown": "# Amazon Simple Queue Service Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nAmazon Simple Queue Service (SQS) is a fully managed message queuing service that \nenables you to decouple and scale microservices, distributed systems, and serverless \napplications. SQS eliminates the complexity and overhead associated with managing and \noperating message oriented middleware, and empowers developers to focus on differentiating work. \nUsing SQS, you can send, store, and receive messages between software components at any volume, \nwithout losing messages or requiring other services to be available. \n\n## Installation\n\nImport to your project:\n\n```ts\nimport { aws_sqs as sqs } from 'aws-cdk-lib';\n```\n\n## Basic usage\n\n\nHere's how to add a basic queue to your application:\n\n```ts\nnew sqs.Queue(this, 'Queue');\n```\n\n## Encryption\n\nIf you want to encrypt the queue contents, set the `encryption` property. You can have\nthe messages encrypted with a key that SQS manages for you, or a key that you\ncan manage yourself.\n\n```ts\n// Use managed key\nnew sqs.Queue(this, 'Queue', {\n encryption: QueueEncryption.KMS_MANAGED,\n});\n\n// Use custom key\nconst myKey = new kms.Key(this, 'Key');\n\nnew sqs.Queue(this, 'Queue', {\n encryption: QueueEncryption.KMS,\n encryptionMasterKey: myKey\n});\n```\n\n## First-In-First-Out (FIFO) queues\n\nFIFO queues give guarantees on the order in which messages are dequeued, and have additional\nfeatures in order to help guarantee exactly-once processing. For more information, see\nthe SQS manual. Note that FIFO queues are not available in all AWS regions.\n\nA queue can be made a FIFO queue by either setting `fifo: true`, giving it a name which ends\nin `\".fifo\"`, or by enabling a FIFO specific feature such as: content-based deduplication, \ndeduplication scope or fifo throughput limit.\n"
|
|
3237
|
-
},
|
|
3238
2319
|
"targets": {
|
|
3239
2320
|
"dotnet": {
|
|
3240
2321
|
"namespace": "Amazon.CDK.AWS.SQS"
|
|
@@ -3248,13 +2329,6 @@
|
|
|
3248
2329
|
}
|
|
3249
2330
|
},
|
|
3250
2331
|
"aws-cdk-lib.aws_ssm": {
|
|
3251
|
-
"locationInModule": {
|
|
3252
|
-
"filename": "lib/index.ts",
|
|
3253
|
-
"line": 177
|
|
3254
|
-
},
|
|
3255
|
-
"readme": {
|
|
3256
|
-
"markdown": "# AWS Systems Manager Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk) project.\n\n## Installation\n\nInstall the module:\n\n```console\n$ npm i @aws-cdk/aws-ssm\n```\n\nImport it into your code:\n\n```ts\nimport { aws_ssm as ssm } from 'aws-cdk-lib';\n```\n\n## Using existing SSM Parameters in your CDK app\n\nYou can reference existing SSM Parameter Store values that you want to use in\nyour CDK app by using `ssm.ParameterStoreString`:\n\n[using SSM parameter](test/integ.parameter-store-string.lit.ts)\n\n## Creating new SSM Parameters in your CDK app\n\nYou can create either `ssm.StringParameter` or `ssm.StringListParameter`s in\na CDK app. These are public (not secret) values. Parameters of type\n*SecretString* cannot be created directly from a CDK application; if you want\nto provision secrets automatically, use Secrets Manager Secrets (see the\n`@aws-cdk/aws-secretsmanager` package).\n\n```ts\nnew ssm.StringParameter(stack, 'Parameter', {\n allowedPattern: '.*',\n description: 'The value Foo',\n parameterName: 'FooParameter',\n stringValue: 'Foo',\n tier: ssm.ParameterTier.ADVANCED,\n});\n```\n\n[creating SSM parameters](test/integ.parameter.lit.ts)\n\nWhen specifying an `allowedPattern`, the values provided as string literals\nare validated against the pattern and an exception is raised if a value\nprovided does not comply.\n"
|
|
3257
|
-
},
|
|
3258
2332
|
"targets": {
|
|
3259
2333
|
"dotnet": {
|
|
3260
2334
|
"namespace": "Amazon.CDK.AWS.SSM"
|
|
@@ -3268,10 +2342,6 @@
|
|
|
3268
2342
|
}
|
|
3269
2343
|
},
|
|
3270
2344
|
"aws-cdk-lib.aws_ssmcontacts": {
|
|
3271
|
-
"locationInModule": {
|
|
3272
|
-
"filename": "lib/index.ts",
|
|
3273
|
-
"line": 178
|
|
3274
|
-
},
|
|
3275
2345
|
"targets": {
|
|
3276
2346
|
"dotnet": {
|
|
3277
2347
|
"namespace": "Amazon.CDK.AWS.SSMContacts"
|
|
@@ -3285,10 +2355,6 @@
|
|
|
3285
2355
|
}
|
|
3286
2356
|
},
|
|
3287
2357
|
"aws-cdk-lib.aws_ssmincidents": {
|
|
3288
|
-
"locationInModule": {
|
|
3289
|
-
"filename": "lib/index.ts",
|
|
3290
|
-
"line": 179
|
|
3291
|
-
},
|
|
3292
2358
|
"targets": {
|
|
3293
2359
|
"dotnet": {
|
|
3294
2360
|
"namespace": "Amazon.CDK.AWS.SSMIncidents"
|
|
@@ -3302,10 +2368,6 @@
|
|
|
3302
2368
|
}
|
|
3303
2369
|
},
|
|
3304
2370
|
"aws-cdk-lib.aws_sso": {
|
|
3305
|
-
"locationInModule": {
|
|
3306
|
-
"filename": "lib/index.ts",
|
|
3307
|
-
"line": 180
|
|
3308
|
-
},
|
|
3309
2371
|
"targets": {
|
|
3310
2372
|
"dotnet": {
|
|
3311
2373
|
"namespace": "Amazon.CDK.AWS.SSO"
|
|
@@ -3319,13 +2381,6 @@
|
|
|
3319
2381
|
}
|
|
3320
2382
|
},
|
|
3321
2383
|
"aws-cdk-lib.aws_stepfunctions": {
|
|
3322
|
-
"locationInModule": {
|
|
3323
|
-
"filename": "lib/index.ts",
|
|
3324
|
-
"line": 181
|
|
3325
|
-
},
|
|
3326
|
-
"readme": {
|
|
3327
|
-
"markdown": "# AWS Step Functions Construct Library\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThe `@aws-cdk/aws-stepfunctions` package contains constructs for building\nserverless workflows using objects. Use this in conjunction with the\n`@aws-cdk/aws-stepfunctions-tasks` package, which contains classes used\nto call other AWS services.\n\nDefining a workflow looks like this (for the [Step Functions Job Poller\nexample](https://docs.aws.amazon.com/step-functions/latest/dg/job-status-poller-sample.html)):\n\n## Example\n\n```ts\nimport { aws_stepfunctions as sfn } from 'aws-cdk-lib';\nimport { aws_stepfunctions_tasks as tasks } from 'aws-cdk-lib';\nimport { aws_lambda as lambda } from 'aws-cdk-lib';\n\nconst submitLambda = new lambda.Function(this, 'SubmitLambda', { ... });\nconst getStatusLambda = new lambda.Function(this, 'CheckLambda', { ... });\n\nconst submitJob = new tasks.LambdaInvoke(this, 'Submit Job', {\n lambdaFunction: submitLambda,\n // Lambda's result is in the attribute `Payload`\n outputPath: '$.Payload',\n});\n\nconst waitX = new sfn.Wait(this, 'Wait X Seconds', {\n time: sfn.WaitTime.secondsPath('$.waitSeconds'),\n});\n\nconst getStatus = new tasks.LambdaInvoke(this, 'Get Job Status', {\n lambdaFunction: getStatusLambda,\n // Pass just the field named \"guid\" into the Lambda, put the\n // Lambda's result in a field called \"status\" in the response\n inputPath: '$.guid',\n outputPath: '$.Payload',\n});\n\nconst jobFailed = new sfn.Fail(this, 'Job Failed', {\n cause: 'AWS Batch Job Failed',\n error: 'DescribeJob returned FAILED',\n});\n\nconst finalStatus = new tasks.LambdaInvoke(this, 'Get Final Job Status', {\n lambdaFunction: getStatusLambda,\n // Use \"guid\" field as input\n inputPath: '$.guid',\n outputPath: '$.Payload',\n});\n\nconst definition = submitJob\n .next(waitX)\n .next(getStatus)\n .next(new sfn.Choice(this, 'Job Complete?')\n // Look at the \"status\" field\n 
.when(sfn.Condition.stringEquals('$.status', 'FAILED'), jobFailed)\n .when(sfn.Condition.stringEquals('$.status', 'SUCCEEDED'), finalStatus)\n .otherwise(waitX));\n\nnew sfn.StateMachine(this, 'StateMachine', {\n definition,\n timeout: Duration.minutes(5)\n});\n```\n\nYou can find more sample snippets and learn more about the service integrations\nin the `@aws-cdk/aws-stepfunctions-tasks` package.\n\n## State Machine\n\nA `stepfunctions.StateMachine` is a resource that takes a state machine\ndefinition. The definition is specified by its start state, and encompasses\nall states reachable from the start state:\n\n```ts\nconst startState = new sfn.Pass(this, 'StartState');\n\nnew sfn.StateMachine(this, 'StateMachine', {\n definition: startState\n});\n```\n\nState machines execute using an IAM Role, which will automatically have all\npermissions added that are required to make all state machine tasks execute\nproperly (for example, permissions to invoke any Lambda functions you add to\nyour workflow). A role will be created by default, but you can supply an\nexisting one as well.\n\n## Amazon States Language\n\nThis library comes with a set of classes that model the [Amazon States\nLanguage](https://states-language.net/spec.html). The following State classes\nare supported:\n\n* [`Task`](#task)\n* [`Pass`](#pass)\n* [`Wait`](#wait)\n* [`Choice`](#choice)\n* [`Parallel`](#parallel)\n* [`Succeed`](#succeed)\n* [`Fail`](#fail)\n* [`Map`](#map)\n* [`Custom State`](#custom-state)\n\nAn arbitrary JSON object (specified at execution start) is passed from state to\nstate and transformed during the execution of the workflow. For more\ninformation, see the States Language spec.\n\n### Task\n\nA `Task` represents some work that needs to be done. 
The exact work to be\ndone is determine by a class that implements `IStepFunctionsTask`, a collection\nof which can be found in the `@aws-cdk/aws-stepfunctions-tasks` module.\n\nThe tasks in the `@aws-cdk/aws-stepfunctions-tasks` module support the\n[service integration pattern](https://docs.aws.amazon.com/step-functions/latest/dg/connect-to-resource.html) that integrates Step Functions with services\ndirectly in the Amazon States language.\n\n### Pass\n\nA `Pass` state passes its input to its output, without performing work.\nPass states are useful when constructing and debugging state machines.\n\nThe following example injects some fixed data into the state machine through\nthe `result` field. The `result` field will be added to the input and the result\nwill be passed as the state's output.\n\n```ts\n// Makes the current JSON state { ..., \"subObject\": { \"hello\": \"world\" } }\nconst pass = new sfn.Pass(this, 'Add Hello World', {\n result: sfn.Result.fromObject({ hello: 'world' }),\n resultPath: '$.subObject',\n});\n\n// Set the next state\npass.next(nextState);\n```\n\nThe `Pass` state also supports passing key-value pairs as input. Values can\nbe static, or selected from the input with a path.\n\nThe following example filters the `greeting` field from the state input\nand also injects a field called `otherData`.\n\n```ts\nconst pass = new sfn.Pass(this, 'Filter input and inject data', {\n parameters: { // input to the pass state\n input: sfn.JsonPath.stringAt('$.input.greeting'),\n otherData: 'some-extra-stuff'\n },\n});\n```\n\nThe object specified in `parameters` will be the input of the `Pass` state.\nSince neither `Result` nor `ResultPath` are supplied, the `Pass` state copies\nits input through to its output.\n\nLearn more about the [Pass state](https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-pass-state.html)\n\n### Wait\n\nA `Wait` state waits for a given number of seconds, or until the current time\nhits a particular time. 
The time to wait may be taken from the execution's JSON\nstate.\n\n```ts\n// Wait until it's the time mentioned in the the state object's \"triggerTime\"\n// field.\nconst wait = new sfn.Wait(this, 'Wait For Trigger Time', {\n time: sfn.WaitTime.timestampPath('$.triggerTime'),\n});\n\n// Set the next state\nwait.next(startTheWork);\n```\n\n### Choice\n\nA `Choice` state can take a different path through the workflow based on the\nvalues in the execution's JSON state:\n\n```ts\nconst choice = new sfn.Choice(this, 'Did it work?');\n\n// Add conditions with .when()\nchoice.when(sfn.Condition.stringEquals('$.status', 'SUCCESS'), successState);\nchoice.when(sfn.Condition.numberGreaterThan('$.attempts', 5), failureState);\n\n// Use .otherwise() to indicate what should be done if none of the conditions match\nchoice.otherwise(tryAgainState);\n```\n\nIf you want to temporarily branch your workflow based on a condition, but have\nall branches come together and continuing as one (similar to how an `if ...\nthen ... else` works in a programming language), use the `.afterwards()` method:\n\n```ts\nconst choice = new sfn.Choice(this, 'What color is it?');\nchoice.when(sfn.Condition.stringEquals('$.color', 'BLUE'), handleBlueItem);\nchoice.when(sfn.Condition.stringEquals('$.color', 'RED'), handleRedItem);\nchoice.otherwise(handleOtherItemColor);\n\n// Use .afterwards() to join all possible paths back together and continue\nchoice.afterwards().next(shipTheItem);\n```\n\nIf your `Choice` doesn't have an `otherwise()` and none of the conditions match\nthe JSON state, a `NoChoiceMatched` error will be thrown. 
Wrap the state machine\nin a `Parallel` state if you want to catch and recover from this.\n\n#### Available Conditions\n\nsee [step function comparison operators](https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-choice-state.html#amazon-states-language-choice-state-rules)\n\n* `Condition.isPresent` - matches if a json path is present\n* `Condition.isNotPresent` - matches if a json path is not present\n* `Condition.isString` - matches if a json path contains a string\n* `Condition.isNotString` - matches if a json path is not a string\n* `Condition.isNumeric` - matches if a json path is numeric\n* `Condition.isNotNumeric` - matches if a json path is not numeric\n* `Condition.isBoolean` - matches if a json path is boolean\n* `Condition.isNotBoolean` - matches if a json path is not boolean\n* `Condition.isTimestamp` - matches if a json path is a timestamp\n* `Condition.isNotTimestamp` - matches if a json path is not a timestamp\n* `Condition.isNotNull` - matches if a json path is not null\n* `Condition.isNull` - matches if a json path is null\n* `Condition.booleanEquals` - matches if a boolean field has a given value\n* `Condition.booleanEqualsJsonPath` - matches if a boolean field equals a value in a given mapping path\n* `Condition.stringEqualsJsonPath` - matches if a string field equals a given mapping path\n* `Condition.stringEquals` - matches if a field equals a string value\n* `Condition.stringLessThan` - matches if a string field sorts before a given value\n* `Condition.stringLessThanJsonPath` - matches if a string field sorts before a value at given mapping path\n* `Condition.stringLessThanEquals` - matches if a string field sorts equal to or before a given value\n* `Condition.stringLessThanEqualsJsonPath` - matches if a string field sorts equal to or before a given mapping\n* `Condition.stringGreaterThan` - matches if a string field sorts after a given value\n* `Condition.stringGreaterThanJsonPath` - matches if a string field sorts 
after a value at a given mapping path\n* `Condition.stringGreaterThanEqualsJsonPath` - matches if a string field sorts after or equal to value at a given mapping path\n* `Condition.stringGreaterThanEquals` - matches if a string field sorts after or equal to a given value\n* `Condition.numberEquals` - matches if a numeric field has the given value\n* `Condition.numberEqualsJsonPath` - matches if a numeric field has the value in a given mapping path\n* `Condition.numberLessThan` - matches if a numeric field is less than the given value\n* `Condition.numberLessThanJsonPath` - matches if a numeric field is less than the value at the given mapping path\n* `Condition.numberLessThanEquals` - matches if a numeric field is less than or equal to the given value\n* `Condition.numberLessThanEqualsJsonPath` - matches if a numeric field is less than or equal to the numeric value at given mapping path\n* `Condition.numberGreaterThan` - matches if a numeric field is greater than the given value\n* `Condition.numberGreaterThanJsonPath` - matches if a numeric field is greater than the value at a given mapping path\n* `Condition.numberGreaterThanEquals` - matches if a numeric field is greater than or equal to the given value\n* `Condition.numberGreaterThanEqualsJsonPath` - matches if a numeric field is greater than or equal to the value at a given mapping path\n* `Condition.timestampEquals` - matches if a timestamp field is the same time as the given timestamp\n* `Condition.timestampEqualsJsonPath` - matches if a timestamp field is the same time as the timestamp at a given mapping path\n* `Condition.timestampLessThan` - matches if a timestamp field is before the given timestamp\n* `Condition.timestampLessThanJsonPath` - matches if a timestamp field is before the timestamp at a given mapping path\n* `Condition.timestampLessThanEquals` - matches if a timestamp field is before or equal to the given timestamp\n* `Condition.timestampLessThanEqualsJsonPath` - matches if a timestamp field 
is before or equal to the timestamp at a given mapping path\n* `Condition.timestampGreaterThan` - matches if a timestamp field is after the timestamp at a given mapping path\n* `Condition.timestampGreaterThanJsonPath` - matches if a timestamp field is after the timestamp at a given mapping path\n* `Condition.timestampGreaterThanEquals` - matches if a timestamp field is after or equal to the given timestamp\n* `Condition.timestampGreaterThanEqualsJsonPath` - matches if a timestamp field is after or equal to the timestamp at a given mapping path\n* `Condition.stringMatches` - matches if a field matches a string pattern that can contain a wild card (\\*) e.g: log-\\*.txt or \\*LATEST\\*. No other characters other than \"\\*\" have any special meaning - \\* can be escaped: \\\\\\\\*\n\n### Parallel\n\nA `Parallel` state executes one or more subworkflows in parallel. It can also\nbe used to catch and recover from errors in subworkflows.\n\n```ts\nconst parallel = new sfn.Parallel(this, 'Do the work in parallel');\n\n// Add branches to be executed in parallel\nparallel.branch(shipItem);\nparallel.branch(sendInvoice);\nparallel.branch(restock);\n\n// Retry the whole workflow if something goes wrong\nparallel.addRetry({ maxAttempts: 1 });\n\n// How to recover from errors\nparallel.addCatch(sendFailureNotification);\n\n// What to do in case everything succeeded\nparallel.next(closeOrder);\n```\n\n### Succeed\n\nReaching a `Succeed` state terminates the state machine execution with a\nsuccessful status.\n\n```ts\nconst success = new sfn.Succeed(this, 'We did it!');\n```\n\n### Fail\n\nReaching a `Fail` state terminates the state machine execution with a\nfailure status. 
The fail state should report the reason for the failure.\nFailures can be caught by encompassing `Parallel` states.\n\n```ts\nconst success = new sfn.Fail(this, 'Fail', {\n error: 'WorkflowFailure',\n cause: \"Something went wrong\"\n});\n```\n\n### Map\n\nA `Map` state can be used to run a set of steps for each element of an input array.\nA `Map` state will execute the same steps for multiple entries of an array in the state input.\n\nWhile the `Parallel` state executes multiple branches of steps using the same input, a `Map` state will\nexecute the same steps for multiple entries of an array in the state input.\n\n```ts\nconst map = new sfn.Map(this, 'Map State', {\n maxConcurrency: 1,\n itemsPath: sfn.JsonPath.stringAt('$.inputForMap')\n});\nmap.iterator(new sfn.Pass(this, 'Pass State'));\n```\n\n### Custom State\n\nIt's possible that the high-level constructs for the states or `stepfunctions-tasks` do not have\nthe states or service integrations you are looking for. The primary reasons for this lack of\nfunctionality are:\n\n* A [service integration](https://docs.aws.amazon.com/step-functions/latest/dg/concepts-service-integrations.html) is available through Amazon States Langauge, but not available as construct\n classes in the CDK.\n* The state or state properties are available through Step Functions, but are not configurable\n through constructs\n\nIf a feature is not available, a `CustomState` can be used to supply any Amazon States Language\nJSON-based object as the state definition.\n\n[Code Snippets](https://docs.aws.amazon.com/step-functions/latest/dg/tutorial-code-snippet.html#tutorial-code-snippet-1) are available and can be plugged in as the state definition.\n\nCustom states can be chained together with any of the other states to create your state machine\ndefinition. 
You will also need to provide any permissions that are required to the `role` that\nthe State Machine uses.\n\nThe following example uses the `DynamoDB` service integration to insert data into a DynamoDB table.\n\n```ts\nimport { aws_dynamodb as ddb } from 'aws-cdk-lib';\nimport * as cdk from 'aws-cdk-lib';\nimport { aws_stepfunctions as sfn } from 'aws-cdk-lib';\n\n// create a table\nconst table = new ddb.Table(this, 'montable', {\n partitionKey: {\n name: 'id',\n type: ddb.AttributeType.STRING,\n },\n});\n\nconst finalStatus = new sfn.Pass(stack, 'final step');\n\n// States language JSON to put an item into DynamoDB\n// snippet generated from https://docs.aws.amazon.com/step-functions/latest/dg/tutorial-code-snippet.html#tutorial-code-snippet-1\nconst stateJson = {\n Type: 'Task',\n Resource: 'arn:aws:states:::dynamodb:putItem',\n Parameters: {\n TableName: table.tableName,\n Item: {\n id: {\n S: 'MyEntry',\n },\n },\n },\n ResultPath: null,\n};\n\n// custom state which represents a task to insert data into DynamoDB\nconst custom = new sfn.CustomState(this, 'my custom task', {\n stateJson,\n});\n\nconst chain = sfn.Chain.start(custom)\n .next(finalStatus);\n\nconst sm = new sfn.StateMachine(this, 'StateMachine', {\n definition: chain,\n timeout: cdk.Duration.seconds(30),\n});\n\n// don't forget permissions. You need to assign them\ntable.grantWriteData(sm);\n```\n\n## Task Chaining\n\nTo make defining work flows as convenient (and readable in a top-to-bottom way)\nas writing regular programs, it is possible to chain most methods invocations.\nIn particular, the `.next()` method can be repeated. 
The result of a series of\n`.next()` calls is called a **Chain**, and can be used when defining the jump\ntargets of `Choice.on` or `Parallel.branch`:\n\n```ts\nconst definition = step1\n .next(step2)\n .next(choice\n .when(condition1, step3.next(step4).next(step5))\n .otherwise(step6)\n .afterwards())\n .next(parallel\n .branch(step7.next(step8))\n .branch(step9.next(step10)))\n .next(finish);\n\nnew sfn.StateMachine(this, 'StateMachine', {\n definition,\n});\n```\n\nIf you don't like the visual look of starting a chain directly off the first\nstep, you can use `Chain.start`:\n\n```ts\nconst definition = sfn.Chain\n .start(step1)\n .next(step2)\n .next(step3)\n // ...\n```\n\n## State Machine Fragments\n\nIt is possible to define reusable (or abstracted) mini-state machines by\ndefining a construct that implements `IChainable`, which requires you to define\ntwo fields:\n\n* `startState: State`, representing the entry point into this state machine.\n* `endStates: INextable[]`, representing the (one or more) states that outgoing\n transitions will be added to if you chain onto the fragment.\n\nSince states will be named after their construct IDs, you may need to prefix the\nIDs of states if you plan to instantiate the same state machine fragment\nmultiples times (otherwise all states in every instantiation would have the same\nname).\n\nThe class `StateMachineFragment` contains some helper functions (like\n`prefixStates()`) to make it easier for you to do this. If you define your state\nmachine as a subclass of this, it will be convenient to use:\n\n```ts\ninterface MyJobProps {\n jobFlavor: string;\n}\n\nclass MyJob extends sfn.StateMachineFragment {\n public readonly startState: sfn.State;\n public readonly endStates: sfn.INextable[];\n\n constructor(parent: cdk.Construct, id: string, props: MyJobProps) {\n super(parent, id);\n\n const first = new sfn.Task(this, 'First', { ... });\n // ...\n const last = new sfn.Task(this, 'Last', { ... 
});\n\n this.startState = first;\n this.endStates = [last];\n }\n}\n\n// Do 3 different variants of MyJob in parallel\nnew sfn.Parallel(this, 'All jobs')\n .branch(new MyJob(this, 'Quick', { jobFlavor: 'quick' }).prefixStates())\n .branch(new MyJob(this, 'Medium', { jobFlavor: 'medium' }).prefixStates())\n .branch(new MyJob(this, 'Slow', { jobFlavor: 'slow' }).prefixStates());\n```\n\nA few utility functions are available to parse state machine fragments.\n\n* `State.findReachableStates`: Retrieve the list of states reachable from a given state.\n* `State.findReachableEndStates`: Retrieve the list of end or terminal states reachable from a given state.\n\n## Activity\n\n**Activities** represent work that is done on some non-Lambda worker pool. The\nStep Functions workflow will submit work to this Activity, and a worker pool\nthat you run yourself, probably on EC2, will pull jobs from the Activity and\nsubmit the results of individual jobs back.\n\nYou need the ARN to do so, so if you use Activities be sure to pass the Activity\nARN into your worker pool:\n\n```ts\nconst activity = new sfn.Activity(this, 'Activity');\n\n// Read this CloudFormation Output from your application and use it to poll for work on\n// the activity.\nnew cdk.CfnOutput(this, 'ActivityArn', { value: activity.activityArn });\n```\n\n### Activity-Level Permissions\n\nGranting IAM permissions to an activity can be achieved by calling the `grant(principal, actions)` API:\n\n```ts\nconst activity = new sfn.Activity(this, 'Activity');\n\nconst role = new iam.Role(stack, 'Role', {\n assumedBy: new iam.ServicePrincipal('lambda.amazonaws.com'),\n});\n\nactivity.grant(role, 'states:SendTaskSuccess');\n```\n\nThis will grant the IAM principal the specified actions onto the activity.\n\n## Metrics\n\n`Task` object expose various metrics on the execution of that particular task. 
For example,\nto create an alarm on a particular task failing:\n\n```ts\nnew cloudwatch.Alarm(this, 'TaskAlarm', {\n metric: task.metricFailed(),\n threshold: 1,\n evaluationPeriods: 1,\n});\n```\n\nThere are also metrics on the complete state machine:\n\n```ts\nnew cloudwatch.Alarm(this, 'StateMachineAlarm', {\n metric: stateMachine.metricFailed(),\n threshold: 1,\n evaluationPeriods: 1,\n});\n```\n\nAnd there are metrics on the capacity of all state machines in your account:\n\n```ts\nnew cloudwatch.Alarm(this, 'ThrottledAlarm', {\n metric: StateTransitionMetrics.metricThrottledEvents(),\n threshold: 10,\n evaluationPeriods: 2,\n});\n```\n\n## Error names\n\nStep Functions identifies errors in the Amazon States Language using case-sensitive strings, known as error names. \nThe Amazon States Language defines a set of built-in strings that name well-known errors, all beginning with the `States.` prefix. \n\n* `States.ALL` - A wildcard that matches any known error name.\n* `States.Runtime` - An execution failed due to some exception that could not be processed. Often these are caused by errors at runtime, such as attempting to apply InputPath or OutputPath on a null JSON payload. A `States.Runtime` error is not retriable, and will always cause the execution to fail. 
A retry or catch on `States.ALL` will NOT catch States.Runtime errors.\n* `States.DataLimitExceeded` - A States.DataLimitExceeded exception will be thrown for the following:\n * When the output of a connector is larger than payload size quota.\n * When the output of a state is larger than payload size quota.\n * When, after Parameters processing, the input of a state is larger than the payload size quota.\n * See [the AWS documentation](https://docs.aws.amazon.com/step-functions/latest/dg/limits-overview.html) to learn more about AWS Step Functions Quotas.\n* `States.HeartbeatTimeout` - A Task state failed to send a heartbeat for a period longer than the HeartbeatSeconds value.\n* `States.Timeout` - A Task state either ran longer than the TimeoutSeconds value, or failed to send a heartbeat for a period longer than the HeartbeatSeconds value.\n* `States.TaskFailed`- A Task state failed during the execution. When used in a retry or catch, `States.TaskFailed` acts as a wildcard that matches any known error name except for `States.Timeout`.\n\n## Logging\n\nEnable logging to CloudWatch by passing a logging configuration with a\ndestination LogGroup:\n\n```ts\nconst logGroup = new logs.LogGroup(stack, 'MyLogGroup');\n\nnew sfn.StateMachine(stack, 'MyStateMachine', {\n definition: sfn.Chain.start(new sfn.Pass(stack, 'Pass')),\n logs: {\n destination: logGroup,\n level: sfn.LogLevel.ALL,\n }\n});\n```\n\n## X-Ray tracing\n\nEnable X-Ray tracing for StateMachine:\n\n```ts\nnew sfn.StateMachine(stack, 'MyStateMachine', {\n definition: sfn.Chain.start(new sfn.Pass(stack, 'Pass')),\n tracingEnabled: true\n});\n```\n\nSee [the AWS documentation](https://docs.aws.amazon.com/step-functions/latest/dg/concepts-xray-tracing.html)\nto learn more about AWS Step Functions's X-Ray support.\n\n## State Machine Permission Grants\n\nIAM roles, users, or groups which need to be able to work with a State Machine should be granted IAM permissions.\n\nAny object that implements the 
`IGrantable` interface (has an associated principal) can be granted permissions by calling:\n\n* `stateMachine.grantStartExecution(principal)` - grants the principal the ability to execute the state machine\n* `stateMachine.grantRead(principal)` - grants the principal read access\n* `stateMachine.grantTaskResponse(principal)` - grants the principal the ability to send task tokens to the state machine\n* `stateMachine.grantExecution(principal, actions)` - grants the principal execution-level permissions for the IAM actions specified\n* `stateMachine.grant(principal, actions)` - grants the principal state-machine-level permissions for the IAM actions specified\n\n### Start Execution Permission\n\nGrant permission to start an execution of a state machine by calling the `grantStartExecution()` API.\n\n```ts\nconst role = new iam.Role(stack, 'Role', {\n assumedBy: new iam.ServicePrincipal('lambda.amazonaws.com'),\n});\n\nconst stateMachine = new stepfunction.StateMachine(stack, 'StateMachine', {\n definition,\n});\n\n// Give role permission to start execution of state machine\nstateMachine.grantStartExecution(role);\n```\n\nThe following permission is provided to a service principal by the `grantStartExecution()` API:\n\n* `states:StartExecution` - to state machine\n\n### Read Permissions\n\nGrant `read` access to a state machine by calling the `grantRead()` API.\n\n```ts\nconst role = new iam.Role(stack, 'Role', {\n assumedBy: new iam.ServicePrincipal('lambda.amazonaws.com'),\n});\n\nconst stateMachine = new stepfunction.StateMachine(stack, 'StateMachine', {\n definition,\n});\n\n// Give role read access to state machine\nstateMachine.grantRead(role);\n```\n\nThe following read permissions are provided to a service principal by the `grantRead()` API:\n\n* `states:ListExecutions` - to state machine\n* `states:ListStateMachines` - to state machine\n* `states:DescribeExecution` - to executions\n* `states:DescribeStateMachineForExecution` - to executions\n* 
`states:GetExecutionHistory` - to executions\n* `states:ListActivities` - to `*`\n* `states:DescribeStateMachine` - to `*`\n* `states:DescribeActivity` - to `*`\n\n### Task Response Permissions\n\nGrant permission to allow task responses to a state machine by calling the `grantTaskResponse()` API:\n\n```ts\nconst role = new iam.Role(stack, 'Role', {\n assumedBy: new iam.ServicePrincipal('lambda.amazonaws.com'),\n});\n\nconst stateMachine = new stepfunction.StateMachine(stack, 'StateMachine', {\n definition,\n});\n\n// Give role task response permissions to the state machine\nstateMachine.grantTaskResponse(role);\n```\n\nThe following read permissions are provided to a service principal by the `grantRead()` API:\n\n* `states:SendTaskSuccess` - to state machine\n* `states:SendTaskFailure` - to state machine\n* `states:SendTaskHeartbeat` - to state machine\n\n### Execution-level Permissions\n\nGrant execution-level permissions to a state machine by calling the `grantExecution()` API:\n\n```ts\nconst role = new iam.Role(stack, 'Role', {\n assumedBy: new iam.ServicePrincipal('lambda.amazonaws.com'),\n});\n\nconst stateMachine = new stepfunction.StateMachine(stack, 'StateMachine', {\n definition,\n});\n\n// Give role permission to get execution history of ALL executions for the state machine\nstateMachine.grantExecution(role, 'states:GetExecutionHistory');\n```\n\n### Custom Permissions\n\nYou can add any set of permissions to a state machine by calling the `grant()` API.\n\n```ts\nconst user = new iam.User(stack, 'MyUser');\n\nconst stateMachine = new stepfunction.StateMachine(stack, 'StateMachine', {\n definition,\n});\n\n//give user permission to send task success to the state machine\nstateMachine.grant(user, 'states:SendTaskSuccess');\n```\n\n## Import\n\nAny Step Functions state machine that has been created outside the stack can be imported\ninto your CDK stack.\n\nState machines can be imported by their ARN via the `StateMachine.fromStateMachineArn()` 
API\n\n```ts\nimport * as sfn from 'aws-stepfunctions';\n\nconst stack = new Stack(app, 'MyStack');\nsfn.StateMachine.fromStateMachineArn(\n stack,\n 'ImportedStateMachine',\n 'arn:aws:states:us-east-1:123456789012:stateMachine:StateMachine2E01A3A5-N5TJppzoevKQ');\n```\n"
|
|
3328
|
-
},
|
|
3329
2384
|
"targets": {
|
|
3330
2385
|
"dotnet": {
|
|
3331
2386
|
"namespace": "Amazon.CDK.AWS.StepFunctions"
|
|
@@ -3339,13 +2394,6 @@
|
|
|
3339
2394
|
}
|
|
3340
2395
|
},
|
|
3341
2396
|
"aws-cdk-lib.aws_stepfunctions_tasks": {
|
|
3342
|
-
"locationInModule": {
|
|
3343
|
-
"filename": "lib/index.ts",
|
|
3344
|
-
"line": 182
|
|
3345
|
-
},
|
|
3346
|
-
"readme": {
|
|
3347
|
-
"markdown": "# Tasks for AWS Step Functions\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\n[AWS Step Functions](https://docs.aws.amazon.com/step-functions/latest/dg/welcome.html) is a web service that enables you to coordinate the\ncomponents of distributed applications and microservices using visual workflows.\nYou build applications from individual components that each perform a discrete\nfunction, or task, allowing you to scale and change applications quickly.\n\nA [Task](https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-task-state.html) state represents a single unit of work performed by a state machine.\nAll work in your state machine is performed by tasks.\n\nThis module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk) project.\n\n## Table Of Contents\n\n- [Tasks for AWS Step Functions](#tasks-for-aws-step-functions)\n - [Table Of Contents](#table-of-contents)\n - [Task](#task)\n - [Paths](#paths)\n - [InputPath](#inputpath)\n - [OutputPath](#outputpath)\n - [ResultPath](#resultpath)\n - [Task parameters from the state JSON](#task-parameters-from-the-state-json)\n - [Evaluate Expression](#evaluate-expression)\n - [API Gateway](#api-gateway)\n - [Call REST API Endpoint](#call-rest-api-endpoint)\n - [Call HTTP API Endpoint](#call-http-api-endpoint)\n - [Athena](#athena)\n - [StartQueryExecution](#startqueryexecution)\n - [GetQueryExecution](#getqueryexecution)\n - [GetQueryResults](#getqueryresults)\n - [StopQueryExecution](#stopqueryexecution)\n - [Batch](#batch)\n - [SubmitJob](#submitjob)\n - [CodeBuild](#codebuild)\n - [StartBuild](#startbuild)\n - [DynamoDB](#dynamodb)\n - [GetItem](#getitem)\n - [PutItem](#putitem)\n - [DeleteItem](#deleteitem)\n - [UpdateItem](#updateitem)\n - [ECS](#ecs)\n - [RunTask](#runtask)\n - [EC2](#ec2)\n - [Fargate](#fargate)\n - [EMR](#emr)\n - [Create Cluster](#create-cluster)\n - [Termination Protection](#termination-protection)\n - 
[Terminate Cluster](#terminate-cluster)\n - [Add Step](#add-step)\n - [Cancel Step](#cancel-step)\n - [Modify Instance Fleet](#modify-instance-fleet)\n - [Modify Instance Group](#modify-instance-group)\n - [EKS](#eks)\n - [Call](#call)\n - [EventBridge](#eventbridge)\n - [Put Events](#put-events)\n - [Glue](#glue)\n - [Glue DataBrew](#glue-databrew)\n - [Lambda](#lambda)\n - [SageMaker](#sagemaker)\n - [Create Training Job](#create-training-job)\n - [Create Transform Job](#create-transform-job)\n - [Create Endpoint](#create-endpoint)\n - [Create Endpoint Config](#create-endpoint-config)\n - [Create Model](#create-model)\n - [Update Endpoint](#update-endpoint)\n - [SNS](#sns)\n - [Step Functions](#step-functions)\n - [Start Execution](#start-execution)\n - [Invoke Activity](#invoke-activity)\n - [SQS](#sqs)\n\n## Task\n\nA Task state represents a single unit of work performed by a state machine. In the\nCDK, the exact work to be done is determined by a class that implements `IStepFunctionsTask`.\n\nAWS Step Functions [integrates](https://docs.aws.amazon.com/step-functions/latest/dg/concepts-service-integrations.html) with some AWS services so that you can call API\nactions, and coordinate executions directly from the Amazon States Language in\nStep Functions. You can directly call and pass parameters to the APIs of those\nservices.\n\n## Paths\n\nIn the Amazon States Language, a [path](https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-paths.html) is a string beginning with `$` that you\ncan use to identify components within JSON text.\n\nLearn more about input and output processing in Step Functions [here](https://docs.aws.amazon.com/step-functions/latest/dg/concepts-input-output-filtering.html)\n\n### InputPath\n\nBoth `InputPath` and `Parameters` fields provide a way to manipulate JSON as it\nmoves through your workflow. AWS Step Functions applies the `InputPath` field first,\nand then the `Parameters` field. 
You can first filter your raw input to a selection\nyou want using InputPath, and then apply Parameters to manipulate that input\nfurther, or add new values. If you don't specify an `InputPath`, a default value\nof `$` will be used.\n\nThe following example provides the field named `input` as the input to the `Task`\nstate that runs a Lambda function.\n\n```ts\nconst submitJob = new tasks.LambdaInvoke(this, 'Invoke Handler', {\n lambdaFunction: fn,\n inputPath: '$.input'\n});\n```\n\n### OutputPath\n\nTasks also allow you to select a portion of the state output to pass to the next\nstate. This enables you to filter out unwanted information, and pass only the\nportion of the JSON that you care about. If you don't specify an `OutputPath`,\na default value of `$` will be used. This passes the entire JSON node to the next\nstate.\n\nThe [response](https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html#API_Invoke_ResponseSyntax) from a Lambda function includes the response from the function\nas well as other metadata.\n\nThe following example assigns the output from the Task to a field named `result`\n\n```ts\nconst submitJob = new tasks.LambdaInvoke(this, 'Invoke Handler', {\n lambdaFunction: fn,\n outputPath: '$.Payload.result'\n});\n```\n\n### ResultSelector\n\nYou can use [`ResultSelector`](https://docs.aws.amazon.com/step-functions/latest/dg/input-output-inputpath-params.html#input-output-resultselector)\nto manipulate the raw result of a Task, Map or Parallel state before it is\npassed to [`ResultPath`](###ResultPath). For service integrations, the raw\nresult contains metadata in addition to the response payload. 
You can use\nResultSelector to construct a JSON payload that becomes the effective result\nusing static values or references to the raw result or context object.\n\nThe following example extracts the output payload of a Lambda function Task and combines\nit with some static values and the state name from the context object.\n\n```ts\nnew tasks.LambdaInvoke(this, 'Invoke Handler', {\n lambdaFunction: fn,\n resultSelector: {\n lambdaOutput: sfn.JsonPath.stringAt('$.Payload'),\n invokeRequestId: sfn.JsonPath.stringAt('$.SdkResponseMetadata.RequestId'),\n staticValue: {\n foo: 'bar',\n },\n stateName: sfn.JsonPath.stringAt('$$.State.Name'),\n },\n})\n```\n\n### ResultPath\n\nThe output of a state can be a copy of its input, the result it produces (for\nexample, output from a Task state’s Lambda function), or a combination of its\ninput and result. Use [`ResultPath`](https://docs.aws.amazon.com/step-functions/latest/dg/input-output-resultpath.html) to control which combination of these is\npassed to the state output. If you don't specify an `ResultPath`, a default\nvalue of `$` will be used.\n\nThe following example adds the item from calling DynamoDB's `getItem` API to the state\ninput and passes it to the next state.\n\n```ts\nnew tasks.DynamoPutItem(this, 'PutItem', {\n item: {\n MessageId: tasks.DynamoAttributeValue.fromString('message-id')\n },\n table: myTable,\n resultPath: `$.Item`,\n});\n```\n\n⚠️ The `OutputPath` is computed after applying `ResultPath`. All service integrations\nreturn metadata as part of their response. When using `ResultPath`, it's not possible to\nmerge a subset of the task output to the input.\n\n## Task parameters from the state JSON\n\nMost tasks take parameters. 
Parameter values can either be static, supplied directly\nin the workflow definition (by specifying their values), or a value available at runtime\nin the state machine's execution (either as its input or an output of a prior state).\nParameter values available at runtime can be specified via the `JsonPath` class,\nusing methods such as `JsonPath.stringAt()`.\n\nThe following example provides the field named `input` as the input to the Lambda function\nand invokes it asynchronously.\n\n```ts\nconst submitJob = new tasks.LambdaInvoke(this, 'Invoke Handler', {\n lambdaFunction: fn,\n payload: sfn.TaskInput.fromJsonPathAt('$.input'),\n invocationType: tasks.LambdaInvocationType.EVENT,\n});\n```\n\nYou can also use [intrinsic functions](https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-intrinsic-functions.html) with `JsonPath.stringAt()`. \nHere is an example of starting an Athena query that is dynamically created using the task input:\n\n```ts\nconst startQueryExecutionJob = new tasks.AthenaStartQueryExecution(this, 'Athena Start Query', {\n queryString: sfn.JsonPath.stringAt(\"States.Format('select contacts where year={};', $.year)\"),\n queryExecutionContext: {\n databaseName: 'interactions',\n },\n resultConfiguration: {\n encryptionConfiguration: {\n encryptionOption: tasks.EncryptionOption.S3_MANAGED,\n },\n outputLocation: {\n bucketName: 'mybucket',\n objectKey: 'myprefix',\n },\n },\n integrationPattern: sfn.IntegrationPattern.RUN_JOB,\n});\n```\n\nEach service integration has its own set of parameters that can be supplied.\n\n## Evaluate Expression\n\nUse the `EvaluateExpression` to perform simple operations referencing state paths. The\n`expression` referenced in the task will be evaluated in a Lambda function\n(`eval()`). 
This allows you to not have to write Lambda code for simple operations.\n\nExample: convert a wait time from milliseconds to seconds, concat this in a message and wait:\n\n```ts\nconst convertToSeconds = new tasks.EvaluateExpression(this, 'Convert to seconds', {\n expression: '$.waitMilliseconds / 1000',\n resultPath: '$.waitSeconds',\n});\n\nconst createMessage = new tasks.EvaluateExpression(this, 'Create message', {\n // Note: this is a string inside a string.\n expression: '`Now waiting ${$.waitSeconds} seconds...`',\n runtime: lambda.Runtime.NODEJS_14_X,\n resultPath: '$.message',\n});\n\nconst publishMessage = new tasks.SnsPublish(this, 'Publish message', {\n topic: new sns.Topic(this, 'cool-topic'),\n message: sfn.TaskInput.fromJsonPathAt('$.message'),\n resultPath: '$.sns',\n});\n\nconst wait = new sfn.Wait(this, 'Wait', {\n time: sfn.WaitTime.secondsPath('$.waitSeconds')\n});\n\nnew sfn.StateMachine(this, 'StateMachine', {\n definition: convertToSeconds\n .next(createMessage)\n .next(publishMessage)\n .next(wait)\n});\n```\n\nThe `EvaluateExpression` supports a `runtime` prop to specify the Lambda\nruntime to use to evaluate the expression. Currently, only runtimes\nof the Node.js family are supported.\n\n## API Gateway\n\nStep Functions supports [API Gateway](https://docs.aws.amazon.com/step-functions/latest/dg/connect-api-gateway.html) through the service integration pattern.\n\nHTTP APIs are designed for low-latency, cost-effective integrations with AWS services, including AWS Lambda, and HTTP endpoints.\nHTTP APIs support OIDC and OAuth 2.0 authorization, and come with built-in support for CORS and automatic deployments.\nPrevious-generation REST APIs currently offer more features. 
More details can be found [here](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-vs-rest.html).\n\n### Call REST API Endpoint\n\nThe `CallApiGatewayRestApiEndpoint` calls the REST API endpoint.\n\n```ts\nimport { aws_stepfunctions as sfn } from 'aws-cdk-lib';\nimport * as tasks from `@aws-cdk/aws-stepfunctions-tasks`;\n\nconst restApi = new apigateway.RestApi(stack, 'MyRestApi');\n\nconst invokeTask = new tasks.CallApiGatewayRestApiEndpoint(stack, 'Call REST API', {\n api: restApi,\n stageName: 'prod',\n method: HttpMethod.GET,\n});\n```\n\n### Call HTTP API Endpoint\n\nThe `CallApiGatewayHttpApiEndpoint` calls the HTTP API endpoint.\n\n```ts\nimport { aws_stepfunctions as sfn } from 'aws-cdk-lib';\nimport * as tasks from `@aws-cdk/aws-stepfunctions-tasks`;\n\nconst httpApi = new apigatewayv2.HttpApi(stack, 'MyHttpApi');\n\nconst invokeTask = new tasks.CallApiGatewayHttpApiEndpoint(stack, 'Call HTTP API', {\n apiId: httpApi.apiId,\n apiStack: cdk.Stack.of(httpApi),\n method: HttpMethod.GET,\n});\n```\n\n## Athena\n\nStep Functions supports [Athena](https://docs.aws.amazon.com/step-functions/latest/dg/connect-athena.html) through the service integration pattern.\n\n### StartQueryExecution\n\nThe [StartQueryExecution](https://docs.aws.amazon.com/athena/latest/APIReference/API_StartQueryExecution.html) API runs the SQL query statement.\n\n```ts\nconst startQueryExecutionJob = new tasks.AthenaStartQueryExecution(this, 'Start Athena Query', {\n queryString: sfn.JsonPath.stringAt('$.queryString'),\n queryExecutionContext: {\n databaseName: 'mydatabase',\n },\n resultConfiguration: {\n encryptionConfiguration: {\n encryptionOption: tasks.EncryptionOption.S3_MANAGED,\n },\n outputLocation: {\n bucketName: 'query-results-bucket',\n objectKey: 'folder',\n },\n },\n});\n```\n\n### GetQueryExecution\n\nThe [GetQueryExecution](https://docs.aws.amazon.com/athena/latest/APIReference/API_GetQueryExecution.html) API gets information about a single execution of 
a query.\n\n```ts\nconst getQueryExecutionJob = new tasks.AthenaGetQueryExecution(this, 'Get Query Execution', {\n queryExecutionId: sfn.JsonPath.stringAt('$.QueryExecutionId'),\n});\n```\n\n### GetQueryResults\n\nThe [GetQueryResults](https://docs.aws.amazon.com/athena/latest/APIReference/API_GetQueryResults.html) API that streams the results of a single query execution specified by QueryExecutionId from S3.\n\n```ts\nconst getQueryResultsJob = new tasks.AthenaGetQueryResults(this, 'Get Query Results', {\n queryExecutionId: sfn.JsonPath.stringAt('$.QueryExecutionId'),\n});\n```\n\n### StopQueryExecution\n\nThe [StopQueryExecution](https://docs.aws.amazon.com/athena/latest/APIReference/API_StopQueryExecution.html) API that stops a query execution.\n\n```ts\nconst stopQueryExecutionJob = new tasks.AthenaStopQueryExecution(this, 'Stop Query Execution', {\n queryExecutionId: sfn.JsonPath.stringAt('$.QueryExecutionId'),\n});\n```\n\n## Batch\n\nStep Functions supports [Batch](https://docs.aws.amazon.com/step-functions/latest/dg/connect-batch.html) through the service integration pattern.\n\n### SubmitJob\n\nThe [SubmitJob](https://docs.aws.amazon.com/batch/latest/APIReference/API_SubmitJob.html) API submits an AWS Batch job from a job definition.\n\n```ts fixture=with-batch-job\nconst task = new tasks.BatchSubmitJob(this, 'Submit Job', {\n jobDefinitionArn: batchJobDefinitionArn,\n jobName: 'MyJob',\n jobQueueArn: batchQueueArn,\n});\n```\n\n## CodeBuild\n\nStep Functions supports [CodeBuild](https://docs.aws.amazon.com/step-functions/latest/dg/connect-codebuild.html) through the service integration pattern.\n\n### StartBuild\n\n[StartBuild](https://docs.aws.amazon.com/codebuild/latest/APIReference/API_StartBuild.html) starts a CodeBuild Project by Project Name.\n\n```ts\nimport { aws_codebuild as codebuild } from 'aws-cdk-lib';\n\nconst codebuildProject = new codebuild.Project(this, 'Project', {\n projectName: 'MyTestProject',\n buildSpec: 
codebuild.BuildSpec.fromObject({\n version: '0.2',\n phases: {\n build: {\n commands: [\n 'echo \"Hello, CodeBuild!\"',\n ],\n },\n },\n }),\n});\n\nconst task = new tasks.CodeBuildStartBuild(this, 'Task', {\n project: codebuildProject,\n integrationPattern: sfn.IntegrationPattern.RUN_JOB,\n environmentVariablesOverride: {\n ZONE: {\n type: codebuild.BuildEnvironmentVariableType.PLAINTEXT,\n value: sfn.JsonPath.stringAt('$.envVariables.zone'),\n },\n },\n});\n```\n\n## DynamoDB\n\nYou can call DynamoDB APIs from a `Task` state.\nRead more about calling DynamoDB APIs [here](https://docs.aws.amazon.com/step-functions/latest/dg/connect-ddb.html)\n\n### GetItem\n\nThe [GetItem](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_GetItem.html) operation returns a set of attributes for the item with the given primary key.\n\n```ts\nnew tasks.DynamoGetItem(this, 'Get Item', {\n key: { messageId: tasks.DynamoAttributeValue.fromString('message-007') },\n table: myTable,\n});\n```\n\n### PutItem\n\nThe [PutItem](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_PutItem.html) operation creates a new item, or replaces an old item with a new item.\n\n```ts\nnew tasks.DynamoPutItem(this, 'PutItem', {\n item: {\n MessageId: tasks.DynamoAttributeValue.fromString('message-007'),\n Text: tasks.DynamoAttributeValue.fromString(sfn.JsonPath.stringAt('$.bar')),\n TotalCount: tasks.DynamoAttributeValue.fromNumber(10),\n },\n table: myTable,\n});\n```\n\n### DeleteItem\n\nThe [DeleteItem](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_DeleteItem.html) operation deletes a single item in a table by primary key.\n\n```ts\nnew tasks.DynamoDeleteItem(this, 'DeleteItem', {\n key: { MessageId: tasks.DynamoAttributeValue.fromString('message-007') },\n table: myTable,\n resultPath: sfn.JsonPath.DISCARD,\n});\n```\n\n### UpdateItem\n\nThe [UpdateItem](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateItem.html) operation 
edits an existing item's attributes, or adds a new item\nto the table if it does not already exist.\n\n```ts\nnew tasks.DynamoUpdateItem(this, 'UpdateItem', {\n key: {\n MessageId: tasks.DynamoAttributeValue.fromString('message-007')\n },\n table: myTable,\n expressionAttributeValues: {\n ':val': tasks.DynamoAttributeValue.numberFromString(sfn.JsonPath.stringAt('$.Item.TotalCount.N')),\n ':rand': tasks.DynamoAttributeValue.fromNumber(20),\n },\n updateExpression: 'SET TotalCount = :val + :rand',\n});\n```\n\n## ECS\n\nStep Functions supports [ECS/Fargate](https://docs.aws.amazon.com/step-functions/latest/dg/connect-ecs.html) through the service integration pattern.\n\n### RunTask\n\n[RunTask](https://docs.aws.amazon.com/step-functions/latest/dg/connect-ecs.html) starts a new task using the specified task definition.\n\n#### EC2\n\nThe EC2 launch type allows you to run your containerized applications on a cluster\nof Amazon EC2 instances that you manage.\n\nWhen a task that uses the EC2 launch type is launched, Amazon ECS must determine where\nto place the task based on the requirements specified in the task definition, such as\nCPU and memory. Similarly, when you scale down the task count, Amazon ECS must determine\nwhich tasks to terminate. You can apply task placement strategies and constraints to\ncustomize how Amazon ECS places and terminates tasks. 
Learn more about [task placement](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement.html)\n\nThe latest ACTIVE revision of the passed task definition is used for running the task.\n\nThe following example runs a job from a task definition on EC2\n\n```ts\nimport { aws_ecs as ecs } from 'aws-cdk-lib';\n\nconst vpc = ec2.Vpc.fromLookup(this, 'Vpc', {\n isDefault: true,\n});\n\nconst cluster = new ecs.Cluster(this, 'Ec2Cluster', { vpc });\ncluster.addCapacity('DefaultAutoScalingGroup', {\n instanceType: new ec2.InstanceType('t2.micro'),\n vpcSubnets: { subnetType: ec2.SubnetType.PUBLIC },\n});\n\nconst taskDefinition = new ecs.TaskDefinition(this, 'TD', {\n compatibility: ecs.Compatibility.EC2,\n});\n\ntaskDefinition.addContainer('TheContainer', {\n image: ecs.ContainerImage.fromRegistry('foo/bar'),\n memoryLimitMiB: 256,\n});\n\nconst runTask = new tasks.EcsRunTask(this, 'Run', {\n integrationPattern: sfn.IntegrationPattern.RUN_JOB,\n cluster,\n taskDefinition,\n launchTarget: new tasks.EcsEc2LaunchTarget({\n placementStrategies: [\n ecs.PlacementStrategy.spreadAcrossInstances(),\n ecs.PlacementStrategy.packedByCpu(),\n ecs.PlacementStrategy.randomly(),\n ],\n placementConstraints: [\n ecs.PlacementConstraint.memberOf('blieptuut')\n ],\n }),\n });\n```\n\n#### Fargate\n\nAWS Fargate is a serverless compute engine for containers that works with Amazon\nElastic Container Service (ECS). Fargate makes it easy for you to focus on building\nyour applications. Fargate removes the need to provision and manage servers, lets you\nspecify and pay for resources per application, and improves security through application\nisolation by design. Learn more about [Fargate](https://aws.amazon.com/fargate/)\n\nThe Fargate launch type allows you to run your containerized applications without the need\nto provision and manage the backend infrastructure. Just register your task definition and\nFargate launches the container for you. 
The latest ACTIVE revision of the passed\ntask definition is used for running the task. Learn more about\n[Fargate Versioning](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DescribeTaskDefinition.html)\n\nThe following example runs a job from a task definition on Fargate\n\n```ts\nimport { aws_ecs as ecs } from 'aws-cdk-lib';\n\nconst vpc = ec2.Vpc.fromLookup(this, 'Vpc', {\n isDefault: true,\n});\n\nconst cluster = new ecs.Cluster(this, 'FargateCluster', { vpc });\n\nconst taskDefinition = new ecs.TaskDefinition(this, 'TD', {\n memoryMiB: '512',\n cpu: '256',\n compatibility: ecs.Compatibility.FARGATE,\n});\n\nconst containerDefinition = taskDefinition.addContainer('TheContainer', {\n image: ecs.ContainerImage.fromRegistry('foo/bar'),\n memoryLimitMiB: 256,\n});\n\nconst runTask = new tasks.EcsRunTask(this, 'RunFargate', {\n integrationPattern: sfn.IntegrationPattern.RUN_JOB,\n cluster,\n taskDefinition,\n assignPublicIp: true,\n containerOverrides: [{\n containerDefinition,\n environment: [{ name: 'SOME_KEY', value: sfn.JsonPath.stringAt('$.SomeKey') }],\n }],\n launchTarget: new tasks.EcsFargateLaunchTarget(),\n});\n```\n\n## EMR\n\nStep Functions supports Amazon EMR through the service integration pattern.\nThe service integration APIs correspond to Amazon EMR APIs but differ in the\nparameters that are used.\n\n[Read more](https://docs.aws.amazon.com/step-functions/latest/dg/connect-emr.html) about the differences when using these service integrations.\n\n### Create Cluster\n\nCreates and starts running a cluster (job flow).\nCorresponds to the [`runJobFlow`](https://docs.aws.amazon.com/emr/latest/APIReference/API_RunJobFlow.html) API in EMR.\n\n```ts\n\nconst clusterRole = new iam.Role(this, 'ClusterRole', {\n assumedBy: new iam.ServicePrincipal('ec2.amazonaws.com'),\n});\n\nconst serviceRole = new iam.Role(this, 'ServiceRole', {\n assumedBy: new iam.ServicePrincipal('elasticmapreduce.amazonaws.com'),\n});\n\nconst autoScalingRole = new 
iam.Role(this, 'AutoScalingRole', {\n assumedBy: new iam.ServicePrincipal('elasticmapreduce.amazonaws.com'),\n});\n\nautoScalingRole.assumeRolePolicy?.addStatements(\n new iam.PolicyStatement({\n effect: iam.Effect.ALLOW,\n principals: [\n new iam.ServicePrincipal('application-autoscaling.amazonaws.com'),\n ],\n actions: [\n 'sts:AssumeRole',\n ],\n }));\n)\n\nnew tasks.EmrCreateCluster(this, 'Create Cluster', {\n instances: {},\n clusterRole,\n name: sfn.TaskInput.fromJsonPathAt('$.ClusterName').value,\n serviceRole,\n autoScalingRole,\n});\n```\n\n### Termination Protection\n\nLocks a cluster (job flow) so the EC2 instances in the cluster cannot be\nterminated by user intervention, an API call, or a job-flow error.\n\nCorresponds to the [`setTerminationProtection`](https://docs.aws.amazon.com/step-functions/latest/dg/connect-emr.html) API in EMR.\n\n```ts\nnew tasks.EmrSetClusterTerminationProtection(this, 'Task', {\n clusterId: 'ClusterId',\n terminationProtected: false,\n});\n```\n\n### Terminate Cluster\n\nShuts down a cluster (job flow).\nCorresponds to the [`terminateJobFlows`](https://docs.aws.amazon.com/emr/latest/APIReference/API_TerminateJobFlows.html) API in EMR.\n\n```ts\nnew tasks.EmrTerminateCluster(this, 'Task', {\n clusterId: 'ClusterId'\n});\n```\n\n### Add Step\n\nAdds a new step to a running cluster.\nCorresponds to the [`addJobFlowSteps`](https://docs.aws.amazon.com/emr/latest/APIReference/API_AddJobFlowSteps.html) API in EMR.\n\n```ts\nnew tasks.EmrAddStep(this, 'Task', {\n clusterId: 'ClusterId',\n name: 'StepName',\n jar: 'Jar',\n actionOnFailure: tasks.ActionOnFailure.CONTINUE,\n});\n```\n\n### Cancel Step\n\nCancels a pending step in a running cluster.\nCorresponds to the [`cancelSteps`](https://docs.aws.amazon.com/emr/latest/APIReference/API_CancelSteps.html) API in EMR.\n\n```ts\nnew tasks.EmrCancelStep(this, 'Task', {\n clusterId: 'ClusterId',\n stepId: 'StepId',\n});\n```\n\n### Modify Instance Fleet\n\nModifies the target On-Demand 
and target Spot capacities for the instance\nfleet with the specified InstanceFleetName.\n\nCorresponds to the [`modifyInstanceFleet`](https://docs.aws.amazon.com/emr/latest/APIReference/API_ModifyInstanceFleet.html) API in EMR.\n\n```ts\nnew tasks.EmrModifyInstanceFleetByName(this, 'Task', {\n clusterId: 'ClusterId',\n instanceFleetName: 'InstanceFleetName',\n targetOnDemandCapacity: 2,\n targetSpotCapacity: 0,\n});\n```\n\n### Modify Instance Group\n\nModifies the number of nodes and configuration settings of an instance group.\n\nCorresponds to the [`modifyInstanceGroups`](https://docs.aws.amazon.com/emr/latest/APIReference/API_ModifyInstanceGroups.html) API in EMR.\n\n```ts\nnew tasks.EmrModifyInstanceGroupByName(this, 'Task', {\n clusterId: 'ClusterId',\n instanceGroupName: sfn.JsonPath.stringAt('$.InstanceGroupName'),\n instanceGroup: {\n instanceCount: 1,\n },\n});\n```\n\n## EKS\n\nStep Functions supports Amazon EKS through the service integration pattern.\nThe service integration APIs correspond to Amazon EKS APIs.\n\n[Read more](https://docs.aws.amazon.com/step-functions/latest/dg/connect-eks.html) about the differences when using these service integrations.\n\n### Call\n\nRead and write Kubernetes resource objects via a Kubernetes API endpoint.\nCorresponds to the [`call`](https://docs.aws.amazon.com/step-functions/latest/dg/connect-eks.html) API in Step Functions Connector.\n\nThe following code snippet includes a Task state that uses eks:call to list the pods.\n\n```ts\nimport { aws_eks as eks } from 'aws-cdk-lib';\nimport { aws_stepfunctions as sfn } from 'aws-cdk-lib';\nimport { aws_stepfunctions_tasks as tasks } from 'aws-cdk-lib';\n\nconst myEksCluster = new eks.Cluster(this, 'my sample cluster', {\n version: eks.KubernetesVersion.V1_18,\n clusterName: 'myEksCluster',\n });\n\nnew tasks.EksCall(stack, 'Call a EKS Endpoint', {\n cluster: myEksCluster,\n httpMethod: MethodType.GET,\n httpPath: '/api/v1/namespaces/default/pods',\n});\n```\n\n## 
EventBridge\n\nStep Functions supports Amazon EventBridge through the service integration pattern.\nThe service integration APIs correspond to Amazon EventBridge APIs.\n\n[Read more](https://docs.aws.amazon.com/step-functions/latest/dg/connect-eventbridge.html) about the differences when using these service integrations.\n\n### Put Events\n\nSend events to an EventBridge bus.\nCorresponds to the [`put-events`](https://docs.aws.amazon.com/step-functions/latest/dg/connect-eventbridge.html) API in Step Functions Connector.\n\nThe following code snippet includes a Task state that uses events:putevents to send an event to the default bus.\n\n```ts\nimport { aws_events as events } from 'aws-cdk-lib';\nimport { aws_stepfunctions as sfn } from 'aws-cdk-lib';\nimport { aws_stepfunctions_tasks as tasks } from 'aws-cdk-lib';\n\nconst myEventBus = events.EventBus(stack, 'EventBus', {\n eventBusName: 'MyEventBus1',\n});\n\nnew tasks.EventBridgePutEvents(stack, 'Send an event to EventBridge', {\n entries: [{\n detail: sfn.TaskInput.fromObject({\n Message: 'Hello from Step Functions!',\n }),\n eventBus: myEventBus,\n detailType: 'MessageFromStepFunctions',\n source: 'step.functions',\n }],\n});\n```\n\n## Glue\n\nStep Functions supports [AWS Glue](https://docs.aws.amazon.com/step-functions/latest/dg/connect-glue.html) through the service integration pattern.\n\nYou can call the [`StartJobRun`](https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-jobs-runs.html#aws-glue-api-jobs-runs-StartJobRun) API from a `Task` state.\n\n```ts\nnew tasks.GlueStartJobRun(this, 'Task', {\n glueJobName: 'my-glue-job',\n arguments: sfn.TaskInput.fromObject({\n key: 'value',\n }),\n timeout: cdk.Duration.minutes(30),\n notifyDelayAfter: cdk.Duration.minutes(5),\n});\n```\n\n## Glue DataBrew\n\nStep Functions supports [AWS Glue DataBrew](https://docs.aws.amazon.com/step-functions/latest/dg/connect-databrew.html) through the service integration pattern.\n\nYou can call the 
[`StartJobRun`](https://docs.aws.amazon.com/databrew/latest/dg/API_StartJobRun.html) API from a `Task` state.\n\n```ts\nnew tasks.GlueDataBrewStartJobRun(this, 'Task', {\n name: 'databrew-job',\n});\n```\n\n## Lambda\n\n[Invoke](https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html) a Lambda function.\n\nYou can specify the input to your Lambda function through the `payload` attribute.\nBy default, Step Functions invokes Lambda function with the state input (JSON path '$')\nas the input.\n\nThe following snippet invokes a Lambda Function with the state input as the payload\nby referencing the `$` path.\n\n```ts\nnew tasks.LambdaInvoke(this, 'Invoke with state input', {\n lambdaFunction: fn,\n});\n```\n\nWhen a function is invoked, the Lambda service sends [these response\nelements](https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html#API_Invoke_ResponseElements)\nback.\n\n⚠️ The response from the Lambda function is in an attribute called `Payload`\n\nThe following snippet invokes a Lambda Function by referencing the `$.Payload` path\nto reference the output of a Lambda executed before it.\n\n```ts\nnew tasks.LambdaInvoke(this, 'Invoke with empty object as payload', {\n lambdaFunction: fn,\n payload: sfn.TaskInput.fromObject({}),\n});\n\n// use the output of fn as input\nnew tasks.LambdaInvoke(this, 'Invoke with payload field in the state input', {\n lambdaFunction: fn,\n payload: sfn.TaskInput.fromJsonPathAt('$.Payload'),\n});\n```\n\nThe following snippet invokes a Lambda and sets the task output to only include\nthe Lambda function response.\n\n```ts\nnew tasks.LambdaInvoke(this, 'Invoke and set function response as task output', {\n lambdaFunction: fn,\n outputPath: '$.Payload',\n});\n```\n\nIf you want to combine the input and the Lambda function response you can use\nthe `payloadResponseOnly` property and specify the `resultPath`. 
This will put the\nLambda function ARN directly in the \"Resource\" string, but it conflicts with the\nintegrationPattern, invocationType, clientContext, and qualifier properties.\n\n```ts\nnew tasks.LambdaInvoke(this, 'Invoke and combine function response with task input', {\n lambdaFunction: fn,\n payloadResponseOnly: true,\n resultPath: '$.fn',\n});\n```\n\nYou can have Step Functions pause a task, and wait for an external process to\nreturn a task token. Read more about the [callback pattern](https://docs.aws.amazon.com/step-functions/latest/dg/callback-task-sample-sqs.html#call-back-lambda-example)\n\nTo use the callback pattern, set the `token` property on the task. Call the Step\nFunctions `SendTaskSuccess` or `SendTaskFailure` APIs with the token to\nindicate that the task has completed and the state machine should resume execution.\n\nThe following snippet invokes a Lambda with the task token as part of the input\nto the Lambda.\n\n```ts\nnew tasks.LambdaInvoke(this, 'Invoke with callback', {\n lambdaFunction: fn,\n integrationPattern: sfn.IntegrationPattern.WAIT_FOR_TASK_TOKEN,\n payload: sfn.TaskInput.fromObject({\n token: sfn.JsonPath.taskToken,\n input: sfn.JsonPath.stringAt('$.someField'),\n }),\n});\n```\n\n⚠️ The task will pause until it receives that task token back with a `SendTaskSuccess` or `SendTaskFailure`\ncall. Learn more about [Callback with the Task\nToken](https://docs.aws.amazon.com/step-functions/latest/dg/connect-to-resource.html#connect-wait-token).\n\nAWS Lambda can occasionally experience transient service errors. In this case, invoking Lambda\nresults in a 500 error, such as `ServiceException`, `AWSLambdaException`, or `SdkClientException`.\nAs a best practice, the `LambdaInvoke` task will retry on those errors with an interval of 2 seconds,\na back-off rate of 2 and 6 maximum attempts. 
Set the `retryOnServiceExceptions` prop to `false` to\ndisable this behavior.\n\n## SageMaker\n\nStep Functions supports [AWS SageMaker](https://docs.aws.amazon.com/step-functions/latest/dg/connect-sagemaker.html) through the service integration pattern.\n\n### Create Training Job\n\nYou can call the [`CreateTrainingJob`](https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateTrainingJob.html) API from a `Task` state.\n\n```ts\nnew tasks.SageMakerCreateTrainingJob(this, 'TrainSagemaker', {\n trainingJobName: sfn.JsonPath.stringAt('$.JobName'),\n algorithmSpecification: {\n algorithmName: 'BlazingText',\n trainingInputMode: tasks.InputMode.FILE,\n },\n inputDataConfig: [{\n channelName: 'train',\n dataSource: {\n s3DataSource: {\n s3DataType: tasks.S3DataType.S3_PREFIX,\n s3Location: tasks.S3Location.fromJsonExpression('$.S3Bucket'),\n },\n },\n }],\n outputDataConfig: {\n s3OutputLocation: tasks.S3Location.fromBucket(s3.Bucket.fromBucketName(this, 'Bucket', 'mybucket'), 'myoutputpath'),\n },\n resourceConfig: {\n instanceCount: 1,\n instanceType: new ec2.InstanceType(JsonPath.stringAt('$.InstanceType')),\n volumeSize: cdk.Size.gibibytes(50),\n }, // optional: default is 1 instance of EC2 `M4.XLarge` with `10GB` volume\n stoppingCondition: {\n maxRuntime: cdk.Duration.hours(2),\n }, // optional: default is 1 hour\n});\n```\n\n### Create Transform Job\n\nYou can call the [`CreateTransformJob`](https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateTransformJob.html) API from a `Task` state.\n\n```ts\nnew tasks.SageMakerCreateTransformJob(this, 'Batch Inference', {\n transformJobName: 'MyTransformJob',\n modelName: 'MyModelName',\n modelClientOptions: {\n invocationsMaxRetries: 3, // default is 0\n invocationsTimeout: cdk.Duration.minutes(5), // default is 60 seconds\n },\n transformInput: {\n transformDataSource: {\n s3DataSource: {\n s3Uri: 's3://inputbucket/train',\n s3DataType: tasks.S3DataType.S3_PREFIX,\n }\n }\n },\n transformOutput: {\n s3OutputPath: 
's3://outputbucket/TransformJobOutputPath',\n },\n transformResources: {\n instanceCount: 1,\n instanceType: ec2.InstanceType.of(ec2.InstanceClass.M4, ec2.InstanceSize.XLARGE),\n }\n});\n\n```\n\n### Create Endpoint\n\nYou can call the [`CreateEndpoint`](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateEndpoint.html) API from a `Task` state.\n\n```ts\nnew tasks.SageMakerCreateEndpoint(this, 'SagemakerEndpoint', {\n endpointName: sfn.JsonPath.stringAt('$.EndpointName'),\n endpointConfigName: sfn.JsonPath.stringAt('$.EndpointConfigName'),\n});\n```\n\n### Create Endpoint Config\n\nYou can call the [`CreateEndpointConfig`](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateEndpointConfig.html) API from a `Task` state.\n\n```ts\nnew tasks.SageMakerCreateEndpointConfig(this, 'SagemakerEndpointConfig', {\n endpointConfigName: 'MyEndpointConfig',\n productionVariants: [{\n initialInstanceCount: 2,\n instanceType: ec2.InstanceType.of(ec2.InstanceClass.M5, ec2.InstanceSize.XLARGE),\n modelName: 'MyModel',\n variantName: 'awesome-variant',\n }],\n});\n```\n\n### Create Model\n\nYou can call the [`CreateModel`](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateModel.html) API from a `Task` state.\n\n```ts\nnew tasks.SageMakerCreateModel(this, 'Sagemaker', {\n modelName: 'MyModel',\n primaryContainer: new tasks.ContainerDefinition({\n image: tasks.DockerImage.fromJsonExpression(sfn.JsonPath.stringAt('$.Model.imageName')),\n mode: tasks.Mode.SINGLE_MODEL,\n modelS3Location: tasks.S3Location.fromJsonExpression('$.TrainingJob.ModelArtifacts.S3ModelArtifacts'),\n }),\n});\n```\n\n### Update Endpoint\n\nYou can call the [`UpdateEndpoint`](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_UpdateEndpoint.html) API from a `Task` state.\n\n```ts\nnew tasks.SageMakerUpdateEndpoint(this, 'SagemakerEndpoint', {\n endpointName: sfn.JsonPath.stringAt('$.Endpoint.Name'),\n endpointConfigName: 
sfn.JsonPath.stringAt('$.Endpoint.EndpointConfig'),\n });\n```\n\n## SNS\n\nStep Functions supports [Amazon SNS](https://docs.aws.amazon.com/step-functions/latest/dg/connect-sns.html) through the service integration pattern.\n\nYou can call the [`Publish`](https://docs.aws.amazon.com/sns/latest/api/API_Publish.html) API from a `Task` state to publish to an SNS topic.\n\n```ts\nconst topic = new sns.Topic(this, 'Topic');\n\n// Use a field from the execution data as message.\nconst task1 = new tasks.SnsPublish(this, 'Publish1', {\n topic,\n integrationPattern: sfn.IntegrationPattern.REQUEST_RESPONSE,\n message: sfn.TaskInput.fromDataAt('$.state.message'),\n messageAttributes: {\n place: {\n value: sfn.JsonPath.stringAt('$.place'),\n },\n pic: {\n // BINARY must be explicitly set\n type: MessageAttributeDataType.BINARY,\n value: sfn.JsonPath.stringAt('$.pic'),\n },\n people: {\n value: 4,\n },\n handles: {\n value: ['@kslater', '@jjf', null, '@mfanning'],\n },\n\n});\n\n// Combine a field from the execution data with\n// a literal object.\nconst task2 = new tasks.SnsPublish(this, 'Publish2', {\n topic,\n message: sfn.TaskInput.fromObject({\n field1: 'somedata',\n field2: sfn.JsonPath.stringAt('$.field2'),\n })\n});\n```\n\n## Step Functions\n\n### Start Execution\n\nYou can manage [AWS Step Functions](https://docs.aws.amazon.com/step-functions/latest/dg/connect-stepfunctions.html) executions.\n\nAWS Step Functions supports it's own [`StartExecution`](https://docs.aws.amazon.com/step-functions/latest/apireference/API_StartExecution.html) API as a service integration.\n\n```ts\n// Define a state machine with one Pass state\nconst child = new sfn.StateMachine(this, 'ChildStateMachine', {\n definition: sfn.Chain.start(new sfn.Pass(this, 'PassState')),\n});\n\n// Include the state machine in a Task state with callback pattern\nconst task = new tasks.StepFunctionsStartExecution(this, 'ChildTask', {\n stateMachine: child,\n integrationPattern: 
sfn.IntegrationPattern.WAIT_FOR_TASK_TOKEN,\n input: sfn.TaskInput.fromObject({\n token: sfn.JsonPath.taskToken,\n foo: 'bar'\n }),\n name: 'MyExecutionName'\n});\n\n// Define a second state machine with the Task state above\nnew sfn.StateMachine(this, 'ParentStateMachine', {\n definition: task\n});\n```\n\n### Invoke Activity\n\nYou can invoke a [Step Functions Activity](https://docs.aws.amazon.com/step-functions/latest/dg/concepts-activities.html) which enables you to have\na task in your state machine where the work is performed by a *worker* that can\nbe hosted on Amazon EC2, Amazon ECS, AWS Lambda, basically anywhere. Activities\nare a way to associate code running somewhere (known as an activity worker) with\na specific task in a state machine.\n\nWhen Step Functions reaches an activity task state, the workflow waits for an\nactivity worker to poll for a task. An activity worker polls Step Functions by\nusing GetActivityTask, and sending the ARN for the related activity.\n\nAfter the activity worker completes its work, it can provide a report of its\nsuccess or failure by using `SendTaskSuccess` or `SendTaskFailure`. 
These two\ncalls use the taskToken provided by GetActivityTask to associate the result\nwith that task.\n\nThe following example creates an activity and creates a task that invokes the activity.\n\n```ts\nconst submitJobActivity = new sfn.Activity(this, 'SubmitJob');\n\nnew tasks.StepFunctionsInvokeActivity(this, 'Submit Job', {\n activity: submitJobActivity,\n});\n```\n\n## SQS\n\nStep Functions supports [Amazon SQS](https://docs.aws.amazon.com/step-functions/latest/dg/connect-sqs.html)\n\nYou can call the [`SendMessage`](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessage.html) API from a `Task` state\nto send a message to an SQS queue.\n\n```ts\nconst queue = new sqs.Queue(this, 'Queue');\n\n// Use a field from the execution data as message.\nconst task1 = new tasks.SqsSendMessage(this, 'Send1', {\n queue,\n messageBody: sfn.TaskInput.fromJsonPathAt('$.message'),\n});\n\n// Combine a field from the execution data with\n// a literal object.\nconst task2 = new tasks.SqsSendMessage(this, 'Send2', {\n queue,\n messageBody: sfn.TaskInput.fromObject({\n field1: 'somedata',\n field2: sfn.JsonPath.stringAt('$.field2'),\n }),\n});\n```\n"
|
|
3348
|
-
},
|
|
3349
2397
|
"targets": {
|
|
3350
2398
|
"dotnet": {
|
|
3351
2399
|
"namespace": "Amazon.CDK.AWS.StepFunctions.Tasks"
|
|
@@ -3359,10 +2407,6 @@
|
|
|
3359
2407
|
}
|
|
3360
2408
|
},
|
|
3361
2409
|
"aws-cdk-lib.aws_synthetics": {
|
|
3362
|
-
"locationInModule": {
|
|
3363
|
-
"filename": "lib/index.ts",
|
|
3364
|
-
"line": 183
|
|
3365
|
-
},
|
|
3366
2410
|
"targets": {
|
|
3367
2411
|
"dotnet": {
|
|
3368
2412
|
"namespace": "Amazon.CDK.AWS.Synthetics"
|
|
@@ -3376,10 +2420,6 @@
|
|
|
3376
2420
|
}
|
|
3377
2421
|
},
|
|
3378
2422
|
"aws-cdk-lib.aws_timestream": {
|
|
3379
|
-
"locationInModule": {
|
|
3380
|
-
"filename": "lib/index.ts",
|
|
3381
|
-
"line": 184
|
|
3382
|
-
},
|
|
3383
2423
|
"targets": {
|
|
3384
2424
|
"dotnet": {
|
|
3385
2425
|
"namespace": "Amazon.CDK.AWS.Timestream"
|
|
@@ -3393,10 +2433,6 @@
|
|
|
3393
2433
|
}
|
|
3394
2434
|
},
|
|
3395
2435
|
"aws-cdk-lib.aws_transfer": {
|
|
3396
|
-
"locationInModule": {
|
|
3397
|
-
"filename": "lib/index.ts",
|
|
3398
|
-
"line": 185
|
|
3399
|
-
},
|
|
3400
2436
|
"targets": {
|
|
3401
2437
|
"dotnet": {
|
|
3402
2438
|
"namespace": "Amazon.CDK.AWS.Transfer"
|
|
@@ -3410,10 +2446,6 @@
|
|
|
3410
2446
|
}
|
|
3411
2447
|
},
|
|
3412
2448
|
"aws-cdk-lib.aws_waf": {
|
|
3413
|
-
"locationInModule": {
|
|
3414
|
-
"filename": "lib/index.ts",
|
|
3415
|
-
"line": 186
|
|
3416
|
-
},
|
|
3417
2449
|
"targets": {
|
|
3418
2450
|
"dotnet": {
|
|
3419
2451
|
"namespace": "Amazon.CDK.AWS.WAF"
|
|
@@ -3427,10 +2459,6 @@
|
|
|
3427
2459
|
}
|
|
3428
2460
|
},
|
|
3429
2461
|
"aws-cdk-lib.aws_wafregional": {
|
|
3430
|
-
"locationInModule": {
|
|
3431
|
-
"filename": "lib/index.ts",
|
|
3432
|
-
"line": 187
|
|
3433
|
-
},
|
|
3434
2462
|
"targets": {
|
|
3435
2463
|
"dotnet": {
|
|
3436
2464
|
"namespace": "Amazon.CDK.AWS.WAFRegional"
|
|
@@ -3444,10 +2472,6 @@
|
|
|
3444
2472
|
}
|
|
3445
2473
|
},
|
|
3446
2474
|
"aws-cdk-lib.aws_wafv2": {
|
|
3447
|
-
"locationInModule": {
|
|
3448
|
-
"filename": "lib/index.ts",
|
|
3449
|
-
"line": 188
|
|
3450
|
-
},
|
|
3451
2475
|
"targets": {
|
|
3452
2476
|
"dotnet": {
|
|
3453
2477
|
"namespace": "Amazon.CDK.AWS.WAFv2"
|
|
@@ -3461,10 +2485,6 @@
|
|
|
3461
2485
|
}
|
|
3462
2486
|
},
|
|
3463
2487
|
"aws-cdk-lib.aws_workspaces": {
|
|
3464
|
-
"locationInModule": {
|
|
3465
|
-
"filename": "lib/index.ts",
|
|
3466
|
-
"line": 189
|
|
3467
|
-
},
|
|
3468
2488
|
"targets": {
|
|
3469
2489
|
"dotnet": {
|
|
3470
2490
|
"namespace": "Amazon.CDK.AWS.WorkSpaces"
|
|
@@ -3478,10 +2498,6 @@
|
|
|
3478
2498
|
}
|
|
3479
2499
|
},
|
|
3480
2500
|
"aws-cdk-lib.aws_xray": {
|
|
3481
|
-
"locationInModule": {
|
|
3482
|
-
"filename": "lib/index.ts",
|
|
3483
|
-
"line": 190
|
|
3484
|
-
},
|
|
3485
2501
|
"targets": {
|
|
3486
2502
|
"dotnet": {
|
|
3487
2503
|
"namespace": "Amazon.CDK.AWS.XRay"
|
|
@@ -3495,13 +2511,6 @@
|
|
|
3495
2511
|
}
|
|
3496
2512
|
},
|
|
3497
2513
|
"aws-cdk-lib.cloud_assembly_schema": {
|
|
3498
|
-
"locationInModule": {
|
|
3499
|
-
"filename": "lib/index.ts",
|
|
3500
|
-
"line": 191
|
|
3501
|
-
},
|
|
3502
|
-
"readme": {
|
|
3503
|
-
"markdown": "# Cloud Assembly Schema\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk) project.\n\n## Cloud Assembly\n\nThe *Cloud Assembly* is the output of the synthesis operation. It is produced as part of the\n[`cdk synth`](https://github.com/aws/aws-cdk/tree/master/packages/aws-cdk#cdk-synthesize)\ncommand, or the [`app.synth()`](https://github.com/aws/aws-cdk/blob/master/packages/@aws-cdk/core/lib/app.ts#L135) method invocation.\n\nIts essentially a set of files and directories, one of which is the `manifest.json` file. It defines the set of instructions that are\nneeded in order to deploy the assembly directory.\n\n> For example, when `cdk deploy` is executed, the CLI reads this file and performs its instructions:\n>\n> - Build container images.\n> - Upload assets.\n> - Deploy CloudFormation templates.\n\nTherefore, the assembly is how the CDK class library and CDK CLI (or any other consumer) communicate. To ensure compatibility\nbetween the assembly and its consumers, we treat the manifest file as a well defined, versioned schema.\n\n## Schema\n\nThis module contains the typescript structs that comprise the `manifest.json` file, as well as the\ngenerated [*json-schema*](./schema/cloud-assembly.schema.json).\n\n## Versioning\n\nThe schema version is specified in the [`cloud-assembly.version.json`](./schema/cloud-assembly.schema.json) file, under the `version` property.\nIt follows semantic versioning, but with a small twist.\n\nWhen we add instructions to the assembly, they are reflected in the manifest file and the *json-schema* accordingly.\nEvery such instruction, is crucial for ensuring the correct deployment behavior. 
This means that to properly deploy a cloud assembly,\nconsumers must be aware of every such instruction modification.\n\nFor this reason, every change to the schema, even though it might not strictly break validation of the *json-schema* format,\nis considered `major` version bump.\n\n## How to consume\n\nIf you'd like to consume the [schema file](./schema/cloud-assembly.schema.json) in order to do validations on `manifest.json` files, \nsimply download it from this repo and run it against standard *json-schema* validators, such as [jsonschema](https://www.npmjs.com/package/jsonschema).\n\nConsumers must take into account the `major` version of the schema they are consuming. They should reject cloud assemblies \nwith a `major` version that is higher than what they expect. While schema validation might pass on such assemblies, the deployment integrity \ncannot be guaranteed because some instructions will be ignored.\n\n> For example, if your consumer was built when the schema version was 2.0.0, you should reject deploying cloud assemblies with a \n> manifest version of 3.0.0. \n\n## Contributing\n\nSee [Contribution Guide](./CONTRIBUTING.md)\n"
|
|
3504
|
-
},
|
|
3505
2514
|
"targets": {
|
|
3506
2515
|
"dotnet": {
|
|
3507
2516
|
"namespace": "Amazon.CDK.CloudAssembly.Schema"
|
|
@@ -3515,13 +2524,6 @@
|
|
|
3515
2524
|
}
|
|
3516
2525
|
},
|
|
3517
2526
|
"aws-cdk-lib.cloudformation_include": {
|
|
3518
|
-
"locationInModule": {
|
|
3519
|
-
"filename": "lib/index.ts",
|
|
3520
|
-
"line": 192
|
|
3521
|
-
},
|
|
3522
|
-
"readme": {
|
|
3523
|
-
"markdown": "# Include CloudFormation templates in the CDK\n\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis module contains a set of classes whose goal is to facilitate working\nwith existing CloudFormation templates in the CDK.\nIt can be thought of as an extension of the capabilities of the\n[`CfnInclude` class](https://docs.aws.amazon.com/cdk/api/latest/docs/@aws-cdk_core.CfnInclude.html).\n\n## Basic usage\n\nAssume we have a file with an existing template.\nIt could be in JSON format, in a file `my-template.json`:\n\n```json\n{\n \"Resources\": {\n \"Bucket\": {\n \"Type\": \"AWS::S3::Bucket\",\n \"Properties\": {\n \"BucketName\": \"some-bucket-name\"\n }\n }\n }\n}\n```\n\nOr it could by in YAML format, in a file `my-template.yaml`:\n\n```yaml\nResources:\n Bucket:\n Type: AWS::S3::Bucket\n Properties:\n BucketName: some-bucket-name\n```\n\nIt can be included in a CDK application with the following code:\n\n```ts\nimport { cloudformation_include as cfn_inc } from 'aws-cdk-lib';\n\nconst cfnTemplate = new cfn_inc.CfnInclude(this, 'Template', {\n templateFile: 'my-template.json',\n});\n```\n\nOr, if your template uses YAML:\n\n```ts\nconst cfnTemplate = new cfn_inc.CfnInclude(this, 'Template', {\n templateFile: 'my-template.yaml',\n});\n```\n\n**Note**: different YAML parsers sometimes don't agree on what exactly constitutes valid YAML.\nIf you get a YAML exception when including your template,\ntry converting it to JSON, and including that file instead.\nIf you're downloading your template from the CloudFormation AWS Console,\nyou can easily get it in JSON format by clicking the 'View in Designer'\nbutton on the 'Template' tab -\nonce in Designer, select JSON in the \"Choose template language\"\nradio buttons on the bottom pane.\n\nThis will add all resources from `my-template.json` / `my-template.yaml` into the CDK application,\npreserving their original logical IDs from the template file.\n\nNote that this including 
process will _not_ execute any\n[CloudFormation transforms](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-macros.html) -\nincluding the [Serverless transform](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/transform-aws-serverless.html).\n\nAny resource from the included template can be retrieved by referring to it by its logical ID from the template.\nIf you know the class of the CDK object that corresponds to that resource,\nyou can cast the returned object to the correct type:\n\n```ts\nimport { aws_s3 as s3 } from 'aws-cdk-lib';\n\nconst cfnBucket = cfnTemplate.getResource('Bucket') as s3.CfnBucket;\n// cfnBucket is of type s3.CfnBucket\n```\n\nNote that any resources not present in the latest version of the CloudFormation schema\nat the time of publishing the version of this module that you depend on,\nincluding [Custom Resources](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cfn-customresource.html),\nwill be returned as instances of the class `CfnResource`,\nand so cannot be cast to a different resource type.\n\nAny modifications made to that resource will be reflected in the resulting CDK template;\nfor example, the name of the bucket can be changed:\n\n```ts\ncfnBucket.bucketName = 'my-bucket-name';\n```\n\nYou can also refer to the resource when defining other constructs,\nincluding the higher-level ones\n(those whose name does not start with `Cfn`),\nfor example:\n\n```ts\nimport { aws_iam as iam } from 'aws-cdk-lib';\n\nconst role = new iam.Role(this, 'Role', {\n assumedBy: new iam.AnyPrincipal(),\n});\nrole.addToPolicy(new iam.PolicyStatement({\n actions: ['s3:*'],\n resources: [cfnBucket.attrArn],\n}));\n```\n\n### Converting L1 resources to L2\n\nThe resources the `getResource` method returns are what the CDK calls\n[Layer 1 resources](https://docs.aws.amazon.com/cdk/latest/guide/cfn_layer.html#cfn_layer_cfn)\n(like `CfnBucket`).\nHowever, in many places in the Construct 
Library,\nthe CDK requires so-called Layer 2 resources, like `IBucket`.\nThere are two ways of going from an L1 to an L2 resource.\n\n#### Using`fromCfn*()` methods\n\nThis is the preferred method of converting an L1 resource to an L2.\nIt works by invoking a static method of the class of the L2 resource\nwhose name starts with `fromCfn` -\nfor example, for KMS Keys, that would be the `Kms.fromCfnKey()` method -\nand passing the L1 instance as an argument:\n\n```ts\nimport { aws_kms as kms } from 'aws-cdk-lib';\n\nconst cfnKey = cfnTemplate.getResource('Key') as kms.CfnKey;\nconst key = kms.Key.fromCfnKey(cfnKey);\n```\n\nThis returns an instance of the `kms.IKey` type that can be passed anywhere in the CDK an `IKey` is expected.\nWhat is more, that `IKey` instance will be mutable -\nwhich means calling any mutating methods on it,\nlike `addToResourcePolicy()`,\nwill be reflected in the resulting template.\n\nNote that, in some cases, the `fromCfn*()` method might not be able to create an L2 from the underlying L1.\nThis can happen when the underlying L1 heavily uses CloudFormation functions.\nFor example, if you tried to create an L2 `IKey`\nfrom an L1 represented as this CloudFormation template:\n\n```json\n{\n \"Resources\": {\n \"Key\": {\n \"Type\": \"AWS::KMS::Key\",\n \"Properties\": {\n \"KeyPolicy\": {\n \"Statement\": [\n {\n \"Fn::If\": [\n \"Condition\",\n {\n \"Action\": \"kms:if-action\",\n \"Resource\": \"*\",\n \"Principal\": \"*\",\n \"Effect\": \"Allow\"\n },\n {\n \"Action\": \"kms:else-action\",\n \"Resource\": \"*\",\n \"Principal\": \"*\",\n \"Effect\": \"Allow\"\n }\n ]\n }\n ],\n \"Version\": \"2012-10-17\"\n }\n }\n }\n }\n}\n```\n\nThe `Key.fromCfnKey()` method does not know how to translate that into CDK L2 concepts,\nand would throw an exception.\n\nIn those cases, you need the use the second method of converting an L1 to an L2.\n\n#### Using `from*Name/Arn/Attributes()` methods\n\nIf the resource you need does not have a `fromCfn*()` 
method,\nor if it does, but it throws an exception for your particular L1,\nyou need to use the second method of converting an L1 resource to L2.\n\nEach L2 class has static factory methods with names like `from*Name()`,\n`from*Arn()`, and/or `from*Attributes()`.\nYou can obtain an L2 resource from an L1 by passing the correct properties of the L1 as the arguments to those methods:\n\n```ts\n// using from*Name()\nconst bucket = s3.Bucket.fromBucketName(this, 'L2Bucket', cfnBucket.ref);\n\n// using from*Arn()\nconst key = kms.Key.fromKeyArn(this, 'L2Key', cfnKey.attrArn);\n\n// using from*Attributes()\nconst vpc = ec2.Vpc.fromVpcAttributes(this, 'L2Vpc', {\n vpcId: cfnVpc.ref,\n availabilityZones: cdk.Fn.getAzs(),\n privateSubnetIds: [privateCfnSubnet1.ref, privateCfnSubnet2.ref],\n});\n```\n\nAs long as they just need to be referenced,\nand not changed in any way, everything should work;\nhowever, note that resources returned from those methods,\nunlike those returned by `fromCfn*()` methods,\nare immutable, which means calling any mutating methods on them will have no effect.\nYou will have to mutate the underlying L1 in order to change them.\n\n## Non-resource template elements\n\nIn addition to resources,\nyou can also retrieve and mutate all other template elements:\n\n* [Parameters](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html):\n\n ```ts\n import * as core from 'aws-cdk-lib';\n\n const param: core.CfnParameter = cfnTemplate.getParameter('MyParameter');\n\n // mutating the parameter\n param.default = 'MyDefault';\n ```\n\n* [Conditions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/conditions-section-structure.html):\n\n ```ts\n import * as core from 'aws-cdk-lib';\n\n const condition: core.CfnCondition = cfnTemplate.getCondition('MyCondition');\n\n // mutating the condition\n condition.expression = core.Fn.conditionEquals(1, 2);\n ```\n\n* 
[Mappings](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/mappings-section-structure.html):\n\n ```ts\n import * as core from 'aws-cdk-lib';\n\n const mapping: core.CfnMapping = cfnTemplate.getMapping('MyMapping');\n\n // mutating the mapping\n mapping.setValue('my-region', 'AMI', 'ami-04681a1dbd79675a5');\n ```\n\n* [Service Catalog template Rules](https://docs.aws.amazon.com/servicecatalog/latest/adminguide/reference-template_constraint_rules.html):\n\n ```ts\n import * as core from 'aws-cdk-lib';\n\n const rule: core.CfnRule = cfnTemplate.getRule('MyRule');\n\n // mutating the rule\n rule.addAssertion(core.Fn.conditionContains(['m1.small'], myParameter.value),\n 'MyParameter has to be m1.small');\n ```\n\n* [Outputs](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/outputs-section-structure.html):\n\n ```ts\n import * as core from 'aws-cdk-lib';\n\n const output: core.CfnOutput = cfnTemplate.getOutput('MyOutput');\n\n // mutating the output\n output.value = cfnBucket.attrArn;\n ```\n\n* [Hooks for blue-green deployments](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/blue-green.html):\n\n ```ts\n import * as core from 'aws-cdk-lib';\n\n const hook: core.CfnHook = cfnTemplate.getHook('MyOutput');\n\n // mutating the hook\n const codeDeployHook = hook as core.CfnCodeDeployBlueGreenHook;\n codeDeployHook.serviceRole = myRole.roleArn;\n ```\n\n## Parameter replacement\n\nIf your existing template uses CloudFormation Parameters,\nyou may want to remove them in favor of build-time values.\nYou can do that using the `parameters` property:\n\n```ts\nnew inc.CfnInclude(this, 'includeTemplate', {\n templateFile: 'path/to/my/template',\n parameters: {\n 'MyParam': 'my-value',\n },\n});\n```\n\nThis will replace all references to `MyParam` with the string `'my-value'`,\nand `MyParam` will be removed from the 'Parameters' section of the resulting template.\n\n## Nested Stacks\n\nThis module also supports templates that use 
[nested stacks](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-nested-stacks.html).\n\nFor example, if you have the following parent template:\n\n```json\n{\n \"Resources\": {\n \"ChildStack\": {\n \"Type\": \"AWS::CloudFormation::Stack\",\n \"Properties\": {\n \"TemplateURL\": \"https://my-s3-template-source.s3.amazonaws.com/child-stack.json\"\n }\n }\n }\n}\n```\n\nwhere the child template pointed to by `https://my-s3-template-source.s3.amazonaws.com/child-stack.json` is:\n\n```json\n{\n \"Resources\": {\n \"MyBucket\": {\n \"Type\": \"AWS::S3::Bucket\"\n }\n }\n}\n```\n\nYou can include both the parent stack,\nand the nested stack in your CDK application as follows:\n\n```ts\nconst parentTemplate = new inc.CfnInclude(this, 'ParentStack', {\n templateFile: 'path/to/my-parent-template.json',\n loadNestedStacks: {\n 'ChildStack': {\n templateFile: 'path/to/my-nested-template.json',\n },\n },\n});\n```\n\nHere, `path/to/my-nested-template.json`\nrepresents the path on disk to the downloaded template file from the original template URL of the nested stack\n(`https://my-s3-template-source.s3.amazonaws.com/child-stack.json`).\nIn the CDK application,\nthis file will be turned into an [Asset](https://docs.aws.amazon.com/cdk/latest/guide/assets.html),\nand the `TemplateURL` property of the nested stack resource\nwill be modified to point to that asset.\n\nThe included nested stack can be accessed with the `getNestedStack` method:\n\n```ts\nconst includedChildStack = parentTemplate.getNestedStack('ChildStack');\nconst childStack: core.NestedStack = includedChildStack.stack;\nconst childTemplate: cfn_inc.CfnInclude = includedChildStack.includedTemplate;\n```\n\nNow you can reference resources from `ChildStack`,\nand modify them like any other included template:\n\n```ts\nconst cfnBucket = childTemplate.getResource('MyBucket') as s3.CfnBucket;\ncfnBucket.bucketName = 'my-new-bucket-name';\n\nconst role = new iam.Role(childStack, 'MyRole', {\n 
assumedBy: new iam.AccountRootPrincipal(),\n});\n\nrole.addToPolicy(new iam.PolicyStatement({\n actions: [\n 's3:GetObject*',\n 's3:GetBucket*',\n 's3:List*',\n ],\n resources: [cfnBucket.attrArn],\n}));\n```\n\nYou can also include the nested stack after the `CfnInclude` object was created,\ninstead of doing it on construction:\n\n```ts\nconst includedChildStack = parentTemplate.loadNestedStack('ChildTemplate', {\n templateFile: 'path/to/my-nested-template.json',\n});\n```\n\n## Vending CloudFormation templates as Constructs\n\nIn many cases, there are existing CloudFormation templates that are not entire applications,\nbut more like specialized fragments, implementing a particular pattern or best practice.\nIf you have templates like that,\nyou can use the `CfnInclude` class to vend them as CDK Constructs:\n\n```ts\nimport * as path from 'path';\n\nexport class MyConstruct extends Construct {\n constructor(scope: Construct, id: string) {\n super(scope, id);\n\n // include a template inside the Construct\n new cfn_inc.CfnInclude(this, 'MyConstruct', {\n templateFile: path.join(__dirname, 'my-template.json'),\n preserveLogicalIds: false, // <--- !!!\n });\n }\n}\n```\n\nNotice the `preserveLogicalIds` parameter -\nit makes sure the logical IDs of all the included template elements are re-named using CDK's algorithm,\nguaranteeing they are unique within your application.\nWithout that parameter passed,\ninstantiating `MyConstruct` twice in the same Stack would result in duplicated logical IDs.\n"
|
|
3524
|
-
},
|
|
3525
2527
|
"targets": {
|
|
3526
2528
|
"dotnet": {
|
|
3527
2529
|
"namespace": "Amazon.CDK.CloudFormation.Include"
|
|
@@ -3535,13 +2537,6 @@
|
|
|
3535
2537
|
}
|
|
3536
2538
|
},
|
|
3537
2539
|
"aws-cdk-lib.custom_resources": {
|
|
3538
|
-
"locationInModule": {
|
|
3539
|
-
"filename": "lib/index.ts",
|
|
3540
|
-
"line": 194
|
|
3541
|
-
},
|
|
3542
|
-
"readme": {
|
|
3543
|
-
"markdown": "# AWS CDK Custom Resources\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\n## Provider Framework\n\nAWS CloudFormation [custom resources](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-custom-resources.html) are extension points to the provisioning\nengine. When CloudFormation needs to create, update or delete a custom resource,\nit sends a lifecycle event notification to a **custom resource provider**. The provider\nhandles the event (e.g. creates a resource) and sends back a response to CloudFormation.\n\nThe `@aws-cdk/custom-resources.Provider` construct is a \"mini-framework\" for\nimplementing providers for AWS CloudFormation custom resources. The framework offers a high-level API which makes it easier to implement robust\nand powerful custom resources and includes the following capabilities:\n\n* Handles responses to AWS CloudFormation and protects against blocked\n deployments\n* Validates handler return values to help with correct handler implementation\n* Supports asynchronous handlers to enable operations that require a long waiting period for a resource, which can exceed the AWS Lambda timeout\n* Implements default behavior for physical resource IDs.\n\nThe following code shows how the `Provider` construct is used in conjunction\nwith a `CustomResource` and a user-provided AWS Lambda function which implements\nthe actual handler.\n\n```ts\nimport { CustomResource } from 'aws-cdk-lib';\nimport { aws_logs as logs } from 'aws-cdk-lib';\nimport { aws_iam as iam } from 'aws-cdk-lib';\nimport { custom_resources as cr } from 'aws-cdk-lib';\n\nconst onEvent = new lambda.Function(this, 'MyHandler', { /* ... */ });\n\nconst myRole = new iam.Role(this, 'MyRole', { /* ... 
*/ });\n\nconst myProvider = new cr.Provider(this, 'MyProvider', {\n onEventHandler: onEvent,\n isCompleteHandler: isComplete, // optional async \"waiter\"\n logRetention: logs.RetentionDays.ONE_DAY, // default is INFINITE\n role: myRole, // must be assumable by the `lambda.amazonaws.com` service principal\n});\n\nnew CustomResource(this, 'Resource1', { serviceToken: myProvider.serviceToken });\nnew CustomResource(this, 'Resource2', { serviceToken: myProvider.serviceToken });\n```\n\nProviders are implemented through AWS Lambda functions that are triggered by the\nprovider framework in response to lifecycle events.\n\nAt the minimum, users must define the `onEvent` handler, which is invoked by the\nframework for all resource lifecycle events (`Create`, `Update` and `Delete`)\nand returns a result which is then submitted to CloudFormation.\n\nThe following example is a skeleton for a Python implementation of `onEvent`:\n\n```py\ndef on_event(event, context):\n print(event)\n request_type = event['RequestType']\n if request_type == 'Create': return on_create(event)\n if request_type == 'Update': return on_update(event)\n if request_type == 'Delete': return on_delete(event)\n raise Exception(\"Invalid request type: %s\" % request_type)\n\ndef on_create(event):\n props = event[\"ResourceProperties\"]\n print(\"create new resource with props %s\" % props)\n\n # add your create code here...\n physical_id = ...\n\n return { 'PhysicalResourceId': physical_id }\n\ndef on_update(event):\n physical_id = event[\"PhysicalResourceId\"]\n props = event[\"ResourceProperties\"]\n print(\"update resource %s with props %s\" % (physical_id, props))\n # ...\n\ndef on_delete(event):\n physical_id = event[\"PhysicalResourceId\"]\n print(\"delete resource %s\" % physical_id)\n # ...\n```\n\nUsers may also provide an additional handler called `isComplete`, for cases\nwhere the lifecycle operation cannot be completed immediately. 
The\n`isComplete` handler will be retried asynchronously after `onEvent` until it\nreturns `IsComplete: true`, or until the total provider timeout has expired.\n\nThe following example is a skeleton for a Python implementation of `isComplete`:\n\n```py\ndef is_complete(event, context):\n physical_id = event[\"PhysicalResourceId\"]\n request_type = event[\"RequestType\"]\n\n # check if resource is stable based on request_type\n is_ready = ...\n\n return { 'IsComplete': is_ready }\n```\n\n### Handling Lifecycle Events: onEvent\n\nThe user-defined `onEvent` AWS Lambda function is invoked whenever a resource\nlifecycle event occurs. The function is expected to handle the event and return\na response to the framework that, at least, includes the physical resource ID.\n\nIf `onEvent` returns successfully, the framework will submit a \"SUCCESS\" response\nto AWS CloudFormation for this resource operation. If the provider is\n[asynchronous](#asynchronous-providers-iscomplete) (`isCompleteHandler` is\ndefined), the framework will only submit a response based on the result of\n`isComplete`.\n\nIf `onEvent` throws an error, the framework will submit a \"FAILED\" response to\nAWS CloudFormation.\n\nThe input event includes the following fields derived from the [Custom Resource\nProvider Request]:\n\n|Field|Type|Description\n|-----|----|----------------\n|`RequestType`|String|The type of lifecycle event: `Create`, `Update` or `Delete`.\n|`LogicalResourceId`|String|The template developer-chosen name (logical ID) of the custom resource in the AWS CloudFormation template.\n|`PhysicalResourceId`|String|This field will only be present for `Update` and `Delete` events and includes the value returned in `PhysicalResourceId` of the previous operation.\n|`ResourceProperties`|JSON|This field contains the properties defined in the template for this custom resource.\n|`OldResourceProperties`|JSON|This field will only be present for `Update` events and contains the resource properties that 
were declared previous to the update request.\n|`ResourceType`|String|The resource type defined for this custom resource in the template. A provider may handle any number of custom resource types.\n|`RequestId`|String|A unique ID for the request.\n|`StackId`|String|The ARN that identifies the stack that contains the custom resource.\n\nThe return value from `onEvent` must be a JSON object with the following fields:\n\n|Field|Type|Required|Description\n|-----|----|--------|-----------\n|`PhysicalResourceId`|String|No|The allocated/assigned physical ID of the resource. If omitted for `Create` events, the event's `RequestId` will be used. For `Update`, the current physical ID will be used. If a different value is returned, CloudFormation will follow with a subsequent `Delete` for the previous ID (resource replacement). For `Delete`, it will always return the current physical resource ID, and if the user returns a different one, an error will occur.\n|`Data`|JSON|No|Resource attributes, which can later be retrieved through `Fn::GetAtt` on the custom resource object.\n|*any*|*any*|No|Any other field included in the response will be passed through to `isComplete`. This can sometimes be useful to pass state between the handlers.\n\n[Custom Resource Provider Request]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/crpg-ref-requests.html#crpg-ref-request-fields\n\n### Asynchronous Providers: isComplete\n\nIt is not uncommon for the provisioning of resources to be an asynchronous\noperation, which means that the operation does not immediately finish, and we\nneed to \"wait\" until the resource stabilizes.\n\nThe provider framework makes it easy to implement \"waiters\" by allowing users to\nspecify an additional AWS Lambda function in `isCompleteHandler`.\n\nThe framework will repeatedly invoke the handler every `queryInterval`. When\n`isComplete` returns with `IsComplete: true`, the framework will submit a\n\"SUCCESS\" response to AWS CloudFormation. 
If `totalTimeout` expires and the\noperation has not yet completed, the framework will submit a \"FAILED\" response\nwith the message \"Operation timed out\".\n\nIf an error is thrown, the framework will submit a \"FAILED\" response to AWS\nCloudFormation.\n\nThe input event to `isComplete` includes all request fields, combined with all\nfields returned from `onEvent`. If `PhysicalResourceId` has not been explicitly\nreturned from `onEvent`, it's value will be calculated based on the heuristics\ndescribed above.\n\nThe return value must be a JSON object with the following fields:\n\n|Field|Type|Required|Description\n|-----|----|--------|-----------\n|`IsComplete`|Boolean|Yes|Indicates if the operation has finished or not.\n|`Data`|JSON|No|May only be sent if `IsComplete` is `true` and includes additional resource attributes. These attributes will be **merged** with the ones returned from `onEvent`\n\n### Physical Resource IDs\n\nEvery resource in CloudFormation has a physical resource ID. When a resource is\ncreated, the `PhysicalResourceId` returned from the `Create` operation is stored\nby AWS CloudFormation and assigned to the logical ID defined for this resource\nin the template. If a `Create` operation returns without a `PhysicalResourceId`,\nthe framework will use `RequestId` as the default. This is sufficient for\nvarious cases such as \"pseudo-resources\" which only query data.\n\nFor `Update` and `Delete` operations, the resource event will always include the\ncurrent `PhysicalResourceId` of the resource.\n\nWhen an `Update` operation occurs, the default behavior is to return the current\nphysical resource ID. if the `onEvent` returns a `PhysicalResourceId` which is\ndifferent from the current one, AWS CloudFormation will treat this as a\n**resource replacement**, and it will issue a subsequent `Delete` operation for\nthe old resource.\n\nAs a rule of thumb, if your custom resource supports configuring a physical name\n(e.g. 
you can specify a `BucketName` when you define an `AWS::S3::Bucket`), you\nmust return this name in `PhysicalResourceId` and make sure to handle\nreplacement properly. The `S3File` example demonstrates this\nthrough the `objectKey` property.\n\n### Handling Provider Framework Error\n\nAs mentioned above, if any of the user handlers fail (i.e. throws an exception)\nor times out (due to their AWS Lambda timing out), the framework will trap these\nerrors and submit a \"FAILED\" response to AWS CloudFormation, along with the error\nmessage.\n\nSince errors can occur in multiple places in the provider (framework, `onEvent`,\n`isComplete`), it is important to know that there could situations where a\nresource operation fails even though the operation technically succeeded (i.e.\nisComplete throws an error).\n\nWhen AWS CloudFormation receives a \"FAILED\" response, it will attempt to roll\nback the stack to it's last state. This has different meanings for different\nlifecycle events:\n\n* If a `Create` event fails, the resource provider framework will automatically\n ignore the subsequent `Delete` operation issued by AWS CloudFormation. The\n framework currently does not support customizing this behavior (see\n https://github.com/aws/aws-cdk/issues/5524).\n* If an `Update` event fails, CloudFormation will issue an additional `Update`\n with the previous properties.\n* If a `Delete` event fails, CloudFormation will abandon this resource.\n\n### Provider Framework Execution Policy\n\nSimilarly to any AWS Lambda function, if the user-defined handlers require\naccess to AWS resources, you will have to define these permissions\nby calling \"grant\" methods such as `myBucket.grantRead(myHandler)`), using `myHandler.addToRolePolicy`\nor specifying an `initialPolicy` when defining the function.\n\nBear in mind that in most cases, a single provider will be used for multiple\nresource instances. 
This means that the execution policy of the provider must\nhave the appropriate privileges.\n\nThe following example grants the `onEvent` handler `s3:GetObject*` permissions\nto all buckets:\n\n```ts\nnew lambda.Function(this, 'OnEventHandler', {\n // ...\n initialPolicy: [\n new iam.PolicyStatement({ actions: [ 's3:GetObject*' ], resources: [ '*' ] })\n ]\n});\n```\n\n### Timeouts\n\nUsers are responsible to define the timeouts for the AWS Lambda functions for\nuser-defined handlers. It is recommended not to exceed a **14 minutes** timeout,\nsince all framework functions are configured to time out after 15 minutes, which\nis the maximal AWS Lambda timeout.\n\nIf your operation takes over **14 minutes**, the recommended approach is to\nimplement an [asynchronous provider](#asynchronous-providers-iscomplete), and\nthen configure the timeouts for the asynchronous retries through the\n`queryInterval` and the `totalTimeout` options.\n\n### Provider Framework Examples\n\nThis module includes a few examples for custom resource implementations:\n\n#### S3File\n\nProvisions an object in an S3 bucket with textual contents. 
See the source code\nfor the\n[construct](https://github.com/aws/aws-cdk/blob/master/packages/%40aws-cdk/custom-resources/test/provider-framework/integration-test-fixtures/s3-assert.ts) and\n[handler](https://github.com/aws/aws-cdk/blob/master/packages/%40aws-cdk/custom-resources/test/provider-framework/integration-test-fixtures/s3-assert-handler/index.py).\n\nThe following example will create the file `folder/file1.txt` inside `myBucket`\nwith the contents `hello!`.\n\n\n```ts\nnew S3File(this, 'MyFile', {\n bucket: myBucket,\n objectKey: 'folder/file1.txt', // optional\n content: 'hello!',\n public: true // optional\n});\n```\n\nThis sample demonstrates the following concepts:\n\n* Synchronous implementation (`isComplete` is not defined)\n* Automatically generates the physical name if `objectKey` is not defined\n* Handles physical name changes\n* Returns resource attributes\n* Handles deletions\n* Implemented in TypeScript\n\n#### S3Assert\n\nChecks that the textual contents of an S3 object matches a certain value. The check will be retried for 5 minutes as long as the object is not found or the value is different. See the source code for the [construct](test/provider-framework/integration-test-fixtures/s3-assert.ts) and [handler](test/provider-framework/integration-test-fixtures/s3-assert-handler/index.py).\n\nThe following example defines an `S3Assert` resource which waits until\n`myfile.txt` in `myBucket` exists and includes the contents `foo bar`:\n\n```ts\nnew S3Assert(this, 'AssertMyFile', {\n bucket: myBucket,\n objectKey: 'myfile.txt',\n expectedContent: 'foo bar'\n});\n```\n\nThis sample demonstrates the following concepts:\n\n* Asynchronous implementation\n* Non-intrinsic physical IDs\n* Implemented in Python\n\n## Custom Resources for AWS APIs\n\nSometimes a single API call can fill the gap in the CloudFormation coverage. In\nthis case you can use the `AwsCustomResource` construct. 
This construct creates\na custom resource that can be customized to make specific API calls for the\n`CREATE`, `UPDATE` and `DELETE` events. Additionally, data returned by the API\ncall can be extracted and used in other constructs/resources (creating a real\nCloudFormation dependency using `Fn::GetAtt` under the hood).\n\nThe physical id of the custom resource can be specified or derived from the data\nreturned by the API call.\n\nThe `AwsCustomResource` uses the AWS SDK for JavaScript. Services, actions and\nparameters can be found in the [API documentation](https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/index.html).\n\nPath to data must be specified using a dot notation, e.g. to get the string value\nof the `Title` attribute for the first item returned by `dynamodb.query` it should\nbe `Items.0.Title.S`.\n\nTo make sure that the newest API calls are available the latest AWS SDK v2 is installed\nin the Lambda function implementing the custom resource. The installation takes around 60\nseconds. 
If you prefer to optimize for speed, you can disable the installation by setting\nthe `installLatestAwsSdk` prop to `false`.\n\n### Custom Resource Execution Policy\n\nYou must provide the `policy` property defining the IAM Policy that will be applied to the API calls.\nThe library provides two factory methods to quickly configure this:\n\n* **`AwsCustomResourcePolicy.fromSdkCalls`** - Use this to auto-generate IAM Policy statements based on the configured SDK calls.\nNote that you will have to either provide specific ARN's, or explicitly use `AwsCustomResourcePolicy.ANY_RESOURCE` to allow access to any resource.\n* **`AwsCustomResourcePolicy.fromStatements`** - Use this to specify your own custom statements.\n\nThe custom resource also implements `iam.IGrantable`, making it possible to use the `grantXxx()` methods.\n\nAs this custom resource uses a singleton Lambda function, it's important to note\nthat the function's role will eventually accumulate the permissions/grants from all\nresources.\n\nChained API calls can be achieved by creating dependencies:\n\n```ts\nconst awsCustom1 = new AwsCustomResource(this, 'API1', {\n onCreate: {\n service: '...',\n action: '...',\n physicalResourceId: PhysicalResourceId.of('...')\n },\n policy: AwsCustomResourcePolicy.fromSdkCalls({resources: AwsCustomResourcePolicy.ANY_RESOURCE})\n});\n\nconst awsCustom2 = new AwsCustomResource(this, 'API2', {\n onCreate: {\n service: '...',\n action: '...'\n parameters: {\n text: awsCustom1.getResponseField('Items.0.text')\n },\n physicalResourceId: PhysicalResourceId.of('...')\n },\n policy: AwsCustomResourcePolicy.fromSdkCalls({resources: AwsCustomResourcePolicy.ANY_RESOURCE})\n})\n```\n\n### Physical Resource Id Parameter\n\nSome AWS APIs may require passing the physical resource id in as a parameter for doing updates and deletes. 
You can pass it by using `PhysicalResourceIdReference`.\n\n```ts\nconst awsCustom = new AwsCustomResource(this, '...', {\n onCreate: {\n service: '...',\n action: '...'\n parameters: {\n text: '...'\n },\n physicalResourceId: PhysicalResourceId.of('...')\n },\n onUpdate: {\n service: '...',\n action: '...'.\n parameters: {\n text: '...',\n resourceId: new PhysicalResourceIdReference()\n }\n },\n policy: AwsCustomResourcePolicy.fromSdkCalls({resources: AwsCustomResourcePolicy.ANY_RESOURCE})\n})\n```\n\n### Handling Custom Resource Errors\n\nEvery error produced by the API call is treated as is and will cause a \"FAILED\" response to be submitted to CloudFormation.\nYou can ignore some errors by specifying the `ignoreErrorCodesMatching` property, which accepts a regular expression that is\ntested against the `code` property of the response. If matched, a \"SUCCESS\" response is submitted.\nNote that in such a case, the call response data and the `Data` key submitted to CloudFormation would both be an empty JSON object.\nSince a successful resource provisioning might or might not produce outputs, this presents us with some limitations:\n\n* `PhysicalResourceId.fromResponse` - Since the call response data might be empty, we cannot use it to extract the physical id.\n* `getResponseField` and `getResponseFieldReference` - Since the `Data` key is empty, the resource will not have any attributes, and therefore, invoking these functions will result in an error.\n\nIn both the cases, you will get a synth time error if you attempt to use it in conjunction with `ignoreErrorCodesMatching`.\n\n### Customizing the Lambda function implementing the custom resource\n\nUse the `role`, `timeout`, `logRetention` and `functionName` properties to customize\nthe Lambda function implementing the custom resource:\n\n```ts\nnew AwsCustomResource(this, 'Customized', {\n // other props here\n role: myRole, // must be assumable by the `lambda.amazonaws.com` service principal\n timeout: 
cdk.Duration.minutes(10) // defaults to 2 minutes\n logRetention: logs.RetentionDays.ONE_WEEK // defaults to never delete logs\n functionName: 'my-custom-name', // defaults to a CloudFormation generated name\n})\n```\n\n### Restricting the output of the Custom Resource\n\nCloudFormation imposes a hard limit of 4096 bytes for custom resources response\nobjects. If your API call returns an object that exceeds this limit, you can restrict\nthe data returned by the custom resource to specific paths in the API response:\n\n```ts\nnew AwsCustomResource(stack, 'ListObjects', {\n onCreate: {\n service: 's3',\n action: 'listObjectsV2',\n parameters: {\n Bucket: 'my-bucket',\n },\n physicalResourceId: PhysicalResourceId.of('id'),\n outputPaths: ['Contents.0.Key', 'Contents.1.Key'], // Output only the two first keys\n },\n policy: AwsCustomResourcePolicy.fromSdkCalls({ resources: AwsCustomResourcePolicy.ANY_RESOURCE }),\n});\n```\n\nNote that even if you restrict the output of your custom resource you can still use any\npath in `PhysicalResourceId.fromResponse()`.\n\n### Custom Resource Examples\n\n#### Verify a domain with SES\n\n```ts\nconst verifyDomainIdentity = new AwsCustomResource(this, 'VerifyDomainIdentity', {\n onCreate: {\n service: 'SES',\n action: 'verifyDomainIdentity',\n parameters: {\n Domain: 'example.com'\n },\n physicalResourceId: PhysicalResourceId.fromResponse('VerificationToken') // Use the token returned by the call as physical id\n },\n policy: AwsCustomResourcePolicy.fromSdkCalls({resources: AwsCustomResourcePolicy.ANY_RESOURCE})\n});\n\nnew route53.TxtRecord(this, 'SESVerificationRecord', {\n zone,\n recordName: `_amazonses.example.com`,\n values: [verifyDomainIdentity.getResponseField('VerificationToken')]\n});\n```\n\n#### Get the latest version of a secure SSM parameter\n\n```ts\nconst getParameter = new AwsCustomResource(this, 'GetParameter', {\n onUpdate: { // will also be called for a CREATE event\n service: 'SSM',\n action: 'getParameter',\n 
parameters: {\n Name: 'my-parameter',\n WithDecryption: true\n },\n physicalResourceId: PhysicalResourceId.of(Date.now().toString()) // Update physical id to always fetch the latest version\n },\n policy: AwsCustomResourcePolicy.fromSdkCalls({resources: AwsCustomResourcePolicy.ANY_RESOURCE})\n});\n\n// Use the value in another construct with\ngetParameter.getResponseField('Parameter.Value')\n```\n\n#### Associate a PrivateHostedZone with VPC shared from another account\n\n```ts\nconst getParameter = new AwsCustomResource(this, 'AssociateVPCWithHostedZone', {\n onCreate: {\n assumedRoleArn: 'arn:aws:iam::OTHERACCOUNT:role/CrossAccount/ManageHostedZoneConnections',\n service: 'Route53',\n action: 'associateVPCWithHostedZone',\n parameters: {\n HostedZoneId: 'hz-123',\n VPC: {\n\t\tVPCId: 'vpc-123',\n\t\tVPCRegion: 'region-for-vpc'\n }\n },\n physicalResourceId: PhysicalResourceId.of('${vpcStack.SharedVpc.VpcId}-${vpcStack.Region}-${PrivateHostedZone.HostedZoneId}')\n },\n //Will ignore any resource and use the assumedRoleArn as resource and 'sts:AssumeRole' for service:action\n policy: AwsCustomResourcePolicy.fromSdkCalls({resources: AwsCustomResourcePolicy.ANY_RESOURCE}) \n});\n\n```\n\n---\n\nThis module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk) project.\n"
|
|
3544
|
-
},
|
|
3545
2540
|
"targets": {
|
|
3546
2541
|
"dotnet": {
|
|
3547
2542
|
"namespace": "Amazon.CDK.CustomResources"
|
|
@@ -3555,13 +2550,6 @@
|
|
|
3555
2550
|
}
|
|
3556
2551
|
},
|
|
3557
2552
|
"aws-cdk-lib.cx_api": {
|
|
3558
|
-
"locationInModule": {
|
|
3559
|
-
"filename": "lib/index.ts",
|
|
3560
|
-
"line": 195
|
|
3561
|
-
},
|
|
3562
|
-
"readme": {
|
|
3563
|
-
"markdown": "# Cloud Executable API\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk) project.\n"
|
|
3564
|
-
},
|
|
3565
2553
|
"targets": {
|
|
3566
2554
|
"dotnet": {
|
|
3567
2555
|
"namespace": "Amazon.CDK.CXAPI"
|
|
@@ -3575,13 +2563,6 @@
|
|
|
3575
2563
|
}
|
|
3576
2564
|
},
|
|
3577
2565
|
"aws-cdk-lib.lambda_layer_awscli": {
|
|
3578
|
-
"locationInModule": {
|
|
3579
|
-
"filename": "lib/index.ts",
|
|
3580
|
-
"line": 196
|
|
3581
|
-
},
|
|
3582
|
-
"readme": {
|
|
3583
|
-
"markdown": "# AWS Lambda Layer with AWS CLI\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\n\nThis module exports a single class called `AwsCliLayer` which is a `lambda.Layer` that bundles the AWS CLI.\n\nUsage:\n\n```ts\nconst fn = new lambda.Function(...);\nfn.addLayers(new AwsCliLayer(stack, 'AwsCliLayer'));\n```\n\nThe CLI will be installed under `/opt/awscli/aws`.\n"
|
|
3584
|
-
},
|
|
3585
2566
|
"targets": {
|
|
3586
2567
|
"dotnet": {
|
|
3587
2568
|
"namespace": "Amazon.CDK.LambdaLayer.AwsCli"
|
|
@@ -3595,13 +2576,6 @@
|
|
|
3595
2576
|
}
|
|
3596
2577
|
},
|
|
3597
2578
|
"aws-cdk-lib.lambda_layer_kubectl": {
|
|
3598
|
-
"locationInModule": {
|
|
3599
|
-
"filename": "lib/index.ts",
|
|
3600
|
-
"line": 197
|
|
3601
|
-
},
|
|
3602
|
-
"readme": {
|
|
3603
|
-
"markdown": "# AWS Lambda Layer with kubectl (and helm)\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nThis module exports a single class called `KubectlLayer` which is a `lambda.Layer` that bundles the [`kubectl`](https://kubernetes.io/docs/reference/kubectl/kubectl/) and the [`helm`](https://helm.sh/) command line.\n\n> - Helm Version: 1.20.0\n> - Kubectl Version: 3.4.2\n\nUsage:\n\n```ts\nconst fn = new lambda.Function(...);\nfn.addLayers(new KubectlLayer(stack, 'KubectlLayer'));\n```\n\n`kubectl` will be installed under `/opt/kubectl/kubectl`, and `helm` will be installed under `/opt/helm/helm`.\n"
|
|
3604
|
-
},
|
|
3605
2579
|
"targets": {
|
|
3606
2580
|
"dotnet": {
|
|
3607
2581
|
"namespace": "Amazon.CDK.LambdaLayer.Kubectl"
|
|
@@ -3615,13 +2589,6 @@
|
|
|
3615
2589
|
}
|
|
3616
2590
|
},
|
|
3617
2591
|
"aws-cdk-lib.pipelines": {
|
|
3618
|
-
"locationInModule": {
|
|
3619
|
-
"filename": "lib/index.ts",
|
|
3620
|
-
"line": 198
|
|
3621
|
-
},
|
|
3622
|
-
"readme": {
|
|
3623
|
-
"markdown": "# CDK Pipelines\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\nA construct library for painless Continuous Delivery of CDK applications.\n\n> This module contains two sets of APIs: an **original** and a **modern** version of\nCDK Pipelines. The *modern* API has been updated to be easier to work with and\ncustomize, and will be the preferred API going forward. The *original* version\nof the API is still available for backwards compatibility, but we recommend migrating\nto the new version if possible.\n>\n> Compared to the original API, the modern API: has more sensible defaults; is\n> more flexible; supports parallel deployments; supports multiple synth inputs;\n> allows more control of CodeBuild project generation; supports deployment\n> engines other than CodePipeline.\n>\n> The README for the original API, as well as a migration guide, can be found in [our GitHub repository](https://github.com/aws/aws-cdk/blob/master/packages/@aws-cdk/pipelines/ORIGINAL_API.md).\n\n## At a glance\n\nDeploying your application continuously starts by defining a\n`MyApplicationStage`, a subclass of `Stage` that contains the stacks that make\nup a single copy of your application.\n\nYou then define a `Pipeline`, instantiate as many instances of\n`MyApplicationStage` as you want for your test and production environments, with\ndifferent parameters for each, and calling `pipeline.addStage()` for each of\nthem. You can deploy to the same account and Region, or to a different one,\nwith the same amount of code. The *CDK Pipelines* library takes care of the\ndetails.\n\nCDK Pipelines supports multiple *deployment engines* (see below), and comes with\na deployment engine that deployes CDK apps using AWS CodePipeline. To use the\nCodePipeline engine, define a `CodePipeline` construct. The following\nexample creates a CodePipeline that deploys an application from GitHub:\n\n```ts\n/** The stacks for our app are defined in my-stacks.ts. 
The internals of these\n * stacks aren't important, except that DatabaseStack exposes an attribute\n * \"table\" for a database table it defines, and ComputeStack accepts a reference\n * to this table in its properties.\n */\nimport { DatabaseStack, ComputeStack } from '../lib/my-stacks';\nimport { Construct, Stage, Stack, StackProps, StageProps } from 'aws-cdk-lib';\nimport { CodePipeline, CodePipelineSource, ShellStep } from 'aws-cdk-lib/pipelines';\n\n/**\n * Stack to hold the pipeline\n */\nclass MyPipelineStack extends Stack {\n constructor(scope: Construct, id: string, props?: StackProps) {\n super(scope, id, props);\n\n const pipeline = new CodePipeline(this, 'Pipeline', {\n synth: new ShellStep('Synth', {\n // Use a connection created using the AWS console to authenticate to GitHub\n // Other sources are available.\n input: CodePipelineSource.connection('my-org/my-app', 'main', {\n connectionArn: 'arn:aws:codestar-connections:us-east-1:222222222222:connection/7d2469ff-514a-4e4f-9003-5ca4a43cdc41', // Created using the AWS console * });',\n }),\n commands: [\n 'npm ci',\n 'npm run build',\n 'npx cdk synth',\n ],\n }),\n });\n\n // 'MyApplication' is defined below. 
Call `addStage` as many times as\n // necessary with any account and region (may be different from the\n // pipeline's).\n pipeline.addStage(new MyApplication(this, 'Prod', {\n env: {\n account: '123456789012',\n region: 'eu-west-1',\n }\n }));\n }\n}\n\n/**\n * Your application\n *\n * May consist of one or more Stacks (here, two)\n *\n * By declaring our DatabaseStack and our ComputeStack inside a Stage,\n * we make sure they are deployed together, or not at all.\n */\nclass MyApplication extends Stage {\n constructor(scope: Construct, id: string, props?: StageProps) {\n super(scope, id, props);\n\n const dbStack = new DatabaseStack(this, 'Database');\n new ComputeStack(this, 'Compute', {\n table: dbStack.table,\n });\n }\n}\n\n// In your main file\nnew MyPipelineStack(app, 'PipelineStack', {\n env: {\n account: '123456789012',\n region: 'eu-west-1',\n }\n});\n```\n\nThe pipeline is **self-mutating**, which means that if you add new\napplication stages in the source code, or new stacks to `MyApplication`, the\npipeline will automatically reconfigure itself to deploy those new stages and\nstacks.\n\n(Note that have to *bootstrap* all environments before the above code\nwill work, see the section **CDK Environment Bootstrapping** below).\n\n## CDK Versioning\n\nThis library uses prerelease features of the CDK framework, which can be enabled\nby adding the following to `cdk.json`:\n\n```js\n{\n // ...\n \"context\": {\n \"@aws-cdk/core:newStyleStackSynthesis\": true\n }\n}\n```\n\n## Provisioning the pipeline\n\nTo provision the pipeline you have defined, making sure the target environment\nhas been bootstrapped (see below), and then executing deploying the\n`PipelineStack` *once*. 
Afterwards, the pipeline will keep itself up-to-date.\n\n> **Important**: be sure to `git commit` and `git push` before deploying the\n> Pipeline stack using `cdk deploy`!\n>\n> The reason is that the pipeline will start deploying and self-mutating\n> right away based on the sources in the repository, so the sources it finds\n> in there should be the ones you want it to find.\n\nRun the following commands to get the pipeline going:\n\n```console\n$ git commit -a\n$ git push\n$ cdk deploy PipelineStack\n```\n\nAdministrative permissions to the account are only necessary up until\nthis point. We recommend you shed access to these credentials after doing this.\n\n### Working on the pipeline\n\nThe self-mutation feature of the Pipeline might at times get in the way\nof the pipeline development workflow. Each change to the pipeline must be pushed\nto git, otherwise, after the pipeline was updated using `cdk deploy`, it will\nautomatically revert to the state found in git.\n\nTo make the development more convenient, the self-mutation feature can be turned\noff temporarily, by passing `selfMutation: false` property, example:\n\n```ts\n// Modern API\nconst pipeline = new CodePipeline(this, 'Pipeline', {\n selfMutation: false,\n ...\n});\n\n// Original API\nconst pipeline = new CdkPipeline(this, 'Pipeline', {\n selfMutating: false,\n ...\n});\n```\n\n## Definining the pipeline\n\nThis section of the documentation describes the AWS CodePipeline engine, which\ncomes with this library. If you want to use a different deployment engine, read\nthe section *Using a different deployment engine* below.\n\n### Synth and sources\n\nTo define a pipeline, instantiate a `CodePipeline` construct from the\n`@aws-cdk/pipelines` module. It takes one argument, a `synth` step, which is\nexpected to produce the CDK Cloud Assembly as its single output (the contents of\nthe `cdk.out` directory after running `cdk synth`). 
\"Steps\" are arbitrary\nactions in the pipeline, typically used to run scripts or commands.\n\nFor the synth, use a `ShellStep` and specify the commands necessary to install\ndependencies, the CDK CLI, build your project and run `cdk synth`; the specific\ncommands required will depend on the programming language you are using. For a\ntypical NPM-based project, the synth will look like this:\n\n```ts\nconst source = /* the repository source */;\n\nconst pipeline = new CodePipeline(this, 'Pipeline', {\n synth: new ShellStep('Synth', {\n input: source,\n commands: [\n 'npm ci',\n 'npm run build',\n 'npx cdk synth',\n ],\n }),\n});\n```\n\nThe pipeline assumes that your `ShellStep` will produce a `cdk.out`\ndirectory in the root, containing the CDK cloud assembly. If your\nCDK project lives in a subdirectory, be sure to adjust the\n`primaryOutputDirectory` to match:\n\n```ts\nconst pipeline = new CodePipeline(this, 'Pipeline', {\n synth: new ShellStep('Synth', {\n input: source,\n commands: [\n 'cd mysubdir',\n 'npm ci',\n 'npm run build',\n 'npx cdk synth',\n ],\n primaryOutputDirectory: 'mysubdir/cdk.out',\n }),\n});\n```\n\nThe underlying `@aws-cdk/aws-codepipeline.Pipeline` construct will be produced\nwhen `app.synth()` is called. You can also force it to be produced\nearlier by calling `pipeline.buildPipeline()`. After you've called\nthat method, you can inspect the constructs that were produced by\naccessing the properties of the `pipeline` object.\n\n#### Commands for other languages and package managers\n\nThe commands you pass to `new ShellStep` will be very similar to the commands\nyou run on your own workstation to install dependencies and synth your CDK\nproject. 
Here are some (non-exhaustive) examples for what those commands might\nlook like in a number of different situations.\n\nFor Yarn, the install commands are different:\n\n```ts\nconst pipeline = new CodePipeline(this, 'Pipeline', {\n synth: new ShellStep('Synth', {\n input: source,\n commands: [\n 'yarn install --frozen-lockfile',\n 'yarn build',\n 'npx cdk synth',\n ],\n })\n});\n```\n\nFor Python projects, remember to install the CDK CLI globally (as\nthere is no `package.json` to automatically install it for you):\n\n```ts\nconst pipeline = new CodePipeline(this, 'Pipeline', {\n synth: new ShellStep('Synth', {\n input: source,\n commands: [\n 'pip install -r requirements.txt',\n 'npm install -g aws-cdk',\n 'cdk synth',\n ],\n })\n});\n```\n\nFor Java projects, remember to install the CDK CLI globally (as\nthere is no `package.json` to automatically install it for you),\nand the Maven compilation step is automatically executed for you\nas you run `cdk synth`:\n\n```ts\nconst pipeline = new CodePipeline(this, 'Pipeline', {\n synth: new ShellStep('Synth', {\n input: source,\n commands: [\n 'npm install -g aws-cdk',\n 'cdk synth',\n ],\n })\n});\n```\n\nYou can adapt these examples to your own situation.\n\n#### CodePipeline Sources\n\nIn CodePipeline, *Sources* define where the source of your application lives.\nWhen a change to the source is detected, the pipeline will start executing.\nSource objects can be created by factory methods on the `CodePipelineSource` class:\n\n##### GitHub, GitHub Enterprise, BitBucket using a connection\n\nThe recommended way of connecting to GitHub or BitBucket is by using a *connection*.\nYou will first use the AWS Console to authenticate to the source control\nprovider, and then use the connection ARN in your pipeline definition:\n\n```ts\nCodePipelineSource.connection('org/repo', 'branch', {\n connectionArn: 'arn:aws:codestar-connections:us-east-1:222222222222:connection/7d2469ff-514a-4e4f-9003-5ca4a43cdc41',\n});\n```\n\n##### 
GitHub using OAuth\n\nYou can also authenticate to GitHub using a personal access token. This expects\nthat you've created a personal access token and stored it in Secrets Manager.\nBy default, the source object will look for a secret named **github-token**, but\nyou can change the name. The token should have the **repo** and **admin:repo_hook**\nscopes.\n\n```ts\nCodePipelineSource.gitHub('org/repo', 'branch', {\n // This is optional\n authentication: SecretValue.secretsManager('my-token'),\n});\n```\n\n##### CodeCommit\n\nYou can use a CodeCommit repository as the source. Either create or import\nthat the CodeCommit repository and then use `CodePipelineSource.codeCommit`\nto reference it:\n\n```ts\nconst repository = codecommit.fromRepositoryName(this, 'Repository', 'my-repository');\nCodePipelineSource.codeCommit(repository);\n```\n\n##### S3\n\nYou can use a zip file in S3 as the source of the pipeline. The pipeline will be\ntriggered every time the file in S3 is changed:\n\n```ts\nconst bucket = s3.Bucket.fromBucketName(this, 'Bucket', 'my-bucket');\nCodePipelineSource.s3(bucket, 'my/source.zip');\n```\n\n#### Additional inputs\n\n`ShellStep` allows passing in more than one input: additional\ninputs will be placed in the directories you specify. 
Any step that produces an\noutput file set can be used as an input, such as a `CodePipelineSource`, but\nalso other `ShellStep`:\n\n```ts\nconst prebuild = new ShellStep('Prebuild', {\n input: CodePipelineSource.gitHub('myorg/repo1'),\n primaryOutputDirectory: './build',\n commands: ['./build.sh'],\n});\n\nconst pipeline = new CodePipeline(this, 'Pipeline', {\n synth: new ShellStep('Synth', {\n input: CodePipelineSource.gitHub('myorg/repo2'),\n additionalInputs: {\n 'subdir': CodePipelineSource.gitHub('myorg/repo3'),\n '../siblingdir': prebuild,\n },\n\n commands: ['./build.sh'],\n })\n});\n```\n\n### CDK application deployments\n\nAfter you have defined the pipeline and the `synth` step, you can add one or\nmore CDK `Stages` which will be deployed to their target environments. To do\nso, call `pipeline.addStage()` on the Stage object:\n\n```ts\n// Do this as many times as necessary with any account and region\n// Account and region may different from the pipeline's.\npipeline.addStage(new MyApplicationStage(this, 'Prod', {\n env: {\n account: '123456789012',\n region: 'eu-west-1',\n }\n}));\n```\n\nCDK Pipelines will automatically discover all `Stacks` in the given `Stage`\nobject, determine their dependency order, and add appropriate actions to the\npipeline to publish the assets referenced in those stacks and deploy the stacks\nin the right order.\n\nIf the `Stacks` are targeted at an environment in a different AWS account or\nRegion and that environment has been\n[bootstrapped](https://docs.aws.amazon.com/cdk/latest/guide/bootstrapping.html)\n, CDK Pipelines will transparently make sure the IAM roles are set up\ncorrectly and any requisite replication Buckets are created.\n\n#### Deploying in parallel\n\nBy default, all applications added to CDK Pipelines by calling `addStage()` will\nbe deployed in sequence, one after the other. If you have a lot of stages, you can\nspeed up the pipeline by choosing to deploy some stages in parallel. 
You do this\nby calling `addWave()` instead of `addStage()`: a *wave* is a set of stages that\nare all deployed in parallel instead of sequentially. Waves themselves are still\ndeployed in sequence. For example, the following will deploy two copies of your\napplication to `eu-west-1` and `eu-central-1` in parallel:\n\n```ts\nconst europeWave = pipeline.addWave('Europe');\neuropeWave.addStage(new MyApplicationStage(this, 'Ireland', {\n env: { region: 'eu-west-1' }\n}));\neuropeWave.addStage(new MyApplicationStage(this, 'Germany', {\n env: { region: 'eu-central-1' }\n}));\n```\n\n#### Deploying to other accounts / encrypting the Artifact Bucket\n\nCDK Pipelines can transparently deploy to other Regions and other accounts\n(provided those target environments have been\n[*bootstrapped*](https://docs.aws.amazon.com/cdk/latest/guide/bootstrapping.html)).\nHowever, deploying to another account requires one additional piece of\nconfiguration: you need to enable `crossAccountKeys: true` when creating the\npipeline.\n\nThis will encrypt the artifact bucket(s), but incurs a cost for maintaining the\nKMS key.\n\nExample:\n\n```ts\nconst pipeline = new CodePipeline(this, 'Pipeline', {\n // Encrypt artifacts, required for cross-account deployments\n crossAccountKeys: true,\n});\n```\n\n### Validation\n\nEvery `addStage()` and `addWave()` command takes additional options. As part of these options,\nyou can specify `pre` and `post` steps, which are arbitrary steps that run before or after\nthe contents of the stage or wave, respectively. You can use these to add validations like\nmanual or automated gates to your pipeline. We recommend putting manual approval gates in the set of `pre` steps, and automated approval gates in\nthe set of `post` steps.\n\nThe following example shows both an automated approval in the form of a `ShellStep`, and\na manual approvel in the form of a `ManualApprovalStep` added to the pipeline. 
Both must\npass in order to promote from the `PreProd` to the `Prod` environment:\n\n```ts\nconst preprod = new MyApplicationStage(this, 'PreProd', { ... });\nconst prod = new MyApplicationStage(this, 'Prod', { ... });\n\npipeline.addStage(preprod, {\n post: [\n new ShellStep('Validate Endpoint', {\n commands: ['curl -Ssf https://my.webservice.com/'],\n }),\n ],\n});\npipeline.addStage(prod, {\n pre: [\n new ManualApprovalStep('PromoteToProd'),\n ],\n});\n```\n\n#### Using CloudFormation Stack Outputs in approvals\n\nBecause many CloudFormation deployments result in the generation of resources with unpredictable\nnames, validations have support for reading back CloudFormation Outputs after a deployment. This\nmakes it possible to pass (for example) the generated URL of a load balancer to the test set.\n\nTo use Stack Outputs, expose the `CfnOutput` object you're interested in, and\npass it to `envFromCfnOutputs` of the `ShellStep`:\n\n```ts\nclass MyApplicationStage extends Stage {\n public readonly loadBalancerAddress: CfnOutput;\n // ...\n}\n\nconst lbApp = new MyApplicationStage(this, 'MyApp', { /* ... */ });\npipeline.addStage(lbApp, {\n post: [\n new ShellStep('HitEndpoint', {\n envFromCfnOutputs: {\n // Make the load balancer address available as $URL inside the commands\n URL: lbApp.loadBalancerAddress,\n },\n commands: ['curl -Ssf $URL'],\n });\n ],\n});\n```\n\n#### Running scripts compiled during the synth step\n\nAs part of a validation, you probably want to run a test suite that's more\nelaborate than what can be expressed in a couple of lines of shell script.\nYou can bring additional files into the shell script validation by supplying\nthe `input` or `additionalInputs` property of `ShellStep`. 
The input can\nbe produced by the `Synth` step, or come from a source or any other build\nstep.\n\nHere's an example that captures an additional output directory in the synth\nstep and runs tests from there:\n\n```ts\nconst synth = new ShellStep('Synth', { /* ... */ });\nconst pipeline = new CodePipeline(this, 'Pipeline', { synth });\n\npipeline.addStage(/* ... */, {\n post: [\n new ShellStep('Approve', {\n // Use the contents of the 'integ' directory from the synth step as the input\n input: synth.addOutputDirectory('integ'),\n commands: ['cd integ && ./run.sh'],\n }),\n ],\n});\n```\n\n### Customizing CodeBuild Projects\n\nCDK pipelines will generate CodeBuild projects for each `ShellStep` you use, and it\nwill also generate CodeBuild projects to publish assets and perform the self-mutation\nof the pipeline. To control the various aspects of the CodeBuild projects that get\ngenerated, use a `CodeBuildStep` instead of a `ShellStep`. This class has a number\nof properties that allow you to customize various aspects of the projects:\n\n```ts\nnew CodeBuildStep('Synth', {\n // ...standard ShellStep props...\n commands: [/* ... */],\n env: { /* ... */ },\n\n // If you are using a CodeBuildStep explicitly, set the 'cdk.out' directory\n // to be the synth step's output.\n primaryOutputDirectory: 'cdk.out',\n\n // Control the name of the project\n projectName: 'MyProject',\n\n // Control parts of the BuildSpec other than the regular 'build' and 'install' commands\n partialBuildSpec: codebuild.BuildSpec.fromObject({\n version: '0.2',\n // ...\n }),\n\n // Control the build environment\n buildEnvironment: {\n computeType: codebuild.ComputeType.LARGE,\n },\n\n // Control Elastic Network Interface creation\n vpc: vpc,\n subnetSelection: { subnetType: ec2.SubnetType.PRIVATE },\n securityGroups: [mySecurityGroup],\n\n // Additional policy statements for the execution role\n rolePolicyStatements: [\n new iam.PolicyStatement({ /* ... 
*/ }),\n ],\n});\n```\n\nYou can also configure defaults for *all* CodeBuild projects by passing `codeBuildDefaults`,\nor just for the synth, asset publishing, and self-mutation projects by passing `synthCodeBuildDefaults`,\n`assetPublishingCodeBuildDefaults`, or `selfMutationCodeBuildDefaults`:\n\n```ts\nnew CodePipeline(this, 'Pipeline', {\n // ...\n\n // Defaults for all CodeBuild projects\n codeBuildDefaults: {\n // Prepend commands and configuration to all projects\n partialBuildSpec: codebuild.BuildSpec.fromObject({\n version: '0.2',\n // ...\n }),\n\n // Control the build environment\n buildEnvironment: {\n computeType: codebuild.ComputeType.LARGE,\n },\n\n // Control Elastic Network Interface creation\n vpc: vpc,\n subnetSelection: { subnetType: ec2.SubnetType.PRIVATE },\n securityGroups: [mySecurityGroup],\n\n // Additional policy statements for the execution role\n rolePolicy: [\n new iam.PolicyStatement({ /* ... */ }),\n ],\n },\n\n synthCodeBuildDefaults: { /* ... */ },\n assetPublishingCodeBuildDefaults: { /* ... */ },\n selfMutationCodeBuildDefaults: { /* ... 
*/ },\n});\n```\n\n### Arbitrary CodePipeline actions\n\nIf you want to add a type of CodePipeline action to the CDK Pipeline that\ndoesn't have a matching class yet, you can define your own step class that extends\n`Step` and implements `ICodePipelineActionFactory`.\n\nHere's an example that adds a Jenkins step:\n\n```ts\nclass MyJenkinsStep extends Step implements ICodePipelineActionFactory {\n constructor(private readonly provider: codepipeline_actions.JenkinsProvider, private readonly input: FileSet) {\n }\n\n public produceAction(stage: codepipeline.IStage, options: ProduceActionOptions): CodePipelineActionFactoryResult {\n\n // This is where you control what type of Action gets added to the\n // CodePipeline\n stage.addAction(new codepipeline_actions.JenkinsAction({\n // Copy 'actionName' and 'runOrder' from the options\n actionName: options.actionName,\n runOrder: options.runOrder,\n\n // Jenkins-specific configuration\n type: cpactions.JenkinsActionType.TEST,\n jenkinsProvider: this.provider,\n projectName: 'MyJenkinsProject',\n\n // Translate the FileSet into a codepipeline.Artifact\n inputs: [options.artifacts.toCodePipeline(this.input)],\n }));\n\n return { runOrdersConsumed: 1 };\n }\n}\n```\n\n## Using Docker in the pipeline\n\nDocker can be used in 3 different places in the pipeline:\n\n* If you are using Docker image assets in your application stages: Docker will\n run in the asset publishing projects.\n* If you are using Docker image assets in your stack (for example as\n images for your CodeBuild projects): Docker will run in the self-mutate project.\n* If you are using Docker to bundle file assets anywhere in your project (for\n example, if you are using such construct libraries as\n `@aws-cdk/aws-lambda-nodejs`): Docker will run in the\n *synth* project.\n\nFor the first case, you don't need to do anything special. 
For the other two cases,\nyou need to make sure that **privileged mode** is enabled on the correct CodeBuild\nprojects, so that Docker can run correctly. The follow sections describe how to do\nthat.\n\nYou may also need to authenticate to Docker registries to avoid being throttled.\nSee the section **Authenticating to Docker registries** below for information on how to do\nthat.\n\n### Using Docker image assets in the pipeline\n\nIf your `PipelineStack` is using Docker image assets (as opposed to the application\nstacks the pipeline is deploying), for example by the use of `LinuxBuildImage.fromAsset()`,\nyou need to pass `dockerEnabledForSelfMutation: true` to the pipeline. For example:\n\n```ts\nconst pipeline = new CodePipeline(this, 'Pipeline', {\n // ...\n\n // Turn this on because the pipeline uses Docker image assets\n dockerEnabledForSelfMutation: true,\n});\n\npipeline.addWave('MyWave', {\n post: [\n new CodeBuildStep('RunApproval', {\n commands: ['command-from-image'],\n buildEnvironment: {\n // The user of a Docker image asset in the pipeline requires turning on\n // 'dockerEnabledForSelfMutation'.\n buildImage: LinuxBuildImage.fromAsset(this, 'Image', {\n directory: './docker-image',\n })\n },\n })\n ],\n});\n```\n\n> **Important**: You must turn on the `dockerEnabledForSelfMutation` flag,\n> commit and allow the pipeline to self-update *before* adding the actual\n> Docker asset.\n\n### Using bundled file assets\n\nIf you are using asset bundling anywhere (such as automatically done for you\nif you add a construct like `@aws-cdk/aws-lambda-nodejs`), you need to pass\n`dockerEnabledForSynth: true` to the pipeline. 
For example:\n\n```ts\nconst pipeline = new CodePipeline(this, 'Pipeline', {\n // ...\n\n // Turn this on because the application uses bundled file assets\n dockerEnabledForSynth: true,\n});\n```\n\n> **Important**: You must turn on the `dockerEnabledForSynth` flag,\n> commit and allow the pipeline to self-update *before* adding the actual\n> Docker asset.\n\n### Authenticating to Docker registries\n\nYou can specify credentials to use for authenticating to Docker registries as part of the\npipeline definition. This can be useful if any Docker image assets — in the pipeline or\nany of the application stages — require authentication, either due to being in a\ndifferent environment (e.g., ECR repo) or to avoid throttling (e.g., DockerHub).\n\n```ts\nconst dockerHubSecret = secretsmanager.Secret.fromSecretCompleteArn(this, 'DHSecret', 'arn:aws:...');\nconst customRegSecret = secretsmanager.Secret.fromSecretCompleteArn(this, 'CRSecret', 'arn:aws:...');\nconst repo1 = ecr.Repository.fromRepositoryArn(stack, 'Repo', 'arn:aws:ecr:eu-west-1:0123456789012:repository/Repo1');\nconst repo2 = ecr.Repository.fromRepositoryArn(stack, 'Repo', 'arn:aws:ecr:eu-west-1:0123456789012:repository/Repo2');\n\nconst pipeline = new CodePipeline(this, 'Pipeline', {\n dockerCredentials: [\n DockerCredential.dockerHub(dockerHubSecret),\n DockerCredential.customRegistry('dockerregistry.example.com', customRegSecret),\n DockerCredential.ecr([repo1, repo2]);\n ],\n // ...\n});\n```\n\nFor authenticating to Docker registries that require a username and password combination\n(like DockerHub), create a Secrets Manager Secret with fields named `username`\nand `secret`, and import it (the field names change be customized).\n\nAuthentication to ECR repostories is done using the execution role of the\nrelevant CodeBuild job. 
Both types of credentials can be provided with an\noptional role to assume before requesting the credentials.\n\nBy default, the Docker credentials provided to the pipeline will be available to\nthe **Synth**, **Self-Update**, and **Asset Publishing** actions within the\n*pipeline. The scope of the credentials can be limited via the `DockerCredentialUsage` option.\n\n```ts\nconst dockerHubSecret = secretsmanager.Secret.fromSecretCompleteArn(this, 'DHSecret', 'arn:aws:...');\n// Only the image asset publishing actions will be granted read access to the secret.\nconst creds = DockerCredential.dockerHub(dockerHubSecret, { usages: [DockerCredentialUsage.ASSET_PUBLISHING] });\n```\n\n## CDK Environment Bootstrapping\n\nAn *environment* is an *(account, region)* pair where you want to deploy a\nCDK stack (see\n[Environments](https://docs.aws.amazon.com/cdk/latest/guide/environments.html)\nin the CDK Developer Guide). In a Continuous Deployment pipeline, there are\nat least two environments involved: the environment where the pipeline is\nprovisioned, and the environment where you want to deploy the application (or\ndifferent stages of the application). These can be the same, though best\npractices recommend you isolate your different application stages from each\nother in different AWS accounts or regions.\n\nBefore you can provision the pipeline, you have to *bootstrap* the environment you want\nto create it in. If you are deploying your application to different environments, you\nalso have to bootstrap those and be sure to add a *trust* relationship.\n\nAfter you have bootstrapped an environment and created a pipeline that deploys\nto it, it's important that you don't delete the stack or change its *Qualifier*,\nor future deployments to this environment will fail. 
If you want to upgrade\nthe bootstrap stack to a newer version, do that by updating it in-place.\n\n> This library requires the *modern* bootstrapping stack which has\n> been updated specifically to support cross-account continuous delivery. Starting,\n> in CDK v2 this new bootstrapping stack will become the default, but for now it is still\n> opt-in.\n>\n> The commands below assume you are running `cdk bootstrap` in a directory\n> where `cdk.json` contains the `\"@aws-cdk/core:newStyleStackSynthesis\": true`\n> setting in its context, which will switch to the new bootstrapping stack\n> automatically.\n>\n> If run from another directory, be sure to run the bootstrap command with\n> the environment variable `CDK_NEW_BOOTSTRAP=1` set.\n\nTo bootstrap an environment for provisioning the pipeline:\n\n```console\n$ env CDK_NEW_BOOTSTRAP=1 npx cdk bootstrap \\\n [--profile admin-profile-1] \\\n --cloudformation-execution-policies arn:aws:iam::aws:policy/AdministratorAccess \\\n aws://111111111111/us-east-1\n```\n\nTo bootstrap a different environment for deploying CDK applications into using\na pipeline in account `111111111111`:\n\n```console\n$ env CDK_NEW_BOOTSTRAP=1 npx cdk bootstrap \\\n [--profile admin-profile-2] \\\n --cloudformation-execution-policies arn:aws:iam::aws:policy/AdministratorAccess \\\n --trust 11111111111 \\\n aws://222222222222/us-east-2\n```\n\nIf you only want to trust an account to do lookups (e.g, when your CDK application has a\n`Vpc.fromLookup()` call), use the option `--trust-for-lookup`:\n\n```console\n$ env CDK_NEW_BOOTSTRAP=1 npx cdk bootstrap \\\n [--profile admin-profile-2] \\\n --cloudformation-execution-policies arn:aws:iam::aws:policy/AdministratorAccess \\\n --trust-for-lookup 11111111111 \\\n aws://222222222222/us-east-2\n```\n\nThese command lines explained:\n\n* `npx`: means to use the CDK CLI from the current NPM install. 
If you are using\n a global install of the CDK CLI, leave this out.\n* `--profile`: should indicate a profile with administrator privileges that has\n permissions to provision a pipeline in the indicated account. You can leave this\n flag out if either the AWS default credentials or the `AWS_*` environment\n variables confer these permissions.\n* `--cloudformation-execution-policies`: ARN of the managed policy that future CDK\n deployments should execute with. By default this is `AdministratorAccess`, but\n if you also specify the `--trust` flag to give another Account permissions to\n deploy into the current account, you must specify a value here.\n* `--trust`: indicates which other account(s) should have permissions to deploy\n CDK applications into this account. In this case we indicate the Pipeline's account,\n but you could also use this for developer accounts (don't do that for production\n application accounts though!).\n* `--trust-for-lookup`: gives a more limited set of permissions to the\n trusted account, only allowing it to look up values such as availability zones, EC2 images and\n VPCs. `--trust-for-lookup` does not give permissions to modify anything in the account.\n Note that `--trust` implies `--trust-for-lookup`, so you don't need to specify\n the same acocunt twice.\n* `aws://222222222222/us-east-2`: the account and region we're bootstrapping.\n\n> Be aware that anyone who has access to the trusted Accounts **effectively has all\n> permissions conferred by the configured CloudFormation execution policies**,\n> allowing them to do things like read arbitrary S3 buckets and create arbitrary\n> infrastructure in the bootstrapped account. Restrict the list of `--trust`ed Accounts,\n> or restrict the policies configured by `--cloudformation-execution-policies`.\n\n<br>\n\n> **Security tip**: we recommend that you use administrative credentials to an\n> account only to bootstrap it and provision the initial pipeline. 
Otherwise,\n> access to administrative credentials should be dropped as soon as possible.\n\n<br>\n\n> **On the use of AdministratorAccess**: The use of the `AdministratorAccess` policy\n> ensures that your pipeline can deploy every type of AWS resource to your account.\n> Make sure you trust all the code and dependencies that make up your CDK app.\n> Check with the appropriate department within your organization to decide on the\n> proper policy to use.\n>\n> If your policy includes permissions to create on attach permission to a role,\n> developers can escalate their privilege with more permissive permission.\n> Thus, we recommend implementing [permissions boundary](https://aws.amazon.com/premiumsupport/knowledge-center/iam-permission-boundaries/)\n> in the CDK Execution role. To do this, you can bootstrap with the `--template` option with\n> [a customized template](https://github.com/aws-samples/aws-bootstrap-kit-examples/blob/ba28a97d289128281bc9483bcba12c1793f2c27a/source/1-SDLC-organization/lib/cdk-bootstrap-template.yml#L395) that contains a permission boundary.\n\n### Migrating from old bootstrap stack\n\nThe bootstrap stack is a CloudFormation stack in your account named\n**CDKToolkit** that provisions a set of resources required for the CDK\nto deploy into that environment.\n\nThe \"new\" bootstrap stack (obtained by running `cdk bootstrap` with\n`CDK_NEW_BOOTSTRAP=1`) is slightly more elaborate than the \"old\" stack. It\ncontains:\n\n* An S3 bucket and ECR repository with predictable names, so that we can reference\n assets in these storage locations *without* the use of CloudFormation template\n parameters.\n* A set of roles with permissions to access these asset locations and to execute\n CloudFormation, assumable from whatever accounts you specify under `--trust`.\n\nIt is possible and safe to migrate from the old bootstrap stack to the new\nbootstrap stack. This will create a new S3 file asset bucket in your account\nand orphan the old bucket. 
You should manually delete the orphaned bucket\nafter you are sure you have redeployed all CDK applications and there are no\nmore references to the old asset bucket.\n\n## Context Lookups\n\nYou might be using CDK constructs that need to look up [runtime\ncontext](https://docs.aws.amazon.com/cdk/latest/guide/context.html#context_methods),\nwhich is information from the target AWS Account and Region the CDK needs to\nsynthesize CloudFormation templates appropriate for that environment. Examples\nof this kind of context lookups are the number of Availability Zones available\nto you, a Route53 Hosted Zone ID, or the ID of an AMI in a given region. This\ninformation is automatically looked up when you run `cdk synth`.\n\nBy default, a `cdk synth` performed in a pipeline will not have permissions\nto perform these lookups, and the lookups will fail. This is by design.\n\n**Our recommended way of using lookups** is by running `cdk synth` on the\ndeveloper workstation and checking in the `cdk.context.json` file, which\ncontains the results of the context lookups. This will make sure your\nsynthesized infrastructure is consistent and repeatable. If you do not commit\n`cdk.context.json`, the results of the lookups may suddenly be different in\nunexpected ways, and even produce results that cannot be deployed or will cause\ndata loss. To give an account permissions to perform lookups against an\nenvironment, without being able to deploy to it and make changes, run\n`cdk bootstrap --trust-for-lookup=<account>`.\n\nIf you want to use lookups directly from the pipeline, you either need to accept\nthe risk of nondeterminism, or make sure you save and load the\n`cdk.context.json` file somewhere between synth runs. Finally, you should\ngive the synth CodeBuild execution role permissions to assume the bootstrapped\nlookup roles. 
As an example, doing so would look like this:\n\n```ts\nnew CodePipeline(this, 'Pipeline', {\n synth: new CodeBuildStep('Synth', {\n input: // ...input...\n commands: [\n // Commands to load cdk.context.json from somewhere here\n '...',\n 'npm ci',\n 'npm run build',\n 'npx cdk synth',\n // Commands to store cdk.context.json back here\n '...',\n ],\n rolePolicyStatements: [\n new iam.PolicyStatement({\n actions: ['sts:AssumeRole'],\n resources: ['*'],\n conditions: {\n StringEquals: {\n 'iam:ResourceTag/aws-cdk:bootstrap-role': 'lookup',\n },\n },\n }),\n ],\n }),\n});\n```\n\nThe above example requires that the target environments have all\nbeen bootstrapped with bootstrap stack version `8`, released with\nCDK CLI `1.114.0`.\n\n## Security Considerations\n\nIt's important to stay safe while employing Continuous Delivery. The CDK Pipelines\nlibrary comes with secure defaults to the best of our ability, but by its\nvery nature the library cannot take care of everything.\n\nWe therefore expect you to mind the following:\n\n* Maintain dependency hygiene and vet 3rd-party software you use. Any software you\n run on your build machine has the ability to change the infrastructure that gets\n deployed. Be careful with the software you depend on.\n\n* Use dependency locking to prevent accidental upgrades! The default `CdkSynths` that\n come with CDK Pipelines will expect `package-lock.json` and `yarn.lock` to\n ensure your dependencies are the ones you expect.\n\n* Credentials to production environments should be short-lived. After\n bootstrapping and the initial pipeline provisioning, there is no more need for\n developers to have access to any of the account credentials; all further\n changes can be deployed through git. 
Avoid the chances of credentials leaking\n by not having them in the first place!\n\n### Confirm permissions broadening\n\nTo keep tabs on the security impact of changes going out through your pipeline,\nyou can insert a security check before any stage deployment. This security check\nwill check if the upcoming deployment would add any new IAM permissions or\nsecurity group rules, and if so pause the pipeline and require you to confirm\nthe changes.\n\nThe security check will appear as two distinct actions in your pipeline: first\na CodeBuild project that runs `cdk diff` on the stage that's about to be deployed,\nfollowed by a Manual Approval action that pauses the pipeline. If it so happens\nthat there no new IAM permissions or security group rules will be added by the deployment,\nthe manual approval step is automatically satisfied. The pipeline will look like this:\n\n```txt\nPipeline\n├── ...\n├── MyApplicationStage\n│ ├── MyApplicationSecurityCheck // Security Diff Action\n│ ├── MyApplicationManualApproval // Manual Approval Action\n│ ├── Stack.Prepare\n│ └── Stack.Deploy\n└── ...\n```\n\nYou can insert the security check by using a `ConfirmPermissionsBroadening` step:\n\n```ts\nconst stage = new MyApplicationStage(this, 'MyApplication');\npipeline.addStage(stage, {\n pre: [\n new ConfirmPermissionsBroadening('Check', { stage }),\n ],\n});\n```\n\nTo get notified when there is a change that needs your manual approval,\ncreate an SNS Topic, subscribe your own email address, and pass it in as\nas the `notificationTopic` property:\n\n```ts\nimport { aws_sns as sns } from 'aws-cdk-lib';\nimport { aws_sns_subscriptions as subscriptions } from 'aws-cdk-lib';\nimport { pipelines as pipelines } from 'aws-cdk-lib';\n\nconst topic = new sns.Topic(this, 'SecurityChangesTopic');\ntopic.addSubscription(new subscriptions.EmailSubscription('test@email.com'));\n\nconst stage = new MyApplicationStage(this, 'MyApplication');\npipeline.addStage(stage, {\n pre: [\n new 
ConfirmPermissionsBroadening('Check', {\n stage,\n notificationTopic: topic,\n }),\n ],\n});\n```\n\n**Note**: Manual Approvals notifications only apply when an application has security\ncheck enabled.\n\n## Troubleshooting\n\nHere are some common errors you may encounter while using this library.\n\n### Pipeline: Internal Failure\n\nIf you see the following error during deployment of your pipeline:\n\n```plaintext\nCREATE_FAILED | AWS::CodePipeline::Pipeline | Pipeline/Pipeline\nInternal Failure\n```\n\nThere's something wrong with your GitHub access token. It might be missing, or not have the\nright permissions to access the repository you're trying to access.\n\n### Key: Policy contains a statement with one or more invalid principals\n\nIf you see the following error during deployment of your pipeline:\n\n```plaintext\nCREATE_FAILED | AWS::KMS::Key | Pipeline/Pipeline/ArtifactsBucketEncryptionKey\nPolicy contains a statement with one or more invalid principals.\n```\n\nOne of the target (account, region) environments has not been bootstrapped\nwith the new bootstrap stack. Check your target environments and make sure\nthey are all bootstrapped.\n\n### Message: no matching base directory path found for cdk.out\n\nIf you see this error during the **Synth** step, it means that CodeBuild\nis expecting to find a `cdk.out` directory in the root of your CodeBuild project,\nbut the directory wasn't there. 
There are two common causes for this:\n\n* `cdk synth` is not being executed: `cdk synth` used to be run\n implicitly for you, but you now have to explicitly include the command.\n For NPM-based projects, add `npx cdk synth` to the end of the `commands`\n property, for other languages add `npm install -g aws-cdk` and `cdk synth`.\n* Your CDK project lives in a subdirectory: you added a `cd <somedirectory>` command\n to the list of commands; don't forget to tell the `ScriptStep` about the\n different location of `cdk.out`, by passing `primaryOutputDirectory: '<somedirectory>/cdk.out'`.\n\n### <Stack> is in ROLLBACK_COMPLETE state and can not be updated\n\nIf you see the following error during execution of your pipeline:\n\n```plaintext\nStack ... is in ROLLBACK_COMPLETE state and can not be updated. (Service:\nAmazonCloudFormation; Status Code: 400; Error Code: ValidationError; Request\nID: ...)\n```\n\nThe stack failed its previous deployment, and is in a non-retryable state.\nGo into the CloudFormation console, delete the stack, and retry the deployment.\n\n### Cannot find module 'xxxx' or its corresponding type declarations\n\nYou may see this if you are using TypeScript or other NPM-based languages,\nwhen using NPM 7 on your workstation (where you generate `package-lock.json`)\nand NPM 6 on the CodeBuild image used for synthesizing.\n\nIt looks like NPM 7 has started writing less information to `package-lock.json`,\nleading NPM 6 reading that same file to not install all required packages anymore.\n\nMake sure you are using the same NPM version everywhere, either downgrade your\nworkstation's version or upgrade the CodeBuild version.\n\n### Cannot find module '.../check-node-version.js' (MODULE_NOT_FOUND)\n\nThe above error may be produced by `npx` when executing the CDK CLI, or any\nproject that uses the AWS SDK for JavaScript, without the target application\nhaving been installed yet. 
For example, it can be triggered by `npx cdk synth`\nif `aws-cdk` is not in your `package.json`.\n\nWork around this by either installing the target application using NPM *before*\nrunning `npx`, or set the environment variable `NPM_CONFIG_UNSAFE_PERM=true`.\n\n### Cannot connect to the Docker daemon at unix:///var/run/docker.sock\n\nIf, in the 'Synth' action (inside the 'Build' stage) of your pipeline, you get an error like this:\n\n```console\nstderr: docker: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?.\nSee 'docker run --help'.\n```\n\nIt means that the AWS CodeBuild project for 'Synth' is not configured to run in privileged mode,\nwhich prevents Docker builds from happening. This typically happens if you use a CDK construct\nthat bundles asset using tools run via Docker, like `aws-lambda-nodejs`, `aws-lambda-python`,\n`aws-lambda-go` and others.\n\nMake sure you set the `privileged` environment variable to `true` in the synth definition:\n\n```typescript\n const pipeline = new CdkPipeline(this, 'MyPipeline', {\n ...\n\n synthAction: SimpleSynthAction.standardNpmSynth({\n sourceArtifact: ...,\n cloudAssemblyArtifact: ...,\n\n environment: {\n privileged: true,\n },\n }),\n });\n```\n\nAfter turning on `privilegedMode: true`, you will need to do a one-time manual cdk deploy of your\npipeline to get it going again (as with a broken 'synth' the pipeline will not be able to self\nupdate to the right state).\n\n### S3 error: Access Denied\n\nAn \"S3 Access Denied\" error can have two causes:\n\n* Asset hashes have changed, but self-mutation has been disabled in the pipeline.\n* You have deleted and recreated the bootstrap stack, or changed its qualifier.\n\n#### Self-mutation step has been removed\n\nSome constructs, such as EKS clusters, generate nested stacks. 
When CloudFormation tries\nto deploy those stacks, it may fail with this error:\n\n```console\nS3 error: Access Denied For more information check http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html\n```\n\nThis happens because the pipeline is not self-mutating and, as a consequence, the `FileAssetX`\nbuild projects get out-of-sync with the generated templates. To fix this, make sure the\n`selfMutating` property is set to `true`:\n\n```typescript\nconst pipeline = new CdkPipeline(this, 'MyPipeline', {\n selfMutating: true,\n ...\n});\n```\n\n#### Bootstrap roles have been renamed or recreated\n\nWhile attempting to deploy an application stage, the \"Prepare\" or \"Deploy\" stage may fail with a cryptic error like:\n\n`Action execution failed\nAccess Denied (Service: Amazon S3; Status Code: 403; Error Code: AccessDenied; Request ID: 0123456ABCDEFGH;\nS3 Extended Request ID: 3hWcrVkhFGxfiMb/rTJO0Bk7Qn95x5ll4gyHiFsX6Pmk/NT+uX9+Z1moEcfkL7H3cjH7sWZfeD0=; Proxy: null)`\n\nThis generally indicates that the roles necessary to deploy have been deleted (or deleted and re-created);\nfor example, if the bootstrap stack has been deleted and re-created, this scenario will happen. Under the hood,\nthe resources that rely on these roles (e.g., `cdk-$qualifier-deploy-role-$account-$region`) point to different\ncanonical IDs than the recreated versions of these roles, which causes the errors. There are no simple solutions\nto this issue, and for that reason we **strongly recommend** that bootstrap stacks not be deleted and re-created\nonce created.\n\nThe most automated way to solve the issue is to introduce a secondary bootstrap stack. 
By changing the qualifier\nthat the pipeline stack looks for, a change will be detected and the impacted policies and resources will be updated.\nA hypothetical recovery workflow would look something like this:\n\n* First, for all impacted environments, create a secondary bootstrap stack:\n\n```sh\n$ env CDK_NEW_BOOTSTRAP=1 npx cdk bootstrap \\\n --qualifier randchars1234\n --toolkit-stack-name CDKToolkitTemp\n aws://111111111111/us-east-1\n```\n\n* Update all impacted stacks in the pipeline to use this new qualifier.\nSee https://docs.aws.amazon.com/cdk/latest/guide/bootstrapping.html for more info.\n\n```ts\nnew MyStack(this, 'MyStack', {\n // Update this qualifier to match the one used above.\n synthesizer: new DefaultStackSynthesizer({\n qualifier: 'randchars1234',\n }),\n});\n```\n\n* Deploy the updated stacks. This will update the stacks to use the roles created in the new bootstrap stack.\n* (Optional) Restore back to the original state:\n * Revert the change made in step #2 above\n * Re-deploy the pipeline to use the original qualifier.\n * Delete the temporary bootstrap stack(s)\n\n##### Manual Alternative\n\nAlternatively, the errors can be resolved by finding each impacted resource and policy, and correcting the policies\nby replacing the canonical IDs (e.g., `AROAYBRETNYCYV6ZF2R93`) with the appropriate ARNs. 
As an example, the KMS\nencryption key policy for the artifacts bucket may have a statement that looks like the following:\n\n```json\n{\n \"Effect\" : \"Allow\",\n \"Principal\" : {\n // \"AWS\" : \"AROAYBRETNYCYV6ZF2R93\" // Indicates this issue; replace this value\n \"AWS\": \"arn:aws:iam::0123456789012:role/cdk-hnb659fds-deploy-role-0123456789012-eu-west-1\", // Correct value\n },\n \"Action\" : [ \"kms:Decrypt\", \"kms:DescribeKey\" ],\n \"Resource\" : \"*\"\n}\n```\n\nAny resource or policy that references the qualifier (`hnb659fds` by default) will need to be updated.\n\n## Known Issues\n\nThere are some usability issues that are caused by underlying technology, and\ncannot be remedied by CDK at this point. They are reproduced here for completeness.\n\n* **Console links to other accounts will not work**: the AWS CodePipeline\n console will assume all links are relative to the current account. You will\n not be able to use the pipeline console to click through to a CloudFormation\n stack in a different account.\n* **If a change set failed to apply the pipeline must restarted**: if a change\n set failed to apply, it cannot be retried. The pipeline must be restarted from\n the top by clicking **Release Change**.\n* **A stack that failed to create must be deleted manually**: if a stack\n failed to create on the first attempt, you must delete it using the\n CloudFormation console before starting the pipeline again by clicking\n **Release Change**.\n"
|
|
3624
|
-
},
|
|
3625
2592
|
"targets": {
|
|
3626
2593
|
"dotnet": {
|
|
3627
2594
|
"namespace": "Amazon.CDK.Pipelines"
|
|
@@ -3635,13 +2602,6 @@
|
|
|
3635
2602
|
}
|
|
3636
2603
|
},
|
|
3637
2604
|
"aws-cdk-lib.region_info": {
|
|
3638
|
-
"locationInModule": {
|
|
3639
|
-
"filename": "lib/index.ts",
|
|
3640
|
-
"line": 199
|
|
3641
|
-
},
|
|
3642
|
-
"readme": {
|
|
3643
|
-
"markdown": "# AWS Region-Specific Information Directory\n<!--BEGIN STABILITY BANNER-->\n\n---\n\n\n\n---\n\n<!--END STABILITY BANNER-->\n\n## Usage\n\nSome information used in CDK Applications differs from one AWS region to\nanother, such as service principals used in IAM policies, S3 static website\nendpoints, ...\n\n### The `RegionInfo` class\n\nThe library offers a simple interface to obtain region specific information in\nthe form of the `RegionInfo` class. This is the preferred way to interact with\nthe regional information database:\n\n```ts\nimport { RegionInfo } from 'aws-cdk-lib/region-info';\n\n// Get the information for \"eu-west-1\":\nconst region = RegionInfo.get('eu-west-1');\n\n// Access attributes:\nregion.s3StaticWebsiteEndpoint; // s3-website-eu-west-1.amazonaws.com\nregion.servicePrincipal('logs.amazonaws.com'); // logs.eu-west-1.amazonaws.com\n```\n\nThe `RegionInfo` layer is built on top of the Low-Level API, which is described\nbelow and can be used to register additional data, including user-defined facts\nthat are not available through the `RegionInfo` interface.\n\n### Low-Level API\n\nThis library offers a primitive database of such information so that CDK\nconstructs can easily access regional information. The `FactName` class provides\na list of known fact names, which can then be used with the `RegionInfo` to\nretrieve a particular value:\n\n```ts\nimport { region_info as regionInfo } from 'aws-cdk-lib';\n\nconst codeDeployPrincipal = regionInfo.Fact.find('us-east-1', regionInfo.FactName.servicePrincipal('codedeploy.amazonaws.com'));\n// => codedeploy.us-east-1.amazonaws.com\n\nconst staticWebsite = regionInfo.Fact.find('ap-northeast-1', regionInfo.FactName.S3_STATIC_WEBSITE_ENDPOINT);\n// => s3-website-ap-northeast-1.amazonaws.com\n```\n\n## Supplying new or missing information\n\nAs new regions are released, it might happen that a particular fact you need is\nmissing from the library. 
In such cases, the `Fact.register` method can be used\nto inject FactName into the database:\n\n```ts\nregionInfo.Fact.register({\n region: 'bermuda-triangle-1',\n name: regionInfo.FactName.servicePrincipal('s3.amazonaws.com'),\n value: 's3-website.bermuda-triangle-1.nowhere.com',\n});\n```\n\n## Overriding incorrect information\n\nIn the event information provided by the library is incorrect, it can be\noverridden using the same `Fact.register` method demonstrated above, simply\nadding an extra boolean argument:\n\n```ts\nregionInfo.Fact.register({\n region: 'us-east-1',\n name: regionInfo.FactName.servicePrincipal('service.amazonaws.com'),\n value: 'the-correct-principal.amazonaws.com',\n}, true /* Allow overriding information */);\n```\n\nIf you happen to have stumbled upon incorrect data built into this library, it\nis always a good idea to report your findings in a [GitHub issue], so we can fix\nit for everyone else!\n\n[GitHub issue]: https://github.com/aws/aws-cdk/issues\n\n---\n\nThis module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk) project.\n"
|
|
3644
|
-
},
|
|
3645
2605
|
"targets": {
|
|
3646
2606
|
"dotnet": {
|
|
3647
2607
|
"namespace": "Amazon.CDK.RegionInfo"
|
|
@@ -3709,7 +2669,7 @@
|
|
|
3709
2669
|
},
|
|
3710
2670
|
"description": "CDK Constructs for deploying AWS Events Rule that inveokes AWS Lambda",
|
|
3711
2671
|
"homepage": "https://github.com/awslabs/aws-solutions-constructs.git",
|
|
3712
|
-
"jsiiVersion": "1.
|
|
2672
|
+
"jsiiVersion": "1.46.0 (build cd08c55)",
|
|
3713
2673
|
"keywords": [
|
|
3714
2674
|
"aws",
|
|
3715
2675
|
"cdk",
|
|
@@ -3739,8 +2699,8 @@
|
|
|
3739
2699
|
"targets": {
|
|
3740
2700
|
"dotnet": {
|
|
3741
2701
|
"iconUrl": "https://raw.githubusercontent.com/aws/aws-cdk/master/logo/default-256-dark.png",
|
|
3742
|
-
"namespace": "Amazon.
|
|
3743
|
-
"packageId": "Amazon.
|
|
2702
|
+
"namespace": "Amazon.SolutionsConstructs.AWS.EventbridgeLambda",
|
|
2703
|
+
"packageId": "Amazon.SolutionsConstructs.AWS.EventbridgeLambda",
|
|
3744
2704
|
"signAssembly": true
|
|
3745
2705
|
},
|
|
3746
2706
|
"java": {
|
|
@@ -3952,6 +2912,6 @@
|
|
|
3952
2912
|
"symbolId": "lib/index:EventbridgeToLambdaProps"
|
|
3953
2913
|
}
|
|
3954
2914
|
},
|
|
3955
|
-
"version": "2.0.0
|
|
3956
|
-
"fingerprint": "
|
|
2915
|
+
"version": "2.0.0",
|
|
2916
|
+
"fingerprint": "GLrB47PdPo/9A0tMBOe1I9HJWs81kP9pBGOcwHsZjnE="
|
|
3957
2917
|
}
|