cdk-comprehend-s3olap 2.0.69 → 2.0.72

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. package/.jsii +5 -5
  2. package/lib/cdk-comprehend-s3olap.js +2 -2
  3. package/lib/comprehend-lambdas.js +2 -2
  4. package/lib/iam-roles.js +4 -4
  5. package/node_modules/aws-sdk/CHANGELOG.md +20 -1
  6. package/node_modules/aws-sdk/README.md +1 -1
  7. package/node_modules/aws-sdk/apis/appmesh-2019-01-25.min.json +220 -167
  8. package/node_modules/aws-sdk/apis/chime-sdk-media-pipelines-2021-07-15.min.json +464 -31
  9. package/node_modules/aws-sdk/apis/chime-sdk-media-pipelines-2021-07-15.paginators.json +5 -0
  10. package/node_modules/aws-sdk/apis/dynamodb-2012-08-10.min.json +227 -33
  11. package/node_modules/aws-sdk/apis/dynamodb-2012-08-10.paginators.json +5 -0
  12. package/node_modules/aws-sdk/apis/ec2-2016-11-15.min.json +601 -558
  13. package/node_modules/aws-sdk/apis/kendra-2019-02-03.min.json +105 -77
  14. package/node_modules/aws-sdk/apis/lakeformation-2017-03-31.min.json +98 -66
  15. package/node_modules/aws-sdk/apis/lambda-2015-03-31.min.json +135 -111
  16. package/node_modules/aws-sdk/apis/models.lex.v2-2020-08-07.min.json +405 -123
  17. package/node_modules/aws-sdk/apis/monitoring-2010-08-01.min.json +104 -22
  18. package/node_modules/aws-sdk/apis/monitoring-2010-08-01.paginators.json +11 -0
  19. package/node_modules/aws-sdk/apis/networkmanager-2019-07-05.min.json +52 -51
  20. package/node_modules/aws-sdk/apis/rds-2014-10-31.min.json +11 -5
  21. package/node_modules/aws-sdk/apis/rekognition-2016-06-27.examples.json +103 -0
  22. package/node_modules/aws-sdk/apis/rekognition-2016-06-27.min.json +252 -141
  23. package/node_modules/aws-sdk/apis/rekognition-2016-06-27.paginators.json +6 -0
  24. package/node_modules/aws-sdk/clients/appmesh.d.ts +70 -3
  25. package/node_modules/aws-sdk/clients/chimesdkmediapipelines.d.ts +472 -43
  26. package/node_modules/aws-sdk/clients/cloudwatch.d.ts +100 -2
  27. package/node_modules/aws-sdk/clients/cognitoidentityserviceprovider.d.ts +2 -2
  28. package/node_modules/aws-sdk/clients/dynamodb.d.ts +276 -2
  29. package/node_modules/aws-sdk/clients/ec2.d.ts +53 -0
  30. package/node_modules/aws-sdk/clients/kendra.d.ts +38 -6
  31. package/node_modules/aws-sdk/clients/lakeformation.d.ts +48 -2
  32. package/node_modules/aws-sdk/clients/lambda.d.ts +45 -17
  33. package/node_modules/aws-sdk/clients/lexmodelsv2.d.ts +290 -2
  34. package/node_modules/aws-sdk/clients/networkmanager.d.ts +5 -0
  35. package/node_modules/aws-sdk/clients/rds.d.ts +36 -12
  36. package/node_modules/aws-sdk/clients/rekognition.d.ts +170 -3
  37. package/node_modules/aws-sdk/clients/secretsmanager.d.ts +11 -11
  38. package/node_modules/aws-sdk/clients/servicecatalog.d.ts +57 -57
  39. package/node_modules/aws-sdk/dist/aws-sdk-core-react-native.js +1 -1
  40. package/node_modules/aws-sdk/dist/aws-sdk-react-native.js +17 -17
  41. package/node_modules/aws-sdk/dist/aws-sdk.js +1355 -873
  42. package/node_modules/aws-sdk/dist/aws-sdk.min.js +71 -71
  43. package/node_modules/aws-sdk/lib/core.js +1 -1
  44. package/node_modules/aws-sdk/lib/dynamodb/document_client.d.ts +252 -2
  45. package/node_modules/aws-sdk/package.json +1 -1
  46. package/node_modules/esbuild/install.js +4 -4
  47. package/node_modules/esbuild/lib/main.js +7 -7
  48. package/node_modules/esbuild/package.json +22 -22
  49. package/node_modules/esbuild-linux-64/bin/esbuild +0 -0
  50. package/node_modules/esbuild-linux-64/package.json +1 -1
  51. package/node_modules/object.assign/CHANGELOG.md +4 -0
  52. package/node_modules/object.assign/dist/browser.js +944 -0
  53. package/node_modules/object.assign/package.json +3 -2
  54. package/package.json +10 -10
@@ -504,7 +504,7 @@ declare namespace Kendra {
504
504
  export type AccessControlConfigurationSummaryList = AccessControlConfigurationSummary[];
505
505
  export interface AccessControlListConfiguration {
506
506
  /**
507
- * Path to the Amazon Web Services S3 bucket that contains the ACL files.
507
+ * Path to the Amazon S3 bucket that contains the ACL files.
508
508
  */
509
509
  KeyPath?: S3ObjectKey;
510
510
  }
@@ -1003,6 +1003,10 @@ declare namespace Kendra {
1003
1003
  * A list of regular expression patterns to exclude certain blog posts, pages, spaces, or attachments in your Confluence. Content that matches the patterns are excluded from the index. Content that doesn't match the patterns is included in the index. If content matches both an inclusion and exclusion pattern, the exclusion pattern takes precedence and the content isn't included in the index.
1004
1004
  */
1005
1005
  ExclusionPatterns?: DataSourceInclusionsExclusionsStrings;
1006
+ /**
1007
+ * Configuration information to connect to your Confluence URL instance via a web proxy. You can use this option for Confluence Server. You must provide the website host name and port number. For example, the host name of https://a.example.com/page1.html is "a.example.com" and the port is 443, the standard port for HTTPS. Web proxy credentials are optional and you can use them to connect to a web proxy server that requires basic authentication of user name and password. To store web proxy credentials, you use a secret in Secrets Manager. It is recommended that you follow best security practices when configuring your web proxy. This includes setting up throttling, setting up logging and monitoring, and applying security patches on a regular basis. If you use your web proxy with multiple data sources, sync jobs that occur at the same time could strain the load on your proxy. It is recommended you prepare your proxy beforehand for any security and load requirements.
1008
+ */
1009
+ ProxyConfiguration?: ProxyConfiguration;
1006
1010
  }
1007
1011
  export interface ConfluencePageConfiguration {
1008
1012
  /**
@@ -1158,7 +1162,7 @@ declare namespace Kendra {
1158
1162
  }
1159
1163
  export interface CreateDataSourceRequest {
1160
1164
  /**
1161
- * A unique name for the data source connector. A data source name can't be changed without deleting and recreating the data source connector.
1165
+ * A name for the data source connector.
1162
1166
  */
1163
1167
  Name: DataSourceName;
1164
1168
  /**
@@ -1173,6 +1177,10 @@ declare namespace Kendra {
1173
1177
  * Configuration information to connect to your data source repository. You can't specify the Configuration parameter when the Type parameter is set to CUSTOM. If you do, you receive a ValidationException exception. The Configuration parameter is required for all other data sources.
1174
1178
  */
1175
1179
  Configuration?: DataSourceConfiguration;
1180
+ /**
1181
+ * Configuration information for an Amazon Virtual Private Cloud to connect to your data source. For more information, see Configuring a VPC.
1182
+ */
1183
+ VpcConfiguration?: DataSourceVpcConfiguration;
1176
1184
  /**
1177
1185
  * A description for the data source connector.
1178
1186
  */
@@ -1488,6 +1496,10 @@ declare namespace Kendra {
1488
1496
  * Provides the configuration information to connect to Alfresco as your data source.
1489
1497
  */
1490
1498
  AlfrescoConfiguration?: AlfrescoConfiguration;
1499
+ /**
1500
+ * Provides a template for the configuration information to connect to your data source.
1501
+ */
1502
+ TemplateConfiguration?: TemplateConfiguration;
1491
1503
  }
1492
1504
  export type DataSourceDateFieldFormat = string;
1493
1505
  export type DataSourceFieldName = string;
@@ -1623,7 +1635,7 @@ declare namespace Kendra {
1623
1635
  IndexFieldName: IndexFieldName;
1624
1636
  }
1625
1637
  export type DataSourceToIndexFieldMappingList = DataSourceToIndexFieldMapping[];
1626
- export type DataSourceType = "S3"|"SHAREPOINT"|"DATABASE"|"SALESFORCE"|"ONEDRIVE"|"SERVICENOW"|"CUSTOM"|"CONFLUENCE"|"GOOGLEDRIVE"|"WEBCRAWLER"|"WORKDOCS"|"FSX"|"SLACK"|"BOX"|"QUIP"|"JIRA"|"GITHUB"|"ALFRESCO"|string;
1638
+ export type DataSourceType = "S3"|"SHAREPOINT"|"DATABASE"|"SALESFORCE"|"ONEDRIVE"|"SERVICENOW"|"CUSTOM"|"CONFLUENCE"|"GOOGLEDRIVE"|"WEBCRAWLER"|"WORKDOCS"|"FSX"|"SLACK"|"BOX"|"QUIP"|"JIRA"|"GITHUB"|"ALFRESCO"|"TEMPLATE"|string;
1627
1639
  export interface DataSourceVpcConfiguration {
1628
1640
  /**
1629
1641
  * A list of identifiers for subnets within your Amazon VPC. The subnets should be able to connect to each other in the VPC, and they should have outgoing access to the Internet through a NAT device.
@@ -1801,7 +1813,7 @@ declare namespace Kendra {
1801
1813
  */
1802
1814
  IndexId?: IndexId;
1803
1815
  /**
1804
- * The name that you gave the data source when it was created.
1816
+ * The name for the data source.
1805
1817
  */
1806
1818
  Name?: DataSourceName;
1807
1819
  /**
@@ -1812,6 +1824,10 @@ declare namespace Kendra {
1812
1824
  * Configuration details for the data source. This shows how the data source is configured. The configuration options for a data source depend on the data source provider.
1813
1825
  */
1814
1826
  Configuration?: DataSourceConfiguration;
1827
+ /**
1828
+ * Configuration information for an Amazon Virtual Private Cloud to connect to your data source. For more information, see Configuring a VPC.
1829
+ */
1830
+ VpcConfiguration?: DataSourceVpcConfiguration;
1815
1831
  /**
1816
1832
  * The Unix timestamp of when the data source was created.
1817
1833
  */
@@ -3826,7 +3842,7 @@ declare namespace Kendra {
3826
3842
  */
3827
3843
  CrawlAttachments?: Boolean;
3828
3844
  /**
3829
- * The identifiers of the Quip folders you want to index.
3845
+ * The identifiers of the Quip folders you want to index. You can find the folder IDs in your browser URL when you access your folder in Quip. For example, https://quip-company.com/zlLuOVNSarTL/folder-name.
3830
3846
  */
3831
3847
  FolderIds?: FolderIdList;
3832
3848
  /**
@@ -4261,6 +4277,10 @@ declare namespace Kendra {
4261
4277
  * Whether you want to connect to SharePoint using basic authentication of user name and password, or OAuth authentication of user name, password, client ID, and client secret. You can use OAuth authentication for SharePoint Online.
4262
4278
  */
4263
4279
  AuthenticationType?: SharePointOnlineAuthenticationType;
4280
+ /**
4281
+ * Configuration information to connect to your Microsoft SharePoint site URLs via a web proxy. You can use this option for SharePoint Server. You must provide the website host name and port number. For example, the host name of https://a.example.com/page1.html is "a.example.com" and the port is 443, the standard port for HTTPS. Web proxy credentials are optional and you can use them to connect to a web proxy server that requires basic authentication of user name and password. To store web proxy credentials, you use a secret in Secrets Manager. It is recommended that you follow best security practices when configuring your web proxy. This includes setting up throttling, setting up logging and monitoring, and applying security patches on a regular basis. If you use your web proxy with multiple data sources, sync jobs that occur at the same time could strain the load on your proxy. It is recommended you prepare your proxy beforehand for any security and load requirements.
4282
+ */
4283
+ ProxyConfiguration?: ProxyConfiguration;
4264
4284
  }
4265
4285
  export type SharePointOnlineAuthenticationType = "HTTP_BASIC"|"OAUTH2"|string;
4266
4286
  export type SharePointUrlList = Url[];
@@ -4509,6 +4529,14 @@ declare namespace Kendra {
4509
4529
  }
4510
4530
  export type TagValue = string;
4511
4531
  export type TeamId = string;
4532
+ export interface Template {
4533
+ }
4534
+ export interface TemplateConfiguration {
4535
+ /**
4536
+ * The template schema used for the data source. The following links to the template schema for data sources where templates are supported: Zendesk template schema
4537
+ */
4538
+ Template?: Template;
4539
+ }
4512
4540
  export type TenantDomain = string;
4513
4541
  export interface TextDocumentStatistics {
4514
4542
  /**
@@ -4617,7 +4645,7 @@ declare namespace Kendra {
4617
4645
  */
4618
4646
  Id: DataSourceId;
4619
4647
  /**
4620
- * A new name for the data source connector. You must first delete the data source and re-create it to change the name of the data source.
4648
+ * A new name for the data source connector.
4621
4649
  */
4622
4650
  Name?: DataSourceName;
4623
4651
  /**
@@ -4628,6 +4656,10 @@ declare namespace Kendra {
4628
4656
  * Configuration information you want to update for the data source connector.
4629
4657
  */
4630
4658
  Configuration?: DataSourceConfiguration;
4659
+ /**
4660
+ * Configuration information for an Amazon Virtual Private Cloud to connect to your data source. For more information, see Configuring a VPC.
4661
+ */
4662
+ VpcConfiguration?: DataSourceVpcConfiguration;
4631
4663
  /**
4632
4664
  * A new description for the data source connector.
4633
4665
  */
@@ -20,6 +20,14 @@ declare class LakeFormation extends Service {
20
20
  * Attaches one or more LF-tags to an existing resource.
21
21
  */
22
22
  addLFTagsToResource(callback?: (err: AWSError, data: LakeFormation.Types.AddLFTagsToResourceResponse) => void): Request<LakeFormation.Types.AddLFTagsToResourceResponse, AWSError>;
23
+ /**
24
+ * Allows a caller to assume an IAM role decorated as the SAML user specified in the SAML assertion included in the request. This decoration allows Lake Formation to enforce access policies against the SAML users and groups. This API operation requires SAML federation setup in the caller’s account as it can only be called with valid SAML assertions. Lake Formation does not scope down the permission of the assumed role. All permissions attached to the role via the SAML federation setup will be included in the role session. This decorated role is expected to access data in Amazon S3 by getting temporary access from Lake Formation which is authorized via the virtual API GetDataAccess. Therefore, all SAML roles that can be assumed via AssumeDecoratedRoleWithSAML must at a minimum include lakeformation:GetDataAccess in their role policies. A typical IAM policy attached to such a role would look as follows:
25
+ */
26
+ assumeDecoratedRoleWithSAML(params: LakeFormation.Types.AssumeDecoratedRoleWithSAMLRequest, callback?: (err: AWSError, data: LakeFormation.Types.AssumeDecoratedRoleWithSAMLResponse) => void): Request<LakeFormation.Types.AssumeDecoratedRoleWithSAMLResponse, AWSError>;
27
+ /**
28
+ * Allows a caller to assume an IAM role decorated as the SAML user specified in the SAML assertion included in the request. This decoration allows Lake Formation to enforce access policies against the SAML users and groups. This API operation requires SAML federation setup in the caller’s account as it can only be called with valid SAML assertions. Lake Formation does not scope down the permission of the assumed role. All permissions attached to the role via the SAML federation setup will be included in the role session. This decorated role is expected to access data in Amazon S3 by getting temporary access from Lake Formation which is authorized via the virtual API GetDataAccess. Therefore, all SAML roles that can be assumed via AssumeDecoratedRoleWithSAML must at a minimum include lakeformation:GetDataAccess in their role policies. A typical IAM policy attached to such a role would look as follows:
29
+ */
30
+ assumeDecoratedRoleWithSAML(callback?: (err: AWSError, data: LakeFormation.Types.AssumeDecoratedRoleWithSAMLResponse) => void): Request<LakeFormation.Types.AssumeDecoratedRoleWithSAMLResponse, AWSError>;
23
31
  /**
24
32
  * Batch operation to grant permissions to the principal.
25
33
  */
@@ -77,11 +85,11 @@ declare class LakeFormation extends Service {
77
85
  */
78
86
  deleteDataCellsFilter(callback?: (err: AWSError, data: LakeFormation.Types.DeleteDataCellsFilterResponse) => void): Request<LakeFormation.Types.DeleteDataCellsFilterResponse, AWSError>;
79
87
  /**
80
- * Deletes the specified LF-tag key name. If the attribute key does not exist or the LF-tag does not exist, then the operation will not do anything. If the attribute key exists, then the operation checks if any resources are tagged with this attribute key, if yes, the API throws a 400 Exception with the message "Delete not allowed" as the LF-tag key is still attached with resources. You can consider untagging resources with this LF-tag key.
88
+ * Deletes the specified LF-tag given a key name. If the input parameter tag key was not found, then the operation will throw an exception. When you delete an LF-tag, the LFTagPolicy attached to the LF-tag becomes invalid. If the deleted LF-tag was still assigned to any resource, the tag policy attached to the deleted LF-tag will no longer be applied to the resource.
81
89
  */
82
90
  deleteLFTag(params: LakeFormation.Types.DeleteLFTagRequest, callback?: (err: AWSError, data: LakeFormation.Types.DeleteLFTagResponse) => void): Request<LakeFormation.Types.DeleteLFTagResponse, AWSError>;
83
91
  /**
84
- * Deletes the specified LF-tag key name. If the attribute key does not exist or the LF-tag does not exist, then the operation will not do anything. If the attribute key exists, then the operation checks if any resources are tagged with this attribute key, if yes, the API throws a 400 Exception with the message "Delete not allowed" as the LF-tag key is still attached with resources. You can consider untagging resources with this LF-tag key.
92
+ * Deletes the specified LF-tag given a key name. If the input parameter tag key was not found, then the operation will throw an exception. When you delete an LF-tag, the LFTagPolicy attached to the LF-tag becomes invalid. If the deleted LF-tag was still assigned to any resource, the tag policy attached to the deleted LF-tag will no longer be applied to the resource.
85
93
  */
86
94
  deleteLFTag(callback?: (err: AWSError, data: LakeFormation.Types.DeleteLFTagResponse) => void): Request<LakeFormation.Types.DeleteLFTagResponse, AWSError>;
87
95
  /**
@@ -407,6 +415,42 @@ declare namespace LakeFormation {
407
415
  }
408
416
  export interface AllRowsWildcard {
409
417
  }
418
+ export interface AssumeDecoratedRoleWithSAMLRequest {
419
+ /**
420
+ * A SAML assertion consisting of an assertion statement for the user who needs temporary credentials. This must match the SAML assertion that was issued to IAM. This must be Base64 encoded.
421
+ */
422
+ SAMLAssertion: SAMLAssertionString;
423
+ /**
424
+ * The role that represents an IAM principal whose scope down policy allows it to call credential vending APIs such as GetTemporaryTableCredentials. The caller must also have iam:PassRole permission on this role.
425
+ */
426
+ RoleArn: IAMRoleArn;
427
+ /**
428
+ * The Amazon Resource Name (ARN) of the SAML provider in IAM that describes the IdP.
429
+ */
430
+ PrincipalArn: IAMSAMLProviderArn;
431
+ /**
432
+ * The time period, between 900 and 43,200 seconds, for the timeout of the temporary credentials.
433
+ */
434
+ DurationSeconds?: CredentialTimeoutDurationSecondInteger;
435
+ }
436
+ export interface AssumeDecoratedRoleWithSAMLResponse {
437
+ /**
438
+ * The access key ID for the temporary credentials. (The access key consists of an access key ID and a secret key).
439
+ */
440
+ AccessKeyId?: AccessKeyIdString;
441
+ /**
442
+ * The secret key for the temporary credentials. (The access key consists of an access key ID and a secret key).
443
+ */
444
+ SecretAccessKey?: SecretAccessKeyString;
445
+ /**
446
+ * The session token for the temporary credentials.
447
+ */
448
+ SessionToken?: SessionTokenString;
449
+ /**
450
+ * The date and time when the temporary credentials expire.
451
+ */
452
+ Expiration?: ExpirationTimestamp;
453
+ }
410
454
  export interface AuditContext {
411
455
  /**
412
456
  * The filter engine can populate the 'AdditionalAuditContext' information with the request ID for you to track. This information will be displayed in CloudTrail log in your account.
@@ -1154,6 +1198,7 @@ declare namespace LakeFormation {
1154
1198
  export interface GrantPermissionsResponse {
1155
1199
  }
1156
1200
  export type IAMRoleArn = string;
1201
+ export type IAMSAMLProviderArn = string;
1157
1202
  export type Identifier = string;
1158
1203
  export type Integer = number;
1159
1204
  export interface LFTag {
@@ -1652,6 +1697,7 @@ declare namespace LakeFormation {
1652
1697
  */
1653
1698
  AllRowsWildcard?: AllRowsWildcard;
1654
1699
  }
1700
+ export type SAMLAssertionString = string;
1655
1701
  export interface SearchDatabasesByLFTagsRequest {
1656
1702
  /**
1657
1703
  * A continuation token, if this is not the first call to retrieve this list.
@@ -46,11 +46,11 @@ declare class Lambda extends Service {
46
46
  */
47
47
  createCodeSigningConfig(callback?: (err: AWSError, data: Lambda.Types.CreateCodeSigningConfigResponse) => void): Request<Lambda.Types.CreateCodeSigningConfigResponse, AWSError>;
48
48
  /**
49
- * Creates a mapping between an event source and an Lambda function. Lambda reads items from the event source and triggers the function. For details about how to configure different event sources, see the following topics. Amazon DynamoDB Streams Amazon Kinesis Amazon SQS Amazon MQ and RabbitMQ Amazon MSK Apache Kafka The following error handling options are only available for stream sources (DynamoDB and Kinesis): BisectBatchOnFunctionError - If the function returns an error, split the batch in two and retry. DestinationConfig - Send discarded records to an Amazon SQS queue or Amazon SNS topic. MaximumRecordAgeInSeconds - Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires MaximumRetryAttempts - Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires. ParallelizationFactor - Process multiple batches from each shard concurrently. For information about which configuration parameters apply to each event source, see the following topics. Amazon DynamoDB Streams Amazon Kinesis Amazon SQS Amazon MQ and RabbitMQ Amazon MSK Apache Kafka
49
+ * Creates a mapping between an event source and an Lambda function. Lambda reads items from the event source and invokes the function. For details about how to configure different event sources, see the following topics. Amazon DynamoDB Streams Amazon Kinesis Amazon SQS Amazon MQ and RabbitMQ Amazon MSK Apache Kafka The following error handling options are available only for stream sources (DynamoDB and Kinesis): BisectBatchOnFunctionError - If the function returns an error, split the batch in two and retry. DestinationConfig - Send discarded records to an Amazon SQS queue or Amazon SNS topic. MaximumRecordAgeInSeconds - Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires MaximumRetryAttempts - Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires. ParallelizationFactor - Process multiple batches from each shard concurrently. For information about which configuration parameters apply to each event source, see the following topics. Amazon DynamoDB Streams Amazon Kinesis Amazon SQS Amazon MQ and RabbitMQ Amazon MSK Apache Kafka
50
50
  */
51
51
  createEventSourceMapping(params: Lambda.Types.CreateEventSourceMappingRequest, callback?: (err: AWSError, data: Lambda.Types.EventSourceMappingConfiguration) => void): Request<Lambda.Types.EventSourceMappingConfiguration, AWSError>;
52
52
  /**
53
- * Creates a mapping between an event source and an Lambda function. Lambda reads items from the event source and triggers the function. For details about how to configure different event sources, see the following topics. Amazon DynamoDB Streams Amazon Kinesis Amazon SQS Amazon MQ and RabbitMQ Amazon MSK Apache Kafka The following error handling options are only available for stream sources (DynamoDB and Kinesis): BisectBatchOnFunctionError - If the function returns an error, split the batch in two and retry. DestinationConfig - Send discarded records to an Amazon SQS queue or Amazon SNS topic. MaximumRecordAgeInSeconds - Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires MaximumRetryAttempts - Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires. ParallelizationFactor - Process multiple batches from each shard concurrently. For information about which configuration parameters apply to each event source, see the following topics. Amazon DynamoDB Streams Amazon Kinesis Amazon SQS Amazon MQ and RabbitMQ Amazon MSK Apache Kafka
53
+ * Creates a mapping between an event source and an Lambda function. Lambda reads items from the event source and invokes the function. For details about how to configure different event sources, see the following topics. Amazon DynamoDB Streams Amazon Kinesis Amazon SQS Amazon MQ and RabbitMQ Amazon MSK Apache Kafka The following error handling options are available only for stream sources (DynamoDB and Kinesis): BisectBatchOnFunctionError - If the function returns an error, split the batch in two and retry. DestinationConfig - Send discarded records to an Amazon SQS queue or Amazon SNS topic. MaximumRecordAgeInSeconds - Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires MaximumRetryAttempts - Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires. ParallelizationFactor - Process multiple batches from each shard concurrently. For information about which configuration parameters apply to each event source, see the following topics. Amazon DynamoDB Streams Amazon Kinesis Amazon SQS Amazon MQ and RabbitMQ Amazon MSK Apache Kafka
54
54
  */
55
55
  createEventSourceMapping(callback?: (err: AWSError, data: Lambda.Types.EventSourceMappingConfiguration) => void): Request<Lambda.Types.EventSourceMappingConfiguration, AWSError>;
56
56
  /**
@@ -270,11 +270,11 @@ declare class Lambda extends Service {
270
270
  */
271
271
  getProvisionedConcurrencyConfig(callback?: (err: AWSError, data: Lambda.Types.GetProvisionedConcurrencyConfigResponse) => void): Request<Lambda.Types.GetProvisionedConcurrencyConfigResponse, AWSError>;
272
272
  /**
273
- * Invokes a Lambda function. You can invoke a function synchronously (and wait for the response), or asynchronously. To invoke a function asynchronously, set InvocationType to Event. For synchronous invocation, details about the function response, including errors, are included in the response body and headers. For either invocation type, you can find more information in the execution log and trace. When an error occurs, your function may be invoked multiple times. Retry behavior varies by error type, client, event source, and invocation type. For example, if you invoke a function asynchronously and it returns an error, Lambda executes the function up to two more times. For more information, see Retry Behavior. For asynchronous invocation, Lambda adds events to a queue before sending them to your function. If your function does not have enough capacity to keep up with the queue, events may be lost. Occasionally, your function may receive the same event multiple times, even if no error occurs. To retain events that were not processed, configure your function with a dead-letter queue. The status code in the API response doesn't reflect function errors. Error codes are reserved for errors that prevent your function from executing, such as permissions errors, limit errors, or issues with your function's code and configuration. For example, Lambda returns TooManyRequestsException if executing the function would cause you to exceed a concurrency limit at either the account level (ConcurrentInvocationLimitExceeded) or function level (ReservedFunctionConcurrentInvocationLimitExceeded). For functions with a long timeout, your client might be disconnected during synchronous invocation while it waits for a response. Configure your HTTP client, SDK, firewall, proxy, or operating system to allow for long connections with timeout or keep-alive settings. This operation requires permission for the lambda:InvokeFunction action.
273
+ * Invokes a Lambda function. You can invoke a function synchronously (and wait for the response), or asynchronously. To invoke a function asynchronously, set InvocationType to Event. For synchronous invocation, details about the function response, including errors, are included in the response body and headers. For either invocation type, you can find more information in the execution log and trace. When an error occurs, your function may be invoked multiple times. Retry behavior varies by error type, client, event source, and invocation type. For example, if you invoke a function asynchronously and it returns an error, Lambda executes the function up to two more times. For more information, see Retry Behavior. For asynchronous invocation, Lambda adds events to a queue before sending them to your function. If your function does not have enough capacity to keep up with the queue, events may be lost. Occasionally, your function may receive the same event multiple times, even if no error occurs. To retain events that were not processed, configure your function with a dead-letter queue. The status code in the API response doesn't reflect function errors. Error codes are reserved for errors that prevent your function from executing, such as permissions errors, limit errors, or issues with your function's code and configuration. For example, Lambda returns TooManyRequestsException if executing the function would cause you to exceed a concurrency limit at either the account level (ConcurrentInvocationLimitExceeded) or function level (ReservedFunctionConcurrentInvocationLimitExceeded). For functions with a long timeout, your client might be disconnected during synchronous invocation while it waits for a response. Configure your HTTP client, SDK, firewall, proxy, or operating system to allow for long connections with timeout or keep-alive settings. This operation requires permission for the lambda:InvokeFunction action. 
For details on how to set up permissions for cross-account invocations, see Granting function access to other accounts.
274
274
  */
275
275
  invoke(params: Lambda.Types.InvocationRequest, callback?: (err: AWSError, data: Lambda.Types.InvocationResponse) => void): Request<Lambda.Types.InvocationResponse, AWSError>;
276
276
  /**
277
- * Invokes a Lambda function. You can invoke a function synchronously (and wait for the response), or asynchronously. To invoke a function asynchronously, set InvocationType to Event. For synchronous invocation, details about the function response, including errors, are included in the response body and headers. For either invocation type, you can find more information in the execution log and trace. When an error occurs, your function may be invoked multiple times. Retry behavior varies by error type, client, event source, and invocation type. For example, if you invoke a function asynchronously and it returns an error, Lambda executes the function up to two more times. For more information, see Retry Behavior. For asynchronous invocation, Lambda adds events to a queue before sending them to your function. If your function does not have enough capacity to keep up with the queue, events may be lost. Occasionally, your function may receive the same event multiple times, even if no error occurs. To retain events that were not processed, configure your function with a dead-letter queue. The status code in the API response doesn't reflect function errors. Error codes are reserved for errors that prevent your function from executing, such as permissions errors, limit errors, or issues with your function's code and configuration. For example, Lambda returns TooManyRequestsException if executing the function would cause you to exceed a concurrency limit at either the account level (ConcurrentInvocationLimitExceeded) or function level (ReservedFunctionConcurrentInvocationLimitExceeded). For functions with a long timeout, your client might be disconnected during synchronous invocation while it waits for a response. Configure your HTTP client, SDK, firewall, proxy, or operating system to allow for long connections with timeout or keep-alive settings. This operation requires permission for the lambda:InvokeFunction action.
277
+ * Invokes a Lambda function. You can invoke a function synchronously (and wait for the response), or asynchronously. To invoke a function asynchronously, set InvocationType to Event. For synchronous invocation, details about the function response, including errors, are included in the response body and headers. For either invocation type, you can find more information in the execution log and trace. When an error occurs, your function may be invoked multiple times. Retry behavior varies by error type, client, event source, and invocation type. For example, if you invoke a function asynchronously and it returns an error, Lambda executes the function up to two more times. For more information, see Retry Behavior. For asynchronous invocation, Lambda adds events to a queue before sending them to your function. If your function does not have enough capacity to keep up with the queue, events may be lost. Occasionally, your function may receive the same event multiple times, even if no error occurs. To retain events that were not processed, configure your function with a dead-letter queue. The status code in the API response doesn't reflect function errors. Error codes are reserved for errors that prevent your function from executing, such as permissions errors, limit errors, or issues with your function's code and configuration. For example, Lambda returns TooManyRequestsException if executing the function would cause you to exceed a concurrency limit at either the account level (ConcurrentInvocationLimitExceeded) or function level (ReservedFunctionConcurrentInvocationLimitExceeded). For functions with a long timeout, your client might be disconnected during synchronous invocation while it waits for a response. Configure your HTTP client, SDK, firewall, proxy, or operating system to allow for long connections with timeout or keep-alive settings. This operation requires permission for the lambda:InvokeFunction action. 
For details on how to set up permissions for cross-account invocations, see Granting function access to other accounts.
278
278
  */
279
279
  invoke(callback?: (err: AWSError, data: Lambda.Types.InvocationResponse) => void): Request<Lambda.Types.InvocationResponse, AWSError>;
280
280
  /**
@@ -302,11 +302,11 @@ declare class Lambda extends Service {
302
302
  */
303
303
  listCodeSigningConfigs(callback?: (err: AWSError, data: Lambda.Types.ListCodeSigningConfigsResponse) => void): Request<Lambda.Types.ListCodeSigningConfigsResponse, AWSError>;
304
304
  /**
305
- * Lists event source mappings. Specify an EventSourceArn to only show event source mappings for a single event source.
305
+ * Lists event source mappings. Specify an EventSourceArn to show only event source mappings for a single event source.
306
306
  */
307
307
  listEventSourceMappings(params: Lambda.Types.ListEventSourceMappingsRequest, callback?: (err: AWSError, data: Lambda.Types.ListEventSourceMappingsResponse) => void): Request<Lambda.Types.ListEventSourceMappingsResponse, AWSError>;
308
308
  /**
309
- * Lists event source mappings. Specify an EventSourceArn to only show event source mappings for a single event source.
309
+ * Lists event source mappings. Specify an EventSourceArn to show only event source mappings for a single event source.
310
310
  */
311
311
  listEventSourceMappings(callback?: (err: AWSError, data: Lambda.Types.ListEventSourceMappingsResponse) => void): Request<Lambda.Types.ListEventSourceMappingsResponse, AWSError>;
312
312
  /**
@@ -478,11 +478,11 @@ declare class Lambda extends Service {
478
478
  */
479
479
  updateCodeSigningConfig(callback?: (err: AWSError, data: Lambda.Types.UpdateCodeSigningConfigResponse) => void): Request<Lambda.Types.UpdateCodeSigningConfigResponse, AWSError>;
480
480
  /**
481
- * Updates an event source mapping. You can change the function that Lambda invokes, or pause invocation and resume later from the same location. For details about how to configure different event sources, see the following topics. Amazon DynamoDB Streams Amazon Kinesis Amazon SQS Amazon MQ and RabbitMQ Amazon MSK Apache Kafka The following error handling options are only available for stream sources (DynamoDB and Kinesis): BisectBatchOnFunctionError - If the function returns an error, split the batch in two and retry. DestinationConfig - Send discarded records to an Amazon SQS queue or Amazon SNS topic. MaximumRecordAgeInSeconds - Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires MaximumRetryAttempts - Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires. ParallelizationFactor - Process multiple batches from each shard concurrently. For information about which configuration parameters apply to each event source, see the following topics. Amazon DynamoDB Streams Amazon Kinesis Amazon SQS Amazon MQ and RabbitMQ Amazon MSK Apache Kafka
481
+ * Updates an event source mapping. You can change the function that Lambda invokes, or pause invocation and resume later from the same location. For details about how to configure different event sources, see the following topics. Amazon DynamoDB Streams Amazon Kinesis Amazon SQS Amazon MQ and RabbitMQ Amazon MSK Apache Kafka The following error handling options are available only for stream sources (DynamoDB and Kinesis): BisectBatchOnFunctionError - If the function returns an error, split the batch in two and retry. DestinationConfig - Send discarded records to an Amazon SQS queue or Amazon SNS topic. MaximumRecordAgeInSeconds - Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires MaximumRetryAttempts - Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires. ParallelizationFactor - Process multiple batches from each shard concurrently. For information about which configuration parameters apply to each event source, see the following topics. Amazon DynamoDB Streams Amazon Kinesis Amazon SQS Amazon MQ and RabbitMQ Amazon MSK Apache Kafka
482
482
  */
483
483
  updateEventSourceMapping(params: Lambda.Types.UpdateEventSourceMappingRequest, callback?: (err: AWSError, data: Lambda.Types.EventSourceMappingConfiguration) => void): Request<Lambda.Types.EventSourceMappingConfiguration, AWSError>;
484
484
  /**
485
- * Updates an event source mapping. You can change the function that Lambda invokes, or pause invocation and resume later from the same location. For details about how to configure different event sources, see the following topics. Amazon DynamoDB Streams Amazon Kinesis Amazon SQS Amazon MQ and RabbitMQ Amazon MSK Apache Kafka The following error handling options are only available for stream sources (DynamoDB and Kinesis): BisectBatchOnFunctionError - If the function returns an error, split the batch in two and retry. DestinationConfig - Send discarded records to an Amazon SQS queue or Amazon SNS topic. MaximumRecordAgeInSeconds - Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires MaximumRetryAttempts - Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires. ParallelizationFactor - Process multiple batches from each shard concurrently. For information about which configuration parameters apply to each event source, see the following topics. Amazon DynamoDB Streams Amazon Kinesis Amazon SQS Amazon MQ and RabbitMQ Amazon MSK Apache Kafka
485
+ * Updates an event source mapping. You can change the function that Lambda invokes, or pause invocation and resume later from the same location. For details about how to configure different event sources, see the following topics. Amazon DynamoDB Streams Amazon Kinesis Amazon SQS Amazon MQ and RabbitMQ Amazon MSK Apache Kafka The following error handling options are available only for stream sources (DynamoDB and Kinesis): BisectBatchOnFunctionError - If the function returns an error, split the batch in two and retry. DestinationConfig - Send discarded records to an Amazon SQS queue or Amazon SNS topic. MaximumRecordAgeInSeconds - Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires MaximumRetryAttempts - Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires. ParallelizationFactor - Process multiple batches from each shard concurrently. For information about which configuration parameters apply to each event source, see the following topics. Amazon DynamoDB Streams Amazon Kinesis Amazon SQS Amazon MQ and RabbitMQ Amazon MSK Apache Kafka
486
486
  */
487
487
  updateEventSourceMapping(callback?: (err: AWSError, data: Lambda.Types.EventSourceMappingConfiguration) => void): Request<Lambda.Types.EventSourceMappingConfiguration, AWSError>;
488
488
  /**
@@ -729,6 +729,12 @@ declare namespace Lambda {
729
729
  */
730
730
  SigningProfileVersionArns: SigningProfileVersionArns;
731
731
  }
732
+ export interface AmazonManagedKafkaEventSourceConfig {
733
+ /**
734
+ * The identifier for the Kafka consumer group to join. The consumer group ID must be unique among all your Kafka event sources. After creating a Kafka event source mapping with the consumer group ID specified, you cannot update this value. For more information, see services-msk-consumer-group-id.
735
+ */
736
+ ConsumerGroupId?: URI;
737
+ }
732
738
  export type Architecture = "x86_64"|"arm64"|string;
733
739
  export type ArchitecturesList = Architecture[];
734
740
  export type Arn = string;
@@ -863,7 +869,7 @@ declare namespace Lambda {
863
869
  */
864
870
  Enabled?: Enabled;
865
871
  /**
866
- * The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB). Amazon Kinesis - Default 100. Max 10,000. Amazon DynamoDB Streams - Default 100. Max 10,000. Amazon Simple Queue Service - Default 10. For standard queues the max is 10,000. For FIFO queues the max is 10. Amazon Managed Streaming for Apache Kafka - Default 100. Max 10,000. Self-Managed Apache Kafka - Default 100. Max 10,000. Amazon MQ (ActiveMQ and RabbitMQ) - Default 100. Max 10,000.
872
+ * The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB). Amazon Kinesis - Default 100. Max 10,000. Amazon DynamoDB Streams - Default 100. Max 10,000. Amazon Simple Queue Service - Default 10. For standard queues the max is 10,000. For FIFO queues the max is 10. Amazon Managed Streaming for Apache Kafka - Default 100. Max 10,000. Self-managed Apache Kafka - Default 100. Max 10,000. Amazon MQ (ActiveMQ and RabbitMQ) - Default 100. Max 10,000.
867
873
  */
868
874
  BatchSize?: BatchSize;
869
875
  /**
@@ -879,7 +885,7 @@ declare namespace Lambda {
879
885
  */
880
886
  ParallelizationFactor?: ParallelizationFactor;
881
887
  /**
882
- * The position in a stream from which to start reading. Required for Amazon Kinesis, Amazon DynamoDB, and Amazon MSK Streams sources. AT_TIMESTAMP is only supported for Amazon Kinesis streams.
888
+ * The position in a stream from which to start reading. Required for Amazon Kinesis, Amazon DynamoDB, and Amazon MSK Streams sources. AT_TIMESTAMP is supported only for Amazon Kinesis streams.
883
889
  */
884
890
  StartingPosition?: EventSourcePosition;
885
891
  /**
@@ -899,11 +905,11 @@ declare namespace Lambda {
899
905
  */
900
906
  BisectBatchOnFunctionError?: BisectBatchOnFunctionError;
901
907
  /**
902
- * (Streams only) Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records will be retried until the record expires.
908
+ * (Streams only) Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
903
909
  */
904
910
  MaximumRetryAttempts?: MaximumRetryAttemptsEventSourceMapping;
905
911
  /**
906
- * (Streams only) The duration in seconds of a processing window. The range is between 1 second up to 900 seconds.
912
+ * (Streams only) The duration in seconds of a processing window. The range is between 1 second and 900 seconds.
907
913
  */
908
914
  TumblingWindowInSeconds?: TumblingWindowInSeconds;
909
915
  /**
@@ -919,13 +925,21 @@ declare namespace Lambda {
919
925
  */
920
926
  SourceAccessConfigurations?: SourceAccessConfigurations;
921
927
  /**
922
- * The Self-Managed Apache Kafka cluster to send records.
928
+ * The self-managed Apache Kafka cluster to receive records from.
923
929
  */
924
930
  SelfManagedEventSource?: SelfManagedEventSource;
925
931
  /**
926
932
  * (Streams and Amazon SQS) A list of current response type enums applied to the event source mapping.
927
933
  */
928
934
  FunctionResponseTypes?: FunctionResponseTypeList;
935
+ /**
936
+ * Specific configuration settings for an Amazon Managed Streaming for Apache Kafka (Amazon MSK) event source.
937
+ */
938
+ AmazonManagedKafkaEventSourceConfig?: AmazonManagedKafkaEventSourceConfig;
939
+ /**
940
+ * Specific configuration settings for a self-managed Apache Kafka event source.
941
+ */
942
+ SelfManagedKafkaEventSourceConfig?: SelfManagedKafkaEventSourceConfig;
929
943
  }
930
944
  export interface CreateFunctionRequest {
931
945
  /**
@@ -1296,6 +1310,14 @@ declare namespace Lambda {
1296
1310
  * (Streams and Amazon SQS) A list of current response type enums applied to the event source mapping.
1297
1311
  */
1298
1312
  FunctionResponseTypes?: FunctionResponseTypeList;
1313
+ /**
1314
+ * Specific configuration settings for an Amazon Managed Streaming for Apache Kafka (Amazon MSK) event source.
1315
+ */
1316
+ AmazonManagedKafkaEventSourceConfig?: AmazonManagedKafkaEventSourceConfig;
1317
+ /**
1318
+ * Specific configuration settings for a self-managed Apache Kafka event source.
1319
+ */
1320
+ SelfManagedKafkaEventSourceConfig?: SelfManagedKafkaEventSourceConfig;
1299
1321
  }
1300
1322
  export type EventSourceMappingsList = EventSourceMappingConfiguration[];
1301
1323
  export type EventSourcePosition = "TRIM_HORIZON"|"LATEST"|"AT_TIMESTAMP"|string;
@@ -2659,11 +2681,17 @@ declare namespace Lambda {
2659
2681
  */
2660
2682
  Endpoints?: Endpoints;
2661
2683
  }
2684
+ export interface SelfManagedKafkaEventSourceConfig {
2685
+ /**
2686
+ * The identifier for the Kafka consumer group to join. The consumer group ID must be unique among all your Kafka event sources. After creating a Kafka event source mapping with the consumer group ID specified, you cannot update this value. For more information, see services-msk-consumer-group-id.
2687
+ */
2688
+ ConsumerGroupId?: URI;
2689
+ }
2662
2690
  export type SensitiveString = string;
2663
2691
  export type SigningProfileVersionArns = Arn[];
2664
2692
  export interface SourceAccessConfiguration {
2665
2693
  /**
2666
- * The type of authentication protocol, VPC components, or virtual host for your event source. For example: "Type":"SASL_SCRAM_512_AUTH". BASIC_AUTH - (Amazon MQ) The Secrets Manager secret that stores your broker credentials. BASIC_AUTH - (Self-managed Apache Kafka) The Secrets Manager ARN of your secret key used for SASL/PLAIN authentication of your Apache Kafka brokers. VPC_SUBNET - The subnets associated with your VPC. Lambda connects to these subnets to fetch data from your self-managed Apache Kafka cluster. VPC_SECURITY_GROUP - The VPC security group used to manage access to your self-managed Apache Kafka brokers. SASL_SCRAM_256_AUTH - The Secrets Manager ARN of your secret key used for SASL SCRAM-256 authentication of your self-managed Apache Kafka brokers. SASL_SCRAM_512_AUTH - The Secrets Manager ARN of your secret key used for SASL SCRAM-512 authentication of your self-managed Apache Kafka brokers. VIRTUAL_HOST - (Amazon MQ) The name of the virtual host in your RabbitMQ broker. Lambda uses this RabbitMQ host as the event source. This property cannot be specified in an UpdateEventSourceMapping API call. CLIENT_CERTIFICATE_TLS_AUTH - (Amazon MSK, Self-managed Apache Kafka) The Secrets Manager ARN of your secret key containing the certificate chain (X.509 PEM), private key (PKCS#8 PEM), and private key password (optional) used for mutual TLS authentication of your MSK/Apache Kafka brokers. SERVER_ROOT_CA_CERTIFICATE - (Self-managed Apache Kafka) The Secrets Manager ARN of your secret key containing the root CA certificate (X.509 PEM) used for TLS encryption of your Apache Kafka brokers.
2694
+ * The type of authentication protocol, VPC components, or virtual host for your event source. For example: "Type":"SASL_SCRAM_512_AUTH". BASIC_AUTH - (Amazon MQ) The Secrets Manager secret that stores your broker credentials. BASIC_AUTH - (Self-managed Apache Kafka) The Secrets Manager ARN of your secret key used for SASL/PLAIN authentication of your Apache Kafka brokers. VPC_SUBNET - The subnets associated with your VPC. Lambda connects to these subnets to fetch data from your self-managed Apache Kafka cluster. VPC_SECURITY_GROUP - The VPC security group used to manage access to your self-managed Apache Kafka brokers. SASL_SCRAM_256_AUTH - The Secrets Manager ARN of your secret key used for SASL SCRAM-256 authentication of your self-managed Apache Kafka brokers. SASL_SCRAM_512_AUTH - The Secrets Manager ARN of your secret key used for SASL SCRAM-512 authentication of your self-managed Apache Kafka brokers. VIRTUAL_HOST - (Amazon MQ) The name of the virtual host in your RabbitMQ broker. Lambda uses this RabbitMQ host as the event source. This property cannot be specified in an UpdateEventSourceMapping API call. CLIENT_CERTIFICATE_TLS_AUTH - (Amazon MSK, self-managed Apache Kafka) The Secrets Manager ARN of your secret key containing the certificate chain (X.509 PEM), private key (PKCS#8 PEM), and private key password (optional) used for mutual TLS authentication of your MSK/Apache Kafka brokers. SERVER_ROOT_CA_CERTIFICATE - (Self-managed Apache Kafka) The Secrets Manager ARN of your secret key containing the root CA certificate (X.509 PEM) used for TLS encryption of your Apache Kafka brokers.
2667
2695
  */
2668
2696
  Type?: SourceAccessType;
2669
2697
  /**
@@ -2790,7 +2818,7 @@ declare namespace Lambda {
2790
2818
  */
2791
2819
  Enabled?: Enabled;
2792
2820
  /**
2793
- * The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB). Amazon Kinesis - Default 100. Max 10,000. Amazon DynamoDB Streams - Default 100. Max 10,000. Amazon Simple Queue Service - Default 10. For standard queues the max is 10,000. For FIFO queues the max is 10. Amazon Managed Streaming for Apache Kafka - Default 100. Max 10,000. Self-Managed Apache Kafka - Default 100. Max 10,000. Amazon MQ (ActiveMQ and RabbitMQ) - Default 100. Max 10,000.
2821
+ * The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB). Amazon Kinesis - Default 100. Max 10,000. Amazon DynamoDB Streams - Default 100. Max 10,000. Amazon Simple Queue Service - Default 10. For standard queues the max is 10,000. For FIFO queues the max is 10. Amazon Managed Streaming for Apache Kafka - Default 100. Max 10,000. Self-managed Apache Kafka - Default 100. Max 10,000. Amazon MQ (ActiveMQ and RabbitMQ) - Default 100. Max 10,000.
2794
2822
  */
2795
2823
  BatchSize?: BatchSize;
2796
2824
  /**
@@ -2814,7 +2842,7 @@ declare namespace Lambda {
2814
2842
  */
2815
2843
  BisectBatchOnFunctionError?: BisectBatchOnFunctionError;
2816
2844
  /**
2817
- * (Streams only) Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records will be retried until the record expires.
2845
+ * (Streams only) Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
2818
2846
  */
2819
2847
  MaximumRetryAttempts?: MaximumRetryAttemptsEventSourceMapping;
2820
2848
  /**
@@ -2826,7 +2854,7 @@ declare namespace Lambda {
2826
2854
  */
2827
2855
  SourceAccessConfigurations?: SourceAccessConfigurations;
2828
2856
  /**
2829
- * (Streams only) The duration in seconds of a processing window. The range is between 1 second up to 900 seconds.
2857
+ * (Streams only) The duration in seconds of a processing window. The range is between 1 second and 900 seconds.
2830
2858
  */
2831
2859
  TumblingWindowInSeconds?: TumblingWindowInSeconds;
2832
2860
  /**