cdk-lambda-subminute 2.0.249 → 2.0.251

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/.jsii +16 -3
  2. package/lib/cdk-lambda-subminute.js +3 -3
  3. package/node_modules/aws-sdk/CHANGELOG.md +16 -1
  4. package/node_modules/aws-sdk/README.md +1 -1
  5. package/node_modules/aws-sdk/apis/appintegrations-2020-07-29.examples.json +83 -0
  6. package/node_modules/aws-sdk/apis/appintegrations-2020-07-29.min.json +241 -32
  7. package/node_modules/aws-sdk/apis/appintegrations-2020-07-29.paginators.json +31 -1
  8. package/node_modules/aws-sdk/apis/apprunner-2020-05-15.min.json +50 -49
  9. package/node_modules/aws-sdk/apis/cognito-idp-2016-04-18.examples.json +0 -77
  10. package/node_modules/aws-sdk/apis/cognito-idp-2016-04-18.min.json +8 -2
  11. package/node_modules/aws-sdk/apis/connect-2017-08-08.min.json +298 -238
  12. package/node_modules/aws-sdk/apis/connect-2017-08-08.paginators.json +6 -0
  13. package/node_modules/aws-sdk/apis/dynamodb-2012-08-10.min.json +81 -60
  14. package/node_modules/aws-sdk/apis/ec2-2016-11-15.min.json +7 -0
  15. package/node_modules/aws-sdk/apis/firehose-2015-08-04.min.json +69 -30
  16. package/node_modules/aws-sdk/apis/iot-2015-05-28.min.json +142 -128
  17. package/node_modules/aws-sdk/apis/lakeformation-2017-03-31.min.json +141 -36
  18. package/node_modules/aws-sdk/apis/lakeformation-2017-03-31.paginators.json +5 -0
  19. package/node_modules/aws-sdk/apis/s3-2006-03-01.examples.json +126 -126
  20. package/node_modules/aws-sdk/clients/appintegrations.d.ts +255 -1
  21. package/node_modules/aws-sdk/clients/apprunner.d.ts +6 -1
  22. package/node_modules/aws-sdk/clients/codedeploy.d.ts +34 -34
  23. package/node_modules/aws-sdk/clients/cognitoidentityserviceprovider.d.ts +1 -1
  24. package/node_modules/aws-sdk/clients/connect.d.ts +64 -6
  25. package/node_modules/aws-sdk/clients/dynamodb.d.ts +40 -2
  26. package/node_modules/aws-sdk/clients/ec2.d.ts +11 -3
  27. package/node_modules/aws-sdk/clients/firehose.d.ts +59 -9
  28. package/node_modules/aws-sdk/clients/iot.d.ts +46 -29
  29. package/node_modules/aws-sdk/clients/lakeformation.d.ts +95 -1
  30. package/node_modules/aws-sdk/clients/pinpoint.d.ts +2 -2
  31. package/node_modules/aws-sdk/clients/s3.d.ts +1 -1
  32. package/node_modules/aws-sdk/clients/textract.d.ts +5 -5
  33. package/node_modules/aws-sdk/dist/aws-sdk-core-react-native.js +1 -1
  34. package/node_modules/aws-sdk/dist/aws-sdk-react-native.js +13 -13
  35. package/node_modules/aws-sdk/dist/aws-sdk.js +614 -461
  36. package/node_modules/aws-sdk/dist/aws-sdk.min.js +84 -84
  37. package/node_modules/aws-sdk/lib/core.js +1 -1
  38. package/node_modules/aws-sdk/lib/dynamodb/document_client.d.ts +38 -0
  39. package/node_modules/aws-sdk/package.json +1 -1
  40. package/node_modules/xml2js/node_modules/sax/LICENSE +2 -2
  41. package/node_modules/xml2js/node_modules/sax/lib/sax.js +13 -4
  42. package/node_modules/xml2js/node_modules/sax/package.json +8 -4
  43. package/package.json +3 -3
@@ -1027,6 +1027,14 @@ declare class Connect extends Service {
  * This API is in preview release for Amazon Connect and is subject to change. Returns a paginated list of all security keys associated with the instance.
  */
  listSecurityKeys(callback?: (err: AWSError, data: Connect.Types.ListSecurityKeysResponse) => void): Request<Connect.Types.ListSecurityKeysResponse, AWSError>;
+ /**
+ * Returns a list of third party applications in a specific security profile.
+ */
+ listSecurityProfileApplications(params: Connect.Types.ListSecurityProfileApplicationsRequest, callback?: (err: AWSError, data: Connect.Types.ListSecurityProfileApplicationsResponse) => void): Request<Connect.Types.ListSecurityProfileApplicationsResponse, AWSError>;
+ /**
+ * Returns a list of third party applications in a specific security profile.
+ */
+ listSecurityProfileApplications(callback?: (err: AWSError, data: Connect.Types.ListSecurityProfileApplicationsResponse) => void): Request<Connect.Types.ListSecurityProfileApplicationsResponse, AWSError>;
  /**
  * This API is in preview release for Amazon Connect and is subject to change. Lists the permissions granted to a security profile.
  */
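The hunk above adds the new `listSecurityProfileApplications` operation to the typed Connect client. A minimal usage sketch against the aws-sdk v2 client bundled in this package is shown below; the instance and security profile IDs are hypothetical placeholders.

```ts
import * as AWS from 'aws-sdk';

const connect = new AWS.Connect({ region: 'us-east-1' });

// Hypothetical identifiers; substitute your own instance and security profile IDs.
const params: AWS.Connect.Types.ListSecurityProfileApplicationsRequest = {
  InstanceId: 'EXAMPLE-INSTANCE-ID',
  SecurityProfileId: 'EXAMPLE-SECURITY-PROFILE-ID',
};

// Returns the third party applications attached to the security profile.
connect.listSecurityProfileApplications(params).promise()
  .then((res) => console.log(res.Applications))
  .catch((err) => console.error(err));
```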
@@ -1860,6 +1868,18 @@ declare namespace Connect {
  */
  AwaitAnswerMachinePrompt?: Boolean;
  }
+ export interface Application {
+ /**
+ * Namespace of the application that you want to give access to.
+ */
+ Namespace?: Namespace;
+ /**
+ * The permissions that the agent is granted on the application. Only the ACCESS permission is supported.
+ */
+ ApplicationPermissions?: ApplicationPermissions;
+ }
+ export type ApplicationPermissions = Permission[];
+ export type Applications = Application[];
  export type ApproximateTotalCount = number;
  export interface AssignContactCategoryActionDefinition {
  }
@@ -2750,7 +2770,7 @@ declare namespace Connect {
  */
  InstanceId: InstanceId;
  /**
- * The name of the quick connect.
+ * A unique name of the quick connect.
  */
  Name: QuickConnectName;
  /**
@@ -2890,6 +2910,10 @@ declare namespace Connect {
  * The list of resources that a security profile applies tag restrictions to in Amazon Connect. Following are acceptable ResourceNames: User | SecurityProfile | Queue | RoutingProfile
  */
  TagRestrictedResources?: TagRestrictedResourceList;
+ /**
+ * This API is in preview release for Amazon Connect and is subject to change. A list of third party applications that the security profile will give access to.
+ */
+ Applications?: Applications;
  }
  export interface CreateSecurityProfileResponse {
  /**
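The hunk above lets `CreateSecurityProfile` attach third party applications (preview feature). A hedged sketch follows; the instance ID and application namespace are hypothetical placeholders.

```ts
import * as AWS from 'aws-sdk';

const connect = new AWS.Connect({ region: 'us-east-1' });

// Preview feature; the namespace below is a hypothetical placeholder.
connect.createSecurityProfile({
  InstanceId: 'EXAMPLE-INSTANCE-ID',
  SecurityProfileName: 'AgentWithThirdPartyApp',
  Applications: [
    {
      Namespace: 'example-third-party-app',
      ApplicationPermissions: ['ACCESS'], // only the ACCESS permission is supported
    },
  ],
}).promise()
  .then((res) => console.log(res.SecurityProfileId))
  .catch(console.error);
```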
@@ -5472,7 +5496,7 @@ declare namespace Connect {
  SourceType?: SourceType;
  }
  export type IntegrationAssociationSummaryList = IntegrationAssociationSummary[];
- export type IntegrationType = "EVENT"|"VOICE_ID"|"PINPOINT_APP"|"WISDOM_ASSISTANT"|"WISDOM_KNOWLEDGE_BASE"|"CASES_DOMAIN"|string;
+ export type IntegrationType = "EVENT"|"VOICE_ID"|"PINPOINT_APP"|"WISDOM_ASSISTANT"|"WISDOM_KNOWLEDGE_BASE"|"CASES_DOMAIN"|"APPLICATION"|string;
  export interface InvisibleFieldInfo {
  /**
  * Identifier of the invisible field.
@@ -5981,7 +6005,7 @@ declare namespace Connect {
  */
  InstanceId: InstanceId;
  /**
- * The type of phone number.
+ * The type of phone number. We recommend using ListPhoneNumbersV2 to return phone number types. While ListPhoneNumbers returns number types UIFN, SHARED, THIRD_PARTY_TF, and THIRD_PARTY_DID, it incorrectly lists them as TOLL_FREE or DID.
  */
  PhoneNumberTypes?: PhoneNumberTypes;
  /**
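The updated documentation above recommends `ListPhoneNumbersV2` when phone number types matter. A hedged sketch, assuming the v2 client's existing `listPhoneNumbersV2` operation; the target ARN is a hypothetical placeholder.

```ts
import * as AWS from 'aws-sdk';

const connect = new AWS.Connect({ region: 'us-east-1' });

// ListPhoneNumbersV2 reports UIFN, SHARED, THIRD_PARTY_TF, and THIRD_PARTY_DID
// correctly, which ListPhoneNumbers does not.
connect.listPhoneNumbersV2({
  TargetArn: 'arn:aws:connect:us-east-1:111111111111:instance/EXAMPLE', // placeholder
  MaxResults: 100,
}).promise()
  .then((res) => res.ListPhoneNumbersSummaryList?.forEach((n) =>
    console.log(n.PhoneNumber, n.PhoneNumberType)))
  .catch(console.error);
```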
@@ -6286,6 +6310,34 @@ declare namespace Connect {
  */
  NextToken?: NextToken;
  }
+ export interface ListSecurityProfileApplicationsRequest {
+ /**
+ * The security profile identifier.
+ */
+ SecurityProfileId: SecurityProfileId;
+ /**
+ * The instance identifier.
+ */
+ InstanceId: InstanceId;
+ /**
+ * The token for the next set of results. The next set of results can be retrieved by using the token value returned in the previous response when making the next request.
+ */
+ NextToken?: NextToken;
+ /**
+ * The maximum number of results to return per page.
+ */
+ MaxResults?: MaxResult1000;
+ }
+ export interface ListSecurityProfileApplicationsResponse {
+ /**
+ * A list of the third party application's metadata.
+ */
+ Applications?: Applications;
+ /**
+ * The token for the next set of results. The next set of results can be retrieved by using the token value returned in the previous response when making the next request.
+ */
+ NextToken?: NextToken;
+ }
  export interface ListSecurityProfilePermissionsRequest {
  /**
  * The identifier for the security profle.
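Given the request/response shapes added above, pagination follows the usual `NextToken` pattern. A minimal sketch, assuming the same hypothetical identifiers as earlier:

```ts
import * as AWS from 'aws-sdk';

const connect = new AWS.Connect({ region: 'us-east-1' });

// Drain all pages by passing NextToken back until it is no longer returned.
async function listAllApplications(): Promise<AWS.Connect.Types.Applications> {
  const all: AWS.Connect.Types.Applications = [];
  let nextToken: string | undefined;
  do {
    const page = await connect.listSecurityProfileApplications({
      InstanceId: 'EXAMPLE-INSTANCE-ID',              // placeholder
      SecurityProfileId: 'EXAMPLE-SECURITY-PROFILE-ID', // placeholder
      MaxResults: 100,
      NextToken: nextToken,
    }).promise();
    all.push(...(page.Applications ?? []));
    nextToken = page.NextToken;
  } while (nextToken);
  return all;
}
```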
@@ -6672,6 +6724,7 @@ declare namespace Connect {
  }
  export type Name = string;
  export type Name128 = string;
+ export type Namespace = string;
  export type NextToken = string;
  export type NextToken2500 = string;
  export type NotificationContentType = "PLAIN_TEXT"|string;
@@ -6781,6 +6834,7 @@ declare namespace Connect {
  }
  export type Password = string;
  export type Percentage = number;
+ export type Permission = string;
  export type PermissionsList = SecurityProfilePermission[];
  export interface PersistentChat {
  /**
@@ -7450,15 +7504,15 @@ declare namespace Connect {
  */
  TaskAction?: TaskActionDefinition;
  /**
- * Information about the EventBridge action.
+ * Information about the EventBridge action. Supported only for TriggerEventSource values: OnPostCallAnalysisAvailable | OnRealTimeCallAnalysisAvailable | OnPostChatAnalysisAvailable | OnContactEvaluationSubmit | OnMetricDataUpdate
  */
  EventBridgeAction?: EventBridgeActionDefinition;
  /**
- * Information about the contact category action.
+ * Information about the contact category action. Supported only for TriggerEventSource values: OnPostCallAnalysisAvailable | OnRealTimeCallAnalysisAvailable | OnPostChatAnalysisAvailable | OnZendeskTicketCreate | OnZendeskTicketStatusUpdate | OnSalesforceCaseCreate
  */
  AssignContactCategoryAction?: AssignContactCategoryActionDefinition;
  /**
- * Information about the send notification action.
+ * Information about the send notification action. Supported only for TriggerEventSource values: OnPostCallAnalysisAvailable | OnRealTimeCallAnalysisAvailable | OnPostChatAnalysisAvailable | OnContactEvaluationSubmit | OnMetricDataUpdate
  */
  SendNotificationAction?: SendNotificationActionDefinition;
  }
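The hunk above only narrows which `TriggerEventSource` values each rule action supports. A hedged sketch of a rule pairing an EventBridge action with one of the listed event sources, using the existing `createRule` operation; the instance ID, rule name, and condition string are hypothetical placeholders and are not validated here against the Rules Function language.

```ts
import * as AWS from 'aws-sdk';

const connect = new AWS.Connect({ region: 'us-east-1' });

// An EventBridge action is only valid for the event sources listed above,
// e.g. OnPostCallAnalysisAvailable.
connect.createRule({
  InstanceId: 'EXAMPLE-INSTANCE-ID',                 // placeholder
  Name: 'PostCallAnalysisToEventBridge',             // placeholder
  TriggerEventSource: { EventSourceName: 'OnPostCallAnalysisAvailable' },
  Function: 'true',                                  // placeholder rules condition
  Actions: [
    { ActionType: 'GENERATE_EVENTBRIDGE_EVENT', EventBridgeAction: { Name: 'post-call-event' } },
  ],
  PublishStatus: 'PUBLISHED',
}).promise()
  .then((res) => console.log(res.RuleArn))
  .catch(console.error);
```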
@@ -9372,6 +9426,10 @@ declare namespace Connect {
  * The list of resources that a security profile applies tag restrictions to in Amazon Connect.
  */
  TagRestrictedResources?: TagRestrictedResourceList;
+ /**
+ * This API is in preview release for Amazon Connect and is subject to change. A list of the third party application's metadata.
+ */
+ Applications?: Applications;
  }
  export interface UpdateTaskTemplateRequest {
  /**
@@ -247,11 +247,11 @@ declare class DynamoDB extends DynamoDBCustomizations {
  */
  importTable(callback?: (err: AWSError, data: DynamoDB.Types.ImportTableOutput) => void): Request<DynamoDB.Types.ImportTableOutput, AWSError>;
  /**
- * List backups associated with an Amazon Web Services account. To list backups for a given table, specify TableName. ListBackups returns a paginated list of results with at most 1 MB worth of items in a page. You can also specify a maximum number of entries to be returned in a page. In the request, start time is inclusive, but end time is exclusive. Note that these boundaries are for the time at which the original backup was requested. You can call ListBackups a maximum of five times per second.
+ * List DynamoDB backups that are associated with an Amazon Web Services account and weren't made with Amazon Web Services Backup. To list these backups for a given table, specify TableName. ListBackups returns a paginated list of results with at most 1 MB worth of items in a page. You can also specify a maximum number of entries to be returned in a page. In the request, start time is inclusive, but end time is exclusive. Note that these boundaries are for the time at which the original backup was requested. You can call ListBackups a maximum of five times per second. If you want to retrieve the complete list of backups made with Amazon Web Services Backup, use the Amazon Web Services Backup list API.
  */
  listBackups(params: DynamoDB.Types.ListBackupsInput, callback?: (err: AWSError, data: DynamoDB.Types.ListBackupsOutput) => void): Request<DynamoDB.Types.ListBackupsOutput, AWSError>;
  /**
- * List backups associated with an Amazon Web Services account. To list backups for a given table, specify TableName. ListBackups returns a paginated list of results with at most 1 MB worth of items in a page. You can also specify a maximum number of entries to be returned in a page. In the request, start time is inclusive, but end time is exclusive. Note that these boundaries are for the time at which the original backup was requested. You can call ListBackups a maximum of five times per second.
+ * List DynamoDB backups that are associated with an Amazon Web Services account and weren't made with Amazon Web Services Backup. To list these backups for a given table, specify TableName. ListBackups returns a paginated list of results with at most 1 MB worth of items in a page. You can also specify a maximum number of entries to be returned in a page. In the request, start time is inclusive, but end time is exclusive. Note that these boundaries are for the time at which the original backup was requested. You can call ListBackups a maximum of five times per second. If you want to retrieve the complete list of backups made with Amazon Web Services Backup, use the Amazon Web Services Backup list API.
  */
  listBackups(callback?: (err: AWSError, data: DynamoDB.Types.ListBackupsOutput) => void): Request<DynamoDB.Types.ListBackupsOutput, AWSError>;
  /**
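The reworded documentation above clarifies that `ListBackups` only covers backups created by DynamoDB itself. A hedged sketch that filters explicitly; the table name is a hypothetical placeholder.

```ts
import * as AWS from 'aws-sdk';

const dynamodb = new AWS.DynamoDB({ region: 'us-east-1' });

// Lists only DynamoDB-native (on-demand/user) backups; backups made through
// AWS Backup have to be listed with the AWS Backup APIs instead.
dynamodb.listBackups({
  TableName: 'ExampleTable', // placeholder
  BackupType: 'USER',
  Limit: 25,
}).promise()
  .then((res) => res.BackupSummaries?.forEach((b) =>
    console.log(b.BackupArn, b.BackupCreationDateTime)))
  .catch(console.error);
```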
@@ -1642,9 +1642,18 @@ declare namespace DynamoDB {
  * The number of items exported.
  */
  ItemCount?: ItemCount;
+ /**
+ * Choice of whether to execute as a full export or incremental export. Valid values are FULL_EXPORT or INCREMENTAL_EXPORT. If INCREMENTAL_EXPORT is provided, the IncrementalExportSpecification must also be used.
+ */
+ ExportType?: ExportType;
+ /**
+ * Optional object containing the parameters specific to an incremental export.
+ */
+ IncrementalExportSpecification?: IncrementalExportSpecification;
  }
  export type ExportEndTime = Date;
  export type ExportFormat = "DYNAMODB_JSON"|"ION"|string;
+ export type ExportFromTime = Date;
  export type ExportManifest = string;
  export type ExportNextToken = string;
  export type ExportStartTime = Date;
@@ -1659,6 +1668,10 @@ declare namespace DynamoDB {
  * Export can be in one of the following states: IN_PROGRESS, COMPLETED, or FAILED.
  */
  ExportStatus?: ExportStatus;
+ /**
+ * Choice of whether to execute as a full export or incremental export. Valid values are FULL_EXPORT or INCREMENTAL_EXPORT. If INCREMENTAL_EXPORT is provided, the IncrementalExportSpecification must also be used.
+ */
+ ExportType?: ExportType;
  }
  export interface ExportTableToPointInTimeInput {
  /**
@@ -1697,6 +1710,14 @@ declare namespace DynamoDB {
  * The format for the exported data. Valid values for ExportFormat are DYNAMODB_JSON or ION.
  */
  ExportFormat?: ExportFormat;
+ /**
+ * Choice of whether to execute as a full export or incremental export. Valid values are FULL_EXPORT or INCREMENTAL_EXPORT. If INCREMENTAL_EXPORT is provided, the IncrementalExportSpecification must also be used.
+ */
+ ExportType?: ExportType;
+ /**
+ * Optional object containing the parameters specific to an incremental export.
+ */
+ IncrementalExportSpecification?: IncrementalExportSpecification;
  }
  export interface ExportTableToPointInTimeOutput {
  /**
@@ -1705,6 +1726,9 @@ declare namespace DynamoDB {
  ExportDescription?: ExportDescription;
  }
  export type ExportTime = Date;
+ export type ExportToTime = Date;
+ export type ExportType = "FULL_EXPORT"|"INCREMENTAL_EXPORT"|string;
+ export type ExportViewType = "NEW_IMAGE"|"NEW_AND_OLD_IMAGES"|string;
  export type ExpressionAttributeNameMap = {[key: string]: AttributeName};
  export type ExpressionAttributeNameVariable = string;
  export type ExpressionAttributeValueMap = {[key: string]: AttributeValue};
@@ -2078,6 +2102,20 @@ declare namespace DynamoDB {
  ImportTableDescription: ImportTableDescription;
  }
  export type ImportedItemCount = number;
+ export interface IncrementalExportSpecification {
+ /**
+ * Time in the past which provides the inclusive start range for the export table's data, counted in seconds from the start of the Unix epoch. The incremental export will reflect the table's state including and after this point in time.
+ */
+ ExportFromTime?: ExportFromTime;
+ /**
+ * Time in the past which provides the exclusive end range for the export table's data, counted in seconds from the start of the Unix epoch. The incremental export will reflect the table's state just prior to this point in time. If this is not provided, the latest time with data available will be used.
+ */
+ ExportToTime?: ExportToTime;
+ /**
+ * Choice of whether to output the previous item image prior to the start time of the incremental export. Valid values are NEW_AND_OLD_IMAGES and NEW_IMAGES.
+ */
+ ExportViewType?: ExportViewType;
+ }
  export type IndexName = string;
  export type IndexStatus = "CREATING"|"UPDATING"|"DELETING"|"ACTIVE"|string;
  export type InputCompressionType = "GZIP"|"ZSTD"|"NONE"|string;
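Putting the new DynamoDB export fields together, an incremental export request looks roughly like the sketch below. The table ARN, bucket name, and time window are hypothetical placeholders.

```ts
import * as AWS from 'aws-sdk';

const dynamodb = new AWS.DynamoDB({ region: 'us-east-1' });

// Incremental export of changes captured between two points in time.
const params: AWS.DynamoDB.Types.ExportTableToPointInTimeInput = {
  TableArn: 'arn:aws:dynamodb:us-east-1:111111111111:table/ExampleTable', // placeholder
  S3Bucket: 'example-export-bucket',                                      // placeholder
  ExportFormat: 'DYNAMODB_JSON',
  ExportType: 'INCREMENTAL_EXPORT',
  IncrementalExportSpecification: {
    ExportFromTime: new Date('2023-09-01T00:00:00Z'), // inclusive start
    ExportToTime: new Date('2023-09-02T00:00:00Z'),   // exclusive end
    ExportViewType: 'NEW_AND_OLD_IMAGES',
  },
};

dynamodb.exportTableToPointInTime(params).promise()
  .then((res) => console.log(res.ExportDescription?.ExportArn))
  .catch(console.error);
```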
@@ -4621,11 +4621,11 @@ declare class EC2 extends Service {
  */
  revokeSecurityGroupEgress(callback?: (err: AWSError, data: EC2.Types.RevokeSecurityGroupEgressResult) => void): Request<EC2.Types.RevokeSecurityGroupEgressResult, AWSError>;
  /**
- * Removes the specified inbound (ingress) rules from a security group. You can specify rules using either rule IDs or security group rule properties. If you use rule properties, the values that you specify (for example, ports) must match the existing rule's values exactly. Each rule has a protocol, from and to ports, and source (CIDR range, security group, or prefix list). For the TCP and UDP protocols, you must also specify the destination port or range of ports. For the ICMP protocol, you must also specify the ICMP type and code. If the security group rule has a description, you do not need to specify the description to revoke the rule. For a default VPC, if the values you specify do not match the existing rule's values, no error is returned, and the output describes the security group rules that were not revoked. Amazon Web Services recommends that you describe the security group to verify that the rules were removed. Rule changes are propagated to instances within the security group as quickly as possible. However, a small delay might occur.
+ * Removes the specified inbound (ingress) rules from a security group. You can specify rules using either rule IDs or security group rule properties. If you use rule properties, the values that you specify (for example, ports) must match the existing rule's values exactly. Each rule has a protocol, from and to ports, and source (CIDR range, security group, or prefix list). For the TCP and UDP protocols, you must also specify the destination port or range of ports. For the ICMP protocol, you must also specify the ICMP type and code. If the security group rule has a description, you do not need to specify the description to revoke the rule. For a default VPC, if the values you specify do not match the existing rule's values, no error is returned, and the output describes the security group rules that were not revoked. For a non-default VPC, if the values you specify do not match the existing rule's values, an InvalidPermission.NotFound client error is returned, and no rules are revoked. Amazon Web Services recommends that you describe the security group to verify that the rules were removed. Rule changes are propagated to instances within the security group as quickly as possible. However, a small delay might occur.
  */
  revokeSecurityGroupIngress(params: EC2.Types.RevokeSecurityGroupIngressRequest, callback?: (err: AWSError, data: EC2.Types.RevokeSecurityGroupIngressResult) => void): Request<EC2.Types.RevokeSecurityGroupIngressResult, AWSError>;
  /**
- * Removes the specified inbound (ingress) rules from a security group. You can specify rules using either rule IDs or security group rule properties. If you use rule properties, the values that you specify (for example, ports) must match the existing rule's values exactly. Each rule has a protocol, from and to ports, and source (CIDR range, security group, or prefix list). For the TCP and UDP protocols, you must also specify the destination port or range of ports. For the ICMP protocol, you must also specify the ICMP type and code. If the security group rule has a description, you do not need to specify the description to revoke the rule. For a default VPC, if the values you specify do not match the existing rule's values, no error is returned, and the output describes the security group rules that were not revoked. Amazon Web Services recommends that you describe the security group to verify that the rules were removed. Rule changes are propagated to instances within the security group as quickly as possible. However, a small delay might occur.
+ * Removes the specified inbound (ingress) rules from a security group. You can specify rules using either rule IDs or security group rule properties. If you use rule properties, the values that you specify (for example, ports) must match the existing rule's values exactly. Each rule has a protocol, from and to ports, and source (CIDR range, security group, or prefix list). For the TCP and UDP protocols, you must also specify the destination port or range of ports. For the ICMP protocol, you must also specify the ICMP type and code. If the security group rule has a description, you do not need to specify the description to revoke the rule. For a default VPC, if the values you specify do not match the existing rule's values, no error is returned, and the output describes the security group rules that were not revoked. For a non-default VPC, if the values you specify do not match the existing rule's values, an InvalidPermission.NotFound client error is returned, and no rules are revoked. Amazon Web Services recommends that you describe the security group to verify that the rules were removed. Rule changes are propagated to instances within the security group as quickly as possible. However, a small delay might occur.
  */
  revokeSecurityGroupIngress(callback?: (err: AWSError, data: EC2.Types.RevokeSecurityGroupIngressResult) => void): Request<EC2.Types.RevokeSecurityGroupIngressResult, AWSError>;
  /**
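The updated wording above documents the `InvalidPermission.NotFound` error for non-default VPCs when rule properties don't match exactly. Revoking by rule ID, which the same documentation allows, sidesteps that matching requirement; a hedged sketch with placeholder IDs:

```ts
import * as AWS from 'aws-sdk';

const ec2 = new AWS.EC2({ region: 'us-east-1' });

// Revoking by SecurityGroupRuleIds avoids matching every rule property exactly;
// in a non-default VPC a property mismatch now surfaces as InvalidPermission.NotFound.
ec2.revokeSecurityGroupIngress({
  GroupId: 'sg-0123456789abcdef0',                 // hypothetical group ID
  SecurityGroupRuleIds: ['sgr-0123456789abcdef0'], // hypothetical rule ID
}).promise()
  .then((res) => console.log('Revoked:', res.Return))
  .catch((err) => console.error(err.code, err.message));
```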
@@ -8781,7 +8781,7 @@ declare namespace EC2 {
  /**
  * The Availability Zone in which to create the default subnet.
  */
- AvailabilityZone: String;
+ AvailabilityZone: AvailabilityZoneName;
  /**
  * Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
  */
@@ -11041,6 +11041,10 @@ declare namespace EC2 {
  * Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
  */
  DryRun?: Boolean;
+ /**
+ * Choose to enable or disable support for Federal Information Processing Standards (FIPS) on the instance.
+ */
+ FIPSEnabled?: Boolean;
  }
  export interface CreateVerifiedAccessInstanceResult {
  /**
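The `FIPSEnabled` flag added above is part of the create request, so it is chosen when the Verified Access instance is created. A minimal sketch:

```ts
import * as AWS from 'aws-sdk';

const ec2 = new AWS.EC2({ region: 'us-east-1' });

// Request FIPS support at creation time via the new FIPSEnabled flag.
ec2.createVerifiedAccessInstance({
  Description: 'FIPS-enabled Verified Access instance',
  FIPSEnabled: true,
}).promise()
  .then((res) => console.log(res.VerifiedAccessInstance?.VerifiedAccessInstanceId))
  .catch(console.error);
```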
@@ -37345,6 +37349,10 @@ declare namespace EC2 {
  * The tags.
  */
  Tags?: TagList;
+ /**
+ * Describes if support for Federal Information Processing Standards (FIPS) is enabled on the instance.
+ */
+ FipsEnabled?: Boolean;
  }
  export type VerifiedAccessInstanceId = string;
  export type VerifiedAccessInstanceIdList = VerifiedAccessInstanceId[];
@@ -52,27 +52,27 @@ declare class Firehose extends Service {
52
52
  */
53
53
  listTagsForDeliveryStream(callback?: (err: AWSError, data: Firehose.Types.ListTagsForDeliveryStreamOutput) => void): Request<Firehose.Types.ListTagsForDeliveryStreamOutput, AWSError>;
54
54
  /**
55
- * Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers. By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation. If the PutRecord operation throws a ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
55
+ * Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers. By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits. Kinesis Data Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation. If the PutRecord operation throws a ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
56
56
  */
57
57
  putRecord(params: Firehose.Types.PutRecordInput, callback?: (err: AWSError, data: Firehose.Types.PutRecordOutput) => void): Request<Firehose.Types.PutRecordOutput, AWSError>;
58
58
  /**
59
- * Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers. By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation. If the PutRecord operation throws a ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
59
+ * Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers. By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits. Kinesis Data Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation. If the PutRecord operation throws a ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
60
60
  */
61
61
  putRecord(callback?: (err: AWSError, data: Firehose.Types.PutRecordOutput) => void): Request<Firehose.Types.PutRecordOutput, AWSError>;
62
62
  /**
63
- * Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers. For information about service quota, see Amazon Kinesis Data Firehose Quota. Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records. A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error. If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination. If PutRecordBatch throws ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
63
+ * Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers. Kinesis Data Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics. For information about service quota, see Amazon Kinesis Data Firehose Quota. Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records. A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error. If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination. If PutRecordBatch throws ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. 
For larger data assets, allow for a longer time out before retrying Put API operations. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
64
64
  */
65
65
  putRecordBatch(params: Firehose.Types.PutRecordBatchInput, callback?: (err: AWSError, data: Firehose.Types.PutRecordBatchOutput) => void): Request<Firehose.Types.PutRecordBatchOutput, AWSError>;
66
66
  /**
67
- * Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers. For information about service quota, see Amazon Kinesis Data Firehose Quota. Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records. A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error. If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination. If PutRecordBatch throws ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
67
+ * Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers. Kinesis Data Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics. For information about service quota, see Amazon Kinesis Data Firehose Quota. Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records. A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error. If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination. If PutRecordBatch throws ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. 
For larger data assets, allow for a longer time out before retrying Put API operations. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
68
68
  */
69
69
  putRecordBatch(callback?: (err: AWSError, data: Firehose.Types.PutRecordBatchOutput) => void): Request<Firehose.Types.PutRecordBatchOutput, AWSError>;
70
70
  /**
71
- * Enables server-side encryption (SSE) for the delivery stream. This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively. To check the encryption status of a delivery stream, use DescribeDeliveryStream. Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant. If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK. If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Kinesis Data Firehose to invoke KMS encrypt and decrypt operations. You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut as its source. The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.
71
+ * Enables server-side encryption (SSE) for the delivery stream. This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively. To check the encryption status of a delivery stream, use DescribeDeliveryStream. Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant. For the KMS grant creation to be successful, Kinesis Data Firehose APIs StartDeliveryStreamEncryption and CreateDeliveryStream should not be called with session credentials that are more than 6 hours old. If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK. If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Kinesis Data Firehose to invoke KMS encrypt and decrypt operations. You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut as its source. The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.
72
72
  */
73
73
  startDeliveryStreamEncryption(params: Firehose.Types.StartDeliveryStreamEncryptionInput, callback?: (err: AWSError, data: Firehose.Types.StartDeliveryStreamEncryptionOutput) => void): Request<Firehose.Types.StartDeliveryStreamEncryptionOutput, AWSError>;
74
74
  /**
75
- * Enables server-side encryption (SSE) for the delivery stream. This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively. To check the encryption status of a delivery stream, use DescribeDeliveryStream. Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant. If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK. If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Kinesis Data Firehose to invoke KMS encrypt and decrypt operations. You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut as its source. The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.
+ * Enables server-side encryption (SSE) for the delivery stream. This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively. To check the encryption status of a delivery stream, use DescribeDeliveryStream. Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant. For the KMS grant creation to be successful, Kinesis Data Firehose APIs StartDeliveryStreamEncryption and CreateDeliveryStream should not be called with session credentials that are more than 6 hours old. If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK. If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Kinesis Data Firehose to invoke KMS encrypt and decrypt operations. You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut as its source. The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.
  */
  startDeliveryStreamEncryption(callback?: (err: AWSError, data: Firehose.Types.StartDeliveryStreamEncryptionOutput) => void): Request<Firehose.Types.StartDeliveryStreamEncryptionOutput, AWSError>;
  /**
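
The behaviour described in the comment above can be exercised through the bundled SDK client. A minimal sketch, assuming a placeholder stream name and KMS key ARN (both hypothetical):

import { Firehose } from 'aws-sdk';

const firehose = new Firehose({ region: 'us-east-1' });

async function enableEncryption(): Promise<void> {
  // Returns immediately; Firehose then moves the stream's encryption Status
  // from ENABLING to ENABLED (or ENABLING_FAILED on failure).
  await firehose.startDeliveryStreamEncryption({
    DeliveryStreamName: 'example-stream',                              // placeholder
    DeliveryStreamEncryptionConfigurationInput: {
      KeyType: 'CUSTOMER_MANAGED_CMK',
      KeyARN: 'arn:aws:kms:us-east-1:111122223333:key/example-key-id', // placeholder
    },
  }).promise();
}

As the comment notes, poll DescribeDeliveryStream afterwards to confirm the Status; records written while the status is still ENABLING are not encrypted.
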
@@ -375,6 +375,16 @@ declare namespace Firehose {
  }
  export type AmazonopensearchserviceS3BackupMode = "FailedDocumentsOnly"|"AllDocuments"|string;
  export type AmazonopensearchserviceTypeName = string;
+ export interface AuthenticationConfiguration {
+ /**
+ * The ARN of the role used to access the Amazon MSK cluster.
+ */
+ RoleARN: RoleARN;
+ /**
+ * The type of connectivity used to access the Amazon MSK cluster.
+ */
+ Connectivity: Connectivity;
+ }
  export type BlockSizeBytes = number;
  export type BooleanObject = boolean;
  export type BucketARN = string;
@@ -405,6 +415,7 @@ declare namespace Firehose {
  export type ClusterJDBCURL = string;
  export type ColumnToJsonKeyMappings = {[key: string]: NonEmptyString};
  export type CompressionFormat = "UNCOMPRESSED"|"GZIP"|"ZIP"|"Snappy"|"HADOOP_SNAPPY"|string;
+ export type Connectivity = "PUBLIC"|"PRIVATE"|string;
  export type ContentEncoding = "NONE"|"GZIP"|string;
  export interface CopyCommand {
  /**
@@ -474,6 +485,7 @@ declare namespace Firehose {
  * The destination in the Serverless offering for Amazon OpenSearch Service. You can specify only one destination.
  */
  AmazonOpenSearchServerlessDestinationConfiguration?: AmazonOpenSearchServerlessDestinationConfiguration;
+ MSKSourceConfiguration?: MSKSourceConfiguration;
  }
  export interface CreateDeliveryStreamOutput {
  /**
@@ -600,7 +612,7 @@ declare namespace Firehose {
  export type DeliveryStreamName = string;
  export type DeliveryStreamNameList = DeliveryStreamName[];
  export type DeliveryStreamStatus = "CREATING"|"CREATING_FAILED"|"DELETING"|"DELETING_FAILED"|"ACTIVE"|string;
- export type DeliveryStreamType = "DirectPut"|"KinesisStreamAsSource"|string;
+ export type DeliveryStreamType = "DirectPut"|"KinesisStreamAsSource"|"MSKAsSource"|string;
  export type DeliveryStreamVersionId = string;
  export interface DescribeDeliveryStreamInput {
  /**
@@ -1326,6 +1338,39 @@ declare namespace Firehose {
  export type ListTagsForDeliveryStreamOutputTagList = Tag[];
  export type LogGroupName = string;
  export type LogStreamName = string;
+ export type MSKClusterARN = string;
+ export interface MSKSourceConfiguration {
+ /**
+ * The ARN of the Amazon MSK cluster.
+ */
+ MSKClusterARN: MSKClusterARN;
+ /**
+ * The topic name within the Amazon MSK cluster.
+ */
+ TopicName: TopicName;
+ /**
+ * The authentication configuration of the Amazon MSK cluster.
+ */
+ AuthenticationConfiguration: AuthenticationConfiguration;
+ }
+ export interface MSKSourceDescription {
+ /**
+ * The ARN of the Amazon MSK cluster.
+ */
+ MSKClusterARN?: MSKClusterARN;
+ /**
+ * The topic name within the Amazon MSK cluster.
+ */
+ TopicName?: TopicName;
+ /**
+ * The authentication configuration of the Amazon MSK cluster.
+ */
+ AuthenticationConfiguration?: AuthenticationConfiguration;
+ /**
+ * Kinesis Data Firehose starts retrieving records from the topic within the Amazon MSK cluster starting with this timestamp.
+ */
+ DeliveryStartTimestamp?: DeliveryStartTimestamp;
+ }
  export type NoEncryptionConfig = "NoEncryption"|string;
  export type NonEmptyString = string;
  export type NonEmptyStringWithoutWhitespace = string;
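
Taken together with the MSKSourceConfiguration and AuthenticationConfiguration shapes added above, a delivery stream that reads from an Amazon MSK topic could be created roughly as follows. This is a sketch only; every ARN, name, and topic is a placeholder, and the S3 destination is just one possible choice:

import { Firehose } from 'aws-sdk';

const firehose = new Firehose({ region: 'us-east-1' });

async function createMskBackedStream(): Promise<void> {
  await firehose.createDeliveryStream({
    DeliveryStreamName: 'example-msk-stream',      // placeholder
    DeliveryStreamType: 'MSKAsSource',             // enum value added in this diff
    MSKSourceConfiguration: {
      MSKClusterARN: 'arn:aws:kafka:us-east-1:111122223333:cluster/example/abc', // placeholder
      TopicName: 'example-topic',                                                // placeholder
      AuthenticationConfiguration: {
        RoleARN: 'arn:aws:iam::111122223333:role/firehose-msk-access',           // placeholder
        Connectivity: 'PRIVATE',
      },
    },
    ExtendedS3DestinationConfiguration: {
      RoleARN: 'arn:aws:iam::111122223333:role/firehose-s3-delivery',            // placeholder
      BucketARN: 'arn:aws:s3:::example-bucket',                                  // placeholder
    },
  }).promise();
}
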
@@ -1459,9 +1504,9 @@ declare namespace Firehose {
  ParameterValue: ProcessorParameterValue;
  }
  export type ProcessorParameterList = ProcessorParameter[];
- export type ProcessorParameterName = "LambdaArn"|"NumberOfRetries"|"MetadataExtractionQuery"|"JsonParsingEngine"|"RoleArn"|"BufferSizeInMBs"|"BufferIntervalInSeconds"|"SubRecordType"|"Delimiter"|string;
+ export type ProcessorParameterName = "LambdaArn"|"NumberOfRetries"|"MetadataExtractionQuery"|"JsonParsingEngine"|"RoleArn"|"BufferSizeInMBs"|"BufferIntervalInSeconds"|"SubRecordType"|"Delimiter"|"CompressionFormat"|string;
  export type ProcessorParameterValue = string;
- export type ProcessorType = "RecordDeAggregation"|"Lambda"|"MetadataExtraction"|"AppendDelimiterToRecord"|string;
+ export type ProcessorType = "RecordDeAggregation"|"Decompression"|"Lambda"|"MetadataExtraction"|"AppendDelimiterToRecord"|string;
  export type Proportion = number;
  export interface PutRecordBatchInput {
  /**
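
The new Decompression processor type and CompressionFormat parameter name suggest a processing step of roughly the following shape. This is a hypothetical fragment: where the processor is attached (here, a generic ProcessingConfiguration) and the 'GZIP' parameter value are assumptions not confirmed by this diff:

import { Firehose } from 'aws-sdk';

// A ProcessingConfiguration fragment using the enum members added above.
const processing: Firehose.Types.ProcessingConfiguration = {
  Enabled: true,
  Processors: [
    {
      Type: 'Decompression',
      Parameters: [
        { ParameterName: 'CompressionFormat', ParameterValue: 'GZIP' }, // assumed value
      ],
    },
  ],
};
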
@@ -1826,6 +1871,10 @@ declare namespace Firehose {
  * The KinesisStreamSourceDescription value for the source Kinesis data stream.
  */
  KinesisStreamSourceDescription?: KinesisStreamSourceDescription;
+ /**
+ * The configuration description for the Amazon MSK cluster to be used as the source for a delivery stream.
+ */
+ MSKSourceDescription?: MSKSourceDescription;
  }
  export interface SplunkDestinationConfiguration {
  /**
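
With MSKSourceDescription now surfaced on SourceDescription, both the MSK source details and the encryption status mentioned earlier can be read back from DescribeDeliveryStream. A minimal sketch with a placeholder stream name:

import { Firehose } from 'aws-sdk';

const firehose = new Firehose({ region: 'us-east-1' });

async function inspectStream(): Promise<void> {
  const { DeliveryStreamDescription } = await firehose.describeDeliveryStream({
    DeliveryStreamName: 'example-msk-stream', // placeholder
  }).promise();

  // Source is only populated for streams that are not DirectPut.
  const msk = DeliveryStreamDescription.Source?.MSKSourceDescription;
  console.log('MSK topic:', msk?.TopicName);
  console.log('Encryption status:',
    DeliveryStreamDescription.DeliveryStreamEncryptionConfiguration?.Status);
}
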
@@ -1997,6 +2046,7 @@ declare namespace Firehose {
  export type TagKeyList = TagKey[];
  export type TagValue = string;
  export type Timestamp = Date;
+ export type TopicName = string;
  export interface UntagDeliveryStreamInput {
  /**
  * The name of the delivery stream.