cdk-lambda-subminute 2.0.250 → 2.0.253

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/.jsii +3 -3
  2. package/lib/cdk-lambda-subminute.js +3 -3
  3. package/node_modules/aws-sdk/CHANGELOG.md +16 -1
  4. package/node_modules/aws-sdk/README.md +1 -1
  5. package/node_modules/aws-sdk/apis/bedrock-2023-04-20.examples.json +5 -0
  6. package/node_modules/aws-sdk/apis/bedrock-2023-04-20.min.json +816 -0
  7. package/node_modules/aws-sdk/apis/bedrock-2023-04-20.paginators.json +16 -0
  8. package/node_modules/aws-sdk/apis/bedrock-runtime-2023-09-30.examples.json +5 -0
  9. package/node_modules/aws-sdk/apis/bedrock-runtime-2023-09-30.min.json +180 -0
  10. package/node_modules/aws-sdk/apis/bedrock-runtime-2023-09-30.paginators.json +4 -0
  11. package/node_modules/aws-sdk/apis/cognito-idp-2016-04-18.examples.json +0 -77
  12. package/node_modules/aws-sdk/apis/cognito-idp-2016-04-18.min.json +8 -2
  13. package/node_modules/aws-sdk/apis/ec2-2016-11-15.min.json +1292 -1235
  14. package/node_modules/aws-sdk/apis/firehose-2015-08-04.min.json +69 -30
  15. package/node_modules/aws-sdk/apis/iot-2015-05-28.min.json +142 -128
  16. package/node_modules/aws-sdk/apis/iotfleetwise-2021-06-17.min.json +62 -13
  17. package/node_modules/aws-sdk/apis/metadata.json +7 -0
  18. package/node_modules/aws-sdk/apis/sagemaker-2017-07-24.min.json +681 -662
  19. package/node_modules/aws-sdk/apis/sagemaker-featurestore-runtime-2020-07-01.min.json +9 -6
  20. package/node_modules/aws-sdk/clients/all.d.ts +2 -0
  21. package/node_modules/aws-sdk/clients/all.js +3 -1
  22. package/node_modules/aws-sdk/clients/bedrock.d.ts +840 -0
  23. package/node_modules/aws-sdk/clients/bedrock.js +18 -0
  24. package/node_modules/aws-sdk/clients/bedrockruntime.d.ts +138 -0
  25. package/node_modules/aws-sdk/clients/bedrockruntime.js +18 -0
  26. package/node_modules/aws-sdk/clients/budgets.d.ts +8 -7
  27. package/node_modules/aws-sdk/clients/cognitoidentityserviceprovider.d.ts +1 -1
  28. package/node_modules/aws-sdk/clients/ec2.d.ts +69 -4
  29. package/node_modules/aws-sdk/clients/firehose.d.ts +59 -9
  30. package/node_modules/aws-sdk/clients/iot.d.ts +46 -29
  31. package/node_modules/aws-sdk/clients/iotfleetwise.d.ts +71 -1
  32. package/node_modules/aws-sdk/clients/sagemaker.d.ts +58 -31
  33. package/node_modules/aws-sdk/clients/sagemakerfeaturestoreruntime.d.ts +7 -2
  34. package/node_modules/aws-sdk/clients/textract.d.ts +5 -5
  35. package/node_modules/aws-sdk/clients/wafv2.d.ts +2 -2
  36. package/node_modules/aws-sdk/dist/aws-sdk-core-react-native.js +2 -2
  37. package/node_modules/aws-sdk/dist/aws-sdk-react-native.js +85 -11
  38. package/node_modules/aws-sdk/dist/aws-sdk.js +1521 -1398
  39. package/node_modules/aws-sdk/dist/aws-sdk.min.js +84 -84
  40. package/node_modules/aws-sdk/lib/config_service_placeholders.d.ts +4 -0
  41. package/node_modules/aws-sdk/lib/core.js +1 -1
  42. package/node_modules/aws-sdk/package.json +1 -1
  43. package/package.json +4 -4
@@ -0,0 +1,18 @@
+ require('../lib/node_loader');
+ var AWS = require('../lib/core');
+ var Service = AWS.Service;
+ var apiLoader = AWS.apiLoader;
+
+ apiLoader.services['bedrock'] = {};
+ AWS.Bedrock = Service.defineService('bedrock', ['2023-04-20']);
+ Object.defineProperty(apiLoader.services['bedrock'], '2023-04-20', {
+ get: function get() {
+ var model = require('../apis/bedrock-2023-04-20.min.json');
+ model.paginators = require('../apis/bedrock-2023-04-20.paginators.json').pagination;
+ return model;
+ },
+ enumerable: true,
+ configurable: true
+ });
+
+ module.exports = AWS.Bedrock;
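This new module registers the Bedrock control-plane client with the SDK's API loader. As a rough sketch (not part of the diff), once this aws-sdk version is installed the client can be constructed from the main entry point or by requiring the client module directly; the region is a placeholder and credentials come from the usual SDK configuration chain:

    // Sketch only: construct the Bedrock client registered by the module above.
    var AWS = require('aws-sdk');
    var bedrock = new AWS.Bedrock({ apiVersion: '2023-04-20', region: 'us-east-1' });

    // Equivalent, without loading the full SDK bundle:
    var Bedrock = require('aws-sdk/clients/bedrock');
    var bedrockDirect = new Bedrock({ region: 'us-east-1' });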
@@ -0,0 +1,138 @@
+ import {Request} from '../lib/request';
+ import {Response} from '../lib/response';
+ import {AWSError} from '../lib/error';
+ import {Service} from '../lib/service';
+ import {ServiceConfigurationOptions} from '../lib/service';
+ import {ConfigBase as Config} from '../lib/config-base';
+ import {EventStream} from '../lib/event-stream/event-stream';
+ interface Blob {}
+ declare class BedrockRuntime extends Service {
+ /**
+ * Constructs a service object. This object has one method for each API operation.
+ */
+ constructor(options?: BedrockRuntime.Types.ClientConfiguration)
+ config: Config & BedrockRuntime.Types.ClientConfiguration;
+ /**
+ * Invokes the specified Bedrock model to run inference using the input provided in the request body. You use InvokeModel to run inference for text models, image models, and embedding models. For more information about invoking models, see Using the API in the Bedrock User Guide. For example requests, see Examples (after the Errors section).
+ */
+ invokeModel(params: BedrockRuntime.Types.InvokeModelRequest, callback?: (err: AWSError, data: BedrockRuntime.Types.InvokeModelResponse) => void): Request<BedrockRuntime.Types.InvokeModelResponse, AWSError>;
+ /**
+ * Invokes the specified Bedrock model to run inference using the input provided in the request body. You use InvokeModel to run inference for text models, image models, and embedding models. For more information about invoking models, see Using the API in the Bedrock User Guide. For example requests, see Examples (after the Errors section).
+ */
+ invokeModel(callback?: (err: AWSError, data: BedrockRuntime.Types.InvokeModelResponse) => void): Request<BedrockRuntime.Types.InvokeModelResponse, AWSError>;
+ /**
+ * Invoke the specified Bedrock model to run inference using the input provided. Return the response in a stream. For more information about invoking models, see Using the API in the Bedrock User Guide. For an example request and response, see Examples (after the Errors section).
+ */
+ invokeModelWithResponseStream(params: BedrockRuntime.Types.InvokeModelWithResponseStreamRequest, callback?: (err: AWSError, data: BedrockRuntime.Types.InvokeModelWithResponseStreamResponse) => void): Request<BedrockRuntime.Types.InvokeModelWithResponseStreamResponse, AWSError>;
+ /**
+ * Invoke the specified Bedrock model to run inference using the input provided. Return the response in a stream. For more information about invoking models, see Using the API in the Bedrock User Guide. For an example request and response, see Examples (after the Errors section).
+ */
+ invokeModelWithResponseStream(callback?: (err: AWSError, data: BedrockRuntime.Types.InvokeModelWithResponseStreamResponse) => void): Request<BedrockRuntime.Types.InvokeModelWithResponseStreamResponse, AWSError>;
+ }
+ declare namespace BedrockRuntime {
+ export type Body = Buffer|Uint8Array|Blob|string;
+ export interface InternalServerException {
+ message?: NonBlankString;
+ }
+ export type InvokeModelIdentifier = string;
+ export interface InvokeModelRequest {
+ /**
+ * The desired MIME type of the inference body in the response. The default value is application/json.
+ */
+ accept?: MimeType;
+ /**
+ * Input data in the format specified in the content-type request header. To see the format and content of this field for different models, refer to Inference parameters.
+ */
+ body: Body;
+ /**
+ * The MIME type of the input data in the request. The default value is application/json.
+ */
+ contentType?: MimeType;
+ /**
+ * Identifier of the model.
+ */
+ modelId: InvokeModelIdentifier;
+ }
+ export interface InvokeModelResponse {
+ /**
+ * Inference response from the model in the format specified in the content-type header field. To see the format and content of this field for different models, refer to Inference parameters.
+ */
+ body: Body;
+ /**
+ * The MIME type of the inference result.
+ */
+ contentType: MimeType;
+ }
+ export interface InvokeModelWithResponseStreamRequest {
+ /**
+ * The desired MIME type of the inference body in the response. The default value is application/json.
+ */
+ accept?: MimeType;
+ /**
+ * Inference input in the format specified by the content-type. To see the format and content of this field for different models, refer to Inference parameters.
+ */
+ body: Body;
+ /**
+ * The MIME type of the input data in the request. The default value is application/json.
+ */
+ contentType?: MimeType;
+ /**
+ * Id of the model to invoke using the streaming request.
+ */
+ modelId: InvokeModelIdentifier;
+ }
+ export interface InvokeModelWithResponseStreamResponse {
+ /**
+ * Inference response from the model in the format specified by Content-Type. To see the format and content of this field for different models, refer to Inference parameters.
+ */
+ body: ResponseStream;
+ /**
+ * The MIME type of the inference result.
+ */
+ contentType: MimeType;
+ }
+ export type MimeType = string;
+ export interface ModelStreamErrorException {
+ message?: NonBlankString;
+ /**
+ * The original message.
+ */
+ originalMessage?: NonBlankString;
+ /**
+ * The original status code.
+ */
+ originalStatusCode?: StatusCode;
+ }
+ export type NonBlankString = string;
+ export type PartBody = Buffer|Uint8Array|Blob|string;
+ export interface PayloadPart {
+ /**
+ * Base64-encoded bytes of payload data.
+ */
+ bytes?: PartBody;
+ }
+ export type ResponseStream = EventStream<{chunk?:PayloadPart,internalServerException?:InternalServerException,modelStreamErrorException?:ModelStreamErrorException,throttlingException?:ThrottlingException,validationException?:ValidationException}>;
+ export type StatusCode = number;
+ export interface ThrottlingException {
+ message?: NonBlankString;
+ }
+ export interface ValidationException {
+ message?: NonBlankString;
+ }
+ /**
+ * A string in YYYY-MM-DD format that represents the latest possible API version that can be used in this service. Specify 'latest' to use the latest possible version.
+ */
+ export type apiVersion = "2023-09-30"|"latest"|string;
+ export interface ClientApiVersions {
+ /**
+ * A string in YYYY-MM-DD format that represents the latest possible API version that can be used in this service. Specify 'latest' to use the latest possible version.
+ */
+ apiVersion?: apiVersion;
+ }
+ export type ClientConfiguration = ServiceConfigurationOptions & ClientApiVersions;
+ /**
+ * Contains interfaces for use with the BedrockRuntime client.
+ */
+ export import Types = BedrockRuntime;
+ }
+ export = BedrockRuntime;
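The typings above define the two runtime operations and the request shape they share (modelId, body, contentType, accept). A minimal sketch of a blocking inference call, assuming a hypothetical model identifier and a JSON body in whatever schema that model expects:

    var AWS = require('aws-sdk');
    var runtime = new AWS.BedrockRuntime({ apiVersion: '2023-09-30', region: 'us-east-1' });

    runtime.invokeModel({
      modelId: 'example-model-id',                   // placeholder; each model has its own identifier
      contentType: 'application/json',
      accept: 'application/json',
      body: JSON.stringify({ inputText: 'Hello' })   // placeholder schema; see the model's inference parameters
    }, function (err, data) {
      if (err) return console.error(err);
      console.log(data.body.toString());             // data.body is the raw response payload (the Body type above)
    });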
@@ -0,0 +1,18 @@
+ require('../lib/node_loader');
+ var AWS = require('../lib/core');
+ var Service = AWS.Service;
+ var apiLoader = AWS.apiLoader;
+
+ apiLoader.services['bedrockruntime'] = {};
+ AWS.BedrockRuntime = Service.defineService('bedrockruntime', ['2023-09-30']);
+ Object.defineProperty(apiLoader.services['bedrockruntime'], '2023-09-30', {
+ get: function get() {
+ var model = require('../apis/bedrock-runtime-2023-09-30.min.json');
+ model.paginators = require('../apis/bedrock-runtime-2023-09-30.paginators.json').pagination;
+ return model;
+ },
+ enumerable: true,
+ configurable: true
+ });
+
+ module.exports = AWS.BedrockRuntime;
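bedrockruntime.js wires the runtime client into the API loader the same way bedrock.js does. For the streaming operation declared in the typings above, the response body is a ResponseStream of PayloadPart chunks; a rough sketch, assuming the EventStream can be consumed as a Node readable stream the way other aws-sdk v2 event-stream responses are:

    var AWS = require('aws-sdk');
    var runtime = new AWS.BedrockRuntime({ apiVersion: '2023-09-30', region: 'us-east-1' });

    runtime.invokeModelWithResponseStream({
      modelId: 'example-model-id',                    // placeholder model identifier
      contentType: 'application/json',
      accept: 'application/json',
      body: JSON.stringify({ inputText: 'Hello' })
    }, function (err, data) {
      if (err) return console.error(err);
      data.body.on('data', function (event) {         // events match the ResponseStream union above
        if (event.chunk && event.chunk.bytes) {
          console.log(event.chunk.bytes.toString());  // each PayloadPart carries a fragment of the response
        }
      });
    });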
@@ -645,7 +645,7 @@ declare namespace Budgets {
  export interface DescribeBudgetNotificationsForAccountRequest {
  AccountId: AccountId;
  /**
- * An integer that shows how many budget name entries a paginated response contains.
+ * An integer that represents how many budgets a paginated response contains. The default is 50.
  */
  MaxResults?: MaxResultsBudgetNotifications;
  NextToken?: GenericString;
@@ -692,13 +692,13 @@ declare namespace Budgets {
  }
  export interface DescribeBudgetsRequest {
  /**
- * The accountId that is associated with the budgets that you want descriptions of.
+ * The accountId that is associated with the budgets that you want to describe.
  */
  AccountId: AccountId;
  /**
- * An optional integer that represents how many entries a paginated response contains. The maximum is 100.
+ * An integer that represents how many budgets a paginated response contains. The default is 100.
  */
- MaxResults?: MaxResults;
+ MaxResults?: MaxResultsDescribeBudgets;
  /**
  * The pagination token that you include in your request to indicate the next set of results that you want to retrieve.
  */
@@ -724,7 +724,7 @@ declare namespace Budgets {
  */
  BudgetName: BudgetName;
  /**
- * An optional integer that represents how many entries a paginated response contains. The maximum is 100.
+ * An optional integer that represents how many entries a paginated response contains.
  */
  MaxResults?: MaxResults;
  /**
@@ -756,7 +756,7 @@ declare namespace Budgets {
  */
  Notification: Notification;
  /**
- * An optional integer that represents how many entries a paginated response contains. The maximum is 100.
+ * An optional integer that represents how many entries a paginated response contains.
  */
  MaxResults?: MaxResults;
  /**
@@ -838,6 +838,7 @@ declare namespace Budgets {
  export type InstanceIds = InstanceId[];
  export type MaxResults = number;
  export type MaxResultsBudgetNotifications = number;
+ export type MaxResultsDescribeBudgets = number;
  export interface Notification {
  /**
  * Specifies whether the notification is for how much you have spent (ACTUAL) or for how much that you're forecasted to spend (FORECASTED).
@@ -900,7 +901,7 @@ declare namespace Budgets {
  */
  Amount: NumericValue;
  /**
- * The unit of measurement that's used for the budget forecast, actual spend, or budget threshold, such as USD or GBP.
+ * The unit of measurement that's used for the budget forecast, actual spend, or budget threshold.
  */
  Unit: UnitValue;
  }
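The Budgets change gives DescribeBudgets its own MaxResultsDescribeBudgets type and documents the default page size. A small pagination sketch built from the request fields shown above (the account ID is a placeholder, and the Budgets/NextToken response fields are the standard DescribeBudgets response shape rather than part of this diff):

    var AWS = require('aws-sdk');
    var budgets = new AWS.Budgets();

    function listBudgets(nextToken) {
      budgets.describeBudgets({
        AccountId: '123456789012',   // placeholder account ID
        MaxResults: 50,
        NextToken: nextToken
      }, function (err, data) {
        if (err) return console.error(err);
        (data.Budgets || []).forEach(function (b) { console.log(b.BudgetName); });
        if (data.NextToken) listBudgets(data.NextToken);   // keep paging until the token runs out
      });
    }
    listBudgets();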
@@ -943,7 +943,7 @@ declare namespace CognitoIdentityServiceProvider {
  */
  UserPoolId: UserPoolIdType;
  /**
- * The username for the user. Must be unique within the user pool. Must be a UTF-8 string between 1 and 128 characters. After the user is created, the username can't be changed.
+ * The value that you want to set as the username sign-in attribute. The following conditions apply to the username parameter. The username can't be a duplicate of another username in the same user pool. You can't change the value of a username after you create it. You can only provide a value if usernames are a valid sign-in attribute for your user pool. If your user pool only supports phone numbers or email addresses as sign-in attributes, Amazon Cognito automatically generates a username value. For more information, see Customizing sign-in attributes.
  */
  Username: UsernameType;
  /**
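The reworded Username documentation above belongs to AdminCreateUserRequest. A minimal sketch of the corresponding call; the pool ID and username are placeholders, and the username must be a valid sign-in attribute for the pool, per the note above:

    var AWS = require('aws-sdk');
    var cognito = new AWS.CognitoIdentityServiceProvider();

    cognito.adminCreateUser({
      UserPoolId: 'us-east-1_EXAMPLE',   // placeholder user pool ID
      Username: 'jane.doe'
    }, function (err, data) {
      if (err) return console.error(err);
      console.log(data.User);            // the created user, per the AdminCreateUser response
    });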
@@ -10984,6 +10984,10 @@ declare namespace EC2 {
  * Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
  */
  DryRun?: Boolean;
+ /**
+ * Options for server side encryption.
+ */
+ SseSpecification?: VerifiedAccessSseSpecificationRequest;
  }
  export interface CreateVerifiedAccessEndpointResult {
  /**
@@ -11017,6 +11021,10 @@ declare namespace EC2 {
  * Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
  */
  DryRun?: Boolean;
+ /**
+ * Options for server side encryption.
+ */
+ SseSpecification?: VerifiedAccessSseSpecificationRequest;
  }
  export interface CreateVerifiedAccessGroupResult {
  /**
@@ -11042,7 +11050,7 @@ declare namespace EC2 {
  */
  DryRun?: Boolean;
  /**
- * Choose to enable or disable support for Federal Information Processing Standards (FIPS) on the instance.
+ * Enable or disable support for Federal Information Processing Standards (FIPS) on the instance.
  */
  FIPSEnabled?: Boolean;
  }
@@ -11129,6 +11137,10 @@ declare namespace EC2 {
  * Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
  */
  DryRun?: Boolean;
+ /**
+ * Options for server side encryption.
+ */
+ SseSpecification?: VerifiedAccessSseSpecificationRequest;
  }
  export interface CreateVerifiedAccessTrustProviderResult {
  /**
@@ -25273,6 +25285,7 @@ declare namespace EC2 {
  export type KeyPairList = KeyPairInfo[];
  export type KeyPairName = string;
  export type KeyType = "rsa"|"ed25519"|string;
+ export type KmsKeyArn = string;
  export type KmsKeyId = string;
  export interface LastError {
  /**
@@ -28145,7 +28158,7 @@ declare namespace EC2 {
  /**
  * The status of the Verified Access policy.
  */
- PolicyEnabled: Boolean;
+ PolicyEnabled?: Boolean;
  /**
  * The Verified Access policy document.
  */
@@ -28158,6 +28171,10 @@ declare namespace EC2 {
  * Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
  */
  DryRun?: Boolean;
+ /**
+ * Options for server side encryption.
+ */
+ SseSpecification?: VerifiedAccessSseSpecificationRequest;
  }
  export interface ModifyVerifiedAccessEndpointPolicyResult {
  /**
@@ -28168,6 +28185,10 @@ declare namespace EC2 {
  * The Verified Access policy document.
  */
  PolicyDocument?: String;
+ /**
+ * Describes the options in use for server side encryption.
+ */
+ SseSpecification?: VerifiedAccessSseSpecificationResponse;
  }
  export interface ModifyVerifiedAccessEndpointRequest {
  /**
@@ -28214,7 +28235,7 @@ declare namespace EC2 {
  /**
  * The status of the Verified Access policy.
  */
- PolicyEnabled: Boolean;
+ PolicyEnabled?: Boolean;
  /**
  * The Verified Access policy document.
  */
@@ -28227,6 +28248,10 @@ declare namespace EC2 {
  * Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
  */
  DryRun?: Boolean;
+ /**
+ * Options for server side encryption.
+ */
+ SseSpecification?: VerifiedAccessSseSpecificationRequest;
  }
  export interface ModifyVerifiedAccessGroupPolicyResult {
  /**
@@ -28237,6 +28262,10 @@ declare namespace EC2 {
  * The Verified Access policy document.
  */
  PolicyDocument?: String;
+ /**
+ * Describes the options in use for server side encryption.
+ */
+ SseSpecification?: VerifiedAccessSseSpecificationResponse;
  }
  export interface ModifyVerifiedAccessGroupRequest {
  /**
@@ -28365,6 +28394,10 @@ declare namespace EC2 {
  * A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.
  */
  ClientToken?: String;
+ /**
+ * Options for server side encryption.
+ */
+ SseSpecification?: VerifiedAccessSseSpecificationRequest;
  }
  export interface ModifyVerifiedAccessTrustProviderResult {
  /**
@@ -37231,6 +37264,10 @@ declare namespace EC2 {
  * The tags.
  */
  Tags?: TagList;
+ /**
+ * Describes the options in use for server side encryption.
+ */
+ SseSpecification?: VerifiedAccessSseSpecificationResponse;
  }
  export type VerifiedAccessEndpointAttachmentType = "vpc"|string;
  export interface VerifiedAccessEndpointEniOptions {
@@ -37320,6 +37357,10 @@ declare namespace EC2 {
  * The tags.
  */
  Tags?: TagList;
+ /**
+ * Describes the options in use for server side encryption.
+ */
+ SseSpecification?: VerifiedAccessSseSpecificationResponse;
  }
  export type VerifiedAccessGroupId = string;
  export type VerifiedAccessGroupIdList = VerifiedAccessGroupId[];
@@ -37350,7 +37391,7 @@ declare namespace EC2 {
  */
  Tags?: TagList;
  /**
- * Describes if support for Federal Information Processing Standards (FIPS) is enabled on the instance.
+ * Describes whether support for Federal Information Processing Standards (FIPS) is enabled on the instance.
  */
  FipsEnabled?: Boolean;
  }
@@ -37511,6 +37552,26 @@ declare namespace EC2 {
  */
  IncludeTrustContext?: Boolean;
  }
+ export interface VerifiedAccessSseSpecificationRequest {
+ /**
+ * Enable or disable the use of customer managed KMS keys for server side encryption. Valid values: True | False
+ */
+ CustomerManagedKeyEnabled?: Boolean;
+ /**
+ * The ARN of the KMS key.
+ */
+ KmsKeyArn?: KmsKeyArn;
+ }
+ export interface VerifiedAccessSseSpecificationResponse {
+ /**
+ * Describes the use of customer managed KMS keys for server side encryption. Valid values: True | False
+ */
+ CustomerManagedKeyEnabled?: Boolean;
+ /**
+ * Describes the ARN of the KMS key.
+ */
+ KmsKeyArn?: KmsKeyArn;
+ }
  export interface VerifiedAccessTrustProvider {
  /**
  * The ID of the Amazon Web Services Verified Access trust provider.
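The two interfaces above are the SSE shapes threaded through the Verified Access create and modify requests earlier in this file. A hedged sketch of supplying them on a trust-provider modification; the trust provider ID is an existing required field of that request (not shown in this diff) and the ARN is a placeholder:

    var AWS = require('aws-sdk');
    var ec2 = new AWS.EC2();

    ec2.modifyVerifiedAccessTrustProvider({
      VerifiedAccessTrustProviderId: 'vatp-0123456789abcdef0',        // placeholder ID
      SseSpecification: {
        CustomerManagedKeyEnabled: true,
        KmsKeyArn: 'arn:aws:kms:us-east-1:123456789012:key/EXAMPLE'   // placeholder key ARN
      }
    }, function (err, data) {
      if (err) return console.error(err);
      console.log(data.VerifiedAccessTrustProvider);  // now carries an SseSpecification response block
    });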
@@ -37556,6 +37617,10 @@ declare namespace EC2 {
  * The tags.
  */
  Tags?: TagList;
+ /**
+ * Describes the options in use for server side encryption.
+ */
+ SseSpecification?: VerifiedAccessSseSpecificationResponse;
  }
  export interface VerifiedAccessTrustProviderCondensed {
  /**
@@ -52,27 +52,27 @@ declare class Firehose extends Service {
  */
  listTagsForDeliveryStream(callback?: (err: AWSError, data: Firehose.Types.ListTagsForDeliveryStreamOutput) => void): Request<Firehose.Types.ListTagsForDeliveryStreamOutput, AWSError>;
  /**
- * Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers. By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation. If the PutRecord operation throws a ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
+ * Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers. By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits. Kinesis Data Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation. If the PutRecord operation throws a ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
  */
  putRecord(params: Firehose.Types.PutRecordInput, callback?: (err: AWSError, data: Firehose.Types.PutRecordOutput) => void): Request<Firehose.Types.PutRecordOutput, AWSError>;
  /**
- * Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers. By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation. If the PutRecord operation throws a ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
+ * Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers. By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits. Kinesis Data Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation. If the PutRecord operation throws a ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
  */
  putRecord(callback?: (err: AWSError, data: Firehose.Types.PutRecordOutput) => void): Request<Firehose.Types.PutRecordOutput, AWSError>;
  /**
- * Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers. For information about service quota, see Amazon Kinesis Data Firehose Quota. Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records. A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error. If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination. If PutRecordBatch throws ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
+ * Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers. Kinesis Data Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics. For information about service quota, see Amazon Kinesis Data Firehose Quota. Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records. A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error. If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination. If PutRecordBatch throws ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. 
For larger data assets, allow for a longer time out before retrying Put API operations. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
  */
  putRecordBatch(params: Firehose.Types.PutRecordBatchInput, callback?: (err: AWSError, data: Firehose.Types.PutRecordBatchOutput) => void): Request<Firehose.Types.PutRecordBatchOutput, AWSError>;
  /**
- * Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers. For information about service quota, see Amazon Kinesis Data Firehose Quota. Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records. A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error. If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination. If PutRecordBatch throws ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
+ * Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers. Kinesis Data Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics. For information about service quota, see Amazon Kinesis Data Firehose Quota. Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records. A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error. If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination. If PutRecordBatch throws ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. 
For larger data assets, allow for a longer time out before retrying Put API operations. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
  */
  putRecordBatch(callback?: (err: AWSError, data: Firehose.Types.PutRecordBatchOutput) => void): Request<Firehose.Types.PutRecordBatchOutput, AWSError>;
  /**
- * Enables server-side encryption (SSE) for the delivery stream. This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively. To check the encryption status of a delivery stream, use DescribeDeliveryStream. Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant. If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK. If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Kinesis Data Firehose to invoke KMS encrypt and decrypt operations. You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut as its source. The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.
+ * Enables server-side encryption (SSE) for the delivery stream. This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively. To check the encryption status of a delivery stream, use DescribeDeliveryStream. Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant. For the KMS grant creation to be successful, Kinesis Data Firehose APIs StartDeliveryStreamEncryption and CreateDeliveryStream should not be called with session credentials that are more than 6 hours old. If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK. If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Kinesis Data Firehose to invoke KMS encrypt and decrypt operations. You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut as its source. The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.
  */
  startDeliveryStreamEncryption(params: Firehose.Types.StartDeliveryStreamEncryptionInput, callback?: (err: AWSError, data: Firehose.Types.StartDeliveryStreamEncryptionOutput) => void): Request<Firehose.Types.StartDeliveryStreamEncryptionOutput, AWSError>;
  /**
- * Enables server-side encryption (SSE) for the delivery stream. This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively. To check the encryption status of a delivery stream, use DescribeDeliveryStream. Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant. If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK. If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Kinesis Data Firehose to invoke KMS encrypt and decrypt operations. You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut as its source. The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.
+ * Enables server-side encryption (SSE) for the delivery stream. This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively. To check the encryption status of a delivery stream, use DescribeDeliveryStream. Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant. For the KMS grant creation to be successful, Kinesis Data Firehose APIs StartDeliveryStreamEncryption and CreateDeliveryStream should not be called with session credentials that are more than 6 hours old. If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK. If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Kinesis Data Firehose to invoke KMS encrypt and decrypt operations. You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut as its source. The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.
  */
  startDeliveryStreamEncryption(callback?: (err: AWSError, data: Firehose.Types.StartDeliveryStreamEncryptionOutput) => void): Request<Firehose.Types.StartDeliveryStreamEncryptionOutput, AWSError>;
  /**
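The doc comment above describes the asynchronous ENABLING → ENABLED flow for StartDeliveryStreamEncryption. As a minimal, hedged sketch of how this operation is typically driven from the v2 SDK (the stream name, key ARN, and region below are placeholders, not values from this package):

```typescript
import { Firehose } from 'aws-sdk';

const firehose = new Firehose({ region: 'us-east-1' });
const deliveryStreamName = 'example-direct-put-stream'; // placeholder

async function enableSse(): Promise<void> {
  // Start SSE with a customer-managed CMK; the call returns immediately and the
  // stream then transitions ENABLING -> ENABLED (or ENABLING_FAILED on error).
  await firehose.startDeliveryStreamEncryption({
    DeliveryStreamName: deliveryStreamName,
    DeliveryStreamEncryptionConfigurationInput: {
      KeyType: 'CUSTOMER_MANAGED_CMK',
      KeyARN: 'arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID', // placeholder
    },
  }).promise();

  // Check the Status property mentioned in the doc comment via DescribeDeliveryStream.
  const { DeliveryStreamDescription } = await firehose
    .describeDeliveryStream({ DeliveryStreamName: deliveryStreamName })
    .promise();
  console.log(DeliveryStreamDescription.DeliveryStreamEncryptionConfiguration?.Status);
}

enableSse().catch(console.error);
```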
@@ -375,6 +375,16 @@ declare namespace Firehose {
  }
  export type AmazonopensearchserviceS3BackupMode = "FailedDocumentsOnly"|"AllDocuments"|string;
  export type AmazonopensearchserviceTypeName = string;
+ export interface AuthenticationConfiguration {
+ /**
+ * The ARN of the role used to access the Amazon MSK cluster.
+ */
+ RoleARN: RoleARN;
+ /**
+ * The type of connectivity used to access the Amazon MSK cluster.
+ */
+ Connectivity: Connectivity;
+ }
  export type BlockSizeBytes = number;
  export type BooleanObject = boolean;
  export type BucketARN = string;
@@ -405,6 +415,7 @@ declare namespace Firehose {
  export type ClusterJDBCURL = string;
  export type ColumnToJsonKeyMappings = {[key: string]: NonEmptyString};
  export type CompressionFormat = "UNCOMPRESSED"|"GZIP"|"ZIP"|"Snappy"|"HADOOP_SNAPPY"|string;
+ export type Connectivity = "PUBLIC"|"PRIVATE"|string;
  export type ContentEncoding = "NONE"|"GZIP"|string;
  export interface CopyCommand {
  /**
@@ -474,6 +485,7 @@ declare namespace Firehose {
  * The destination in the Serverless offering for Amazon OpenSearch Service. You can specify only one destination.
  */
  AmazonOpenSearchServerlessDestinationConfiguration?: AmazonOpenSearchServerlessDestinationConfiguration;
+ MSKSourceConfiguration?: MSKSourceConfiguration;
  }
  export interface CreateDeliveryStreamOutput {
  /**
@@ -600,7 +612,7 @@ declare namespace Firehose {
  export type DeliveryStreamName = string;
  export type DeliveryStreamNameList = DeliveryStreamName[];
  export type DeliveryStreamStatus = "CREATING"|"CREATING_FAILED"|"DELETING"|"DELETING_FAILED"|"ACTIVE"|string;
- export type DeliveryStreamType = "DirectPut"|"KinesisStreamAsSource"|string;
+ export type DeliveryStreamType = "DirectPut"|"KinesisStreamAsSource"|"MSKAsSource"|string;
  export type DeliveryStreamVersionId = string;
  export interface DescribeDeliveryStreamInput {
  /**
@@ -1326,6 +1338,39 @@ declare namespace Firehose {
  export type ListTagsForDeliveryStreamOutputTagList = Tag[];
  export type LogGroupName = string;
  export type LogStreamName = string;
+ export type MSKClusterARN = string;
+ export interface MSKSourceConfiguration {
+ /**
+ * The ARN of the Amazon MSK cluster.
+ */
+ MSKClusterARN: MSKClusterARN;
+ /**
+ * The topic name within the Amazon MSK cluster.
+ */
+ TopicName: TopicName;
+ /**
+ * The authentication configuration of the Amazon MSK cluster.
+ */
+ AuthenticationConfiguration: AuthenticationConfiguration;
+ }
+ export interface MSKSourceDescription {
+ /**
+ * The ARN of the Amazon MSK cluster.
+ */
+ MSKClusterARN?: MSKClusterARN;
+ /**
+ * The topic name within the Amazon MSK cluster.
+ */
+ TopicName?: TopicName;
+ /**
+ * The authentication configuration of the Amazon MSK cluster.
+ */
+ AuthenticationConfiguration?: AuthenticationConfiguration;
+ /**
+ * Kinesis Data Firehose starts retrieving records from the topic within the Amazon MSK cluster starting with this timestamp.
+ */
+ DeliveryStartTimestamp?: DeliveryStartTimestamp;
+ }
  export type NoEncryptionConfig = "NoEncryption"|string;
  export type NonEmptyString = string;
  export type NonEmptyStringWithoutWhitespace = string;
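The MSKSourceConfiguration and AuthenticationConfiguration shapes added above pair with the new MSKSourceConfiguration member of CreateDeliveryStreamInput shown earlier in this diff. A speculative sketch of a create call using them follows; every ARN, topic, and bucket name is an invented placeholder, and the full set of required destination fields should be verified against the Firehose documentation:

```typescript
import { Firehose } from 'aws-sdk';

const firehose = new Firehose({ region: 'us-east-1' });

// Sketch: a delivery stream that reads from an Amazon MSK topic (the new
// "MSKAsSource" DeliveryStreamType) and delivers to S3. Placeholder ARNs only.
async function createMskSourcedStream(): Promise<void> {
  const result = await firehose.createDeliveryStream({
    DeliveryStreamName: 'example-msk-to-s3',
    DeliveryStreamType: 'MSKAsSource',
    MSKSourceConfiguration: {
      MSKClusterARN: 'arn:aws:kafka:us-east-1:111122223333:cluster/example/EXAMPLE-UUID',
      TopicName: 'example-topic',
      AuthenticationConfiguration: {
        RoleARN: 'arn:aws:iam::111122223333:role/example-firehose-msk-role',
        Connectivity: 'PRIVATE',
      },
    },
    ExtendedS3DestinationConfiguration: {
      RoleARN: 'arn:aws:iam::111122223333:role/example-firehose-delivery-role',
      BucketARN: 'arn:aws:s3:::example-destination-bucket',
    },
  }).promise();
  console.log('Created:', result.DeliveryStreamARN);
}

createMskSourcedStream().catch(console.error);
```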
@@ -1459,9 +1504,9 @@ declare namespace Firehose {
  ParameterValue: ProcessorParameterValue;
  }
  export type ProcessorParameterList = ProcessorParameter[];
- export type ProcessorParameterName = "LambdaArn"|"NumberOfRetries"|"MetadataExtractionQuery"|"JsonParsingEngine"|"RoleArn"|"BufferSizeInMBs"|"BufferIntervalInSeconds"|"SubRecordType"|"Delimiter"|string;
+ export type ProcessorParameterName = "LambdaArn"|"NumberOfRetries"|"MetadataExtractionQuery"|"JsonParsingEngine"|"RoleArn"|"BufferSizeInMBs"|"BufferIntervalInSeconds"|"SubRecordType"|"Delimiter"|"CompressionFormat"|string;
  export type ProcessorParameterValue = string;
- export type ProcessorType = "RecordDeAggregation"|"Lambda"|"MetadataExtraction"|"AppendDelimiterToRecord"|string;
+ export type ProcessorType = "RecordDeAggregation"|"Decompression"|"Lambda"|"MetadataExtraction"|"AppendDelimiterToRecord"|string;
  export type Proportion = number;
  export interface PutRecordBatchInput {
  /**
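ProcessorType now includes "Decompression" and ProcessorParameterName includes "CompressionFormat". A hedged sketch of a ProcessingConfiguration exercising the new values is below; whether 'GZIP' is an accepted parameter value for this processor is an assumption, so treat it as illustrative only:

```typescript
import { Firehose } from 'aws-sdk';

// Illustrative only: uses the newly added "Decompression" processor type and
// "CompressionFormat" parameter name. 'GZIP' as the value is an assumption.
const processingConfiguration: Firehose.Types.ProcessingConfiguration = {
  Enabled: true,
  Processors: [
    {
      Type: 'Decompression',
      Parameters: [
        { ParameterName: 'CompressionFormat', ParameterValue: 'GZIP' },
      ],
    },
  ],
};

console.log(JSON.stringify(processingConfiguration, null, 2));
```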
@@ -1826,6 +1871,10 @@ declare namespace Firehose {
  * The KinesisStreamSourceDescription value for the source Kinesis data stream.
  */
  KinesisStreamSourceDescription?: KinesisStreamSourceDescription;
+ /**
+ * The configuration description for the Amazon MSK cluster to be used as the source for a delivery stream.
+ */
+ MSKSourceDescription?: MSKSourceDescription;
  }
  export interface SplunkDestinationConfiguration {
  /**
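SourceDescription now optionally carries an MSKSourceDescription, so the MSK source details can be read back from DescribeDeliveryStream. A short sketch, with a placeholder stream name:

```typescript
import { Firehose } from 'aws-sdk';

const firehose = new Firehose({ region: 'us-east-1' });

// Sketch: inspect the MSK source of an existing delivery stream, if any.
async function showMskSource(): Promise<void> {
  const { DeliveryStreamDescription } = await firehose
    .describeDeliveryStream({ DeliveryStreamName: 'example-msk-to-s3' }) // placeholder
    .promise();
  const mskSource = DeliveryStreamDescription.Source?.MSKSourceDescription;
  if (mskSource) {
    console.log(mskSource.MSKClusterARN, mskSource.TopicName, mskSource.DeliveryStartTimestamp);
  }
}

showMskSource().catch(console.error);
```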
@@ -1997,6 +2046,7 @@ declare namespace Firehose {
  export type TagKeyList = TagKey[];
  export type TagValue = string;
  export type Timestamp = Date;
+ export type TopicName = string;
  export interface UntagDeliveryStreamInput {
  /**
  * The name of the delivery stream.