@aws-sdk/client-firehose 3.514.0 → 3.516.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -6,9 +6,9 @@
 
  AWS SDK for JavaScript Firehose Client for Node.js, Browser and React Native.
 
- <fullname>Amazon Kinesis Data Firehose API Reference</fullname>
+ <fullname>Amazon Data Firehose</fullname>
 
- <p>Amazon Kinesis Data Firehose is a fully managed service that delivers real-time
+ <p>Amazon Data Firehose is a fully managed service that delivers real-time
  streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon
  OpenSearch Service, Amazon Redshift, Splunk, and various other supportd
  destinations.</p>
package/dist-cjs/index.js CHANGED
@@ -270,6 +270,7 @@ var ProcessorParameterName = {
   BUFFER_INTERVAL_IN_SECONDS: "BufferIntervalInSeconds",
   BUFFER_SIZE_IN_MB: "BufferSizeInMBs",
   COMPRESSION_FORMAT: "CompressionFormat",
+  DATA_MESSAGE_EXTRACTION: "DataMessageExtraction",
   Delimiter: "Delimiter",
   JSON_PARSING_ENGINE: "JsonParsingEngine",
   LAMBDA_ARN: "LambdaArn",
@@ -280,6 +281,7 @@ var ProcessorParameterName = {
 };
 var ProcessorType = {
   AppendDelimiterToRecord: "AppendDelimiterToRecord",
+  CloudWatchLogProcessing: "CloudWatchLogProcessing",
   Decompression: "Decompression",
   Lambda: "Lambda",
   MetadataExtraction: "MetadataExtraction",
@@ -1114,10 +1116,12 @@ var se_ExtendedS3DestinationConfiguration = /* @__PURE__ */ __name((input, conte
   BufferingHints: import_smithy_client._json,
   CloudWatchLoggingOptions: import_smithy_client._json,
   CompressionFormat: [],
+  CustomTimeZone: [],
   DataFormatConversionConfiguration: (_) => se_DataFormatConversionConfiguration(_, context),
   DynamicPartitioningConfiguration: import_smithy_client._json,
   EncryptionConfiguration: import_smithy_client._json,
   ErrorOutputPrefix: [],
+  FileExtension: [],
   Prefix: [],
   ProcessingConfiguration: import_smithy_client._json,
   RoleARN: [],
@@ -1131,10 +1135,12 @@ var se_ExtendedS3DestinationUpdate = /* @__PURE__ */ __name((input, context) =>
   BufferingHints: import_smithy_client._json,
   CloudWatchLoggingOptions: import_smithy_client._json,
   CompressionFormat: [],
+  CustomTimeZone: [],
   DataFormatConversionConfiguration: (_) => se_DataFormatConversionConfiguration(_, context),
   DynamicPartitioningConfiguration: import_smithy_client._json,
   EncryptionConfiguration: import_smithy_client._json,
   ErrorOutputPrefix: [],
+  FileExtension: [],
   Prefix: [],
   ProcessingConfiguration: import_smithy_client._json,
   RoleARN: [],
@@ -1260,10 +1266,12 @@ var de_ExtendedS3DestinationDescription = /* @__PURE__ */ __name((output, contex
   BufferingHints: import_smithy_client._json,
   CloudWatchLoggingOptions: import_smithy_client._json,
   CompressionFormat: import_smithy_client.expectString,
+  CustomTimeZone: import_smithy_client.expectString,
   DataFormatConversionConfiguration: (_) => de_DataFormatConversionConfiguration(_, context),
   DynamicPartitioningConfiguration: import_smithy_client._json,
   EncryptionConfiguration: import_smithy_client._json,
   ErrorOutputPrefix: import_smithy_client.expectString,
+  FileExtension: import_smithy_client.expectString,
   Prefix: import_smithy_client.expectString,
   ProcessingConfiguration: import_smithy_client._json,
   RoleARN: import_smithy_client.expectString,
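The serializer and deserializer changes above add `CustomTimeZone` and `FileExtension` to the extended S3 destination shapes. A hedged sketch of passing the new fields through `UpdateDestinationCommand`; the stream name, version ID, destination ID, and field values are placeholders, and the surrounding fields required by your existing destination are omitted:

```javascript
import { FirehoseClient, UpdateDestinationCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({ region: "us-east-1" }); // placeholder region

// Hypothetical update setting the two new extended S3 fields.
const response = await client.send(
  new UpdateDestinationCommand({
    DeliveryStreamName: "example-stream",            // placeholder
    CurrentDeliveryStreamVersionId: "1",             // placeholder
    DestinationId: "destinationId-000000000001",     // placeholder
    ExtendedS3DestinationUpdate: {
      FileExtension: ".json.gz",                     // new in 3.516.0
      CustomTimeZone: "UTC",                         // new in 3.516.0
    },
  })
);
console.log(response.$metadata.httpStatusCode);
```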
@@ -4,6 +4,7 @@ export const ProcessorParameterName = {
   BUFFER_INTERVAL_IN_SECONDS: "BufferIntervalInSeconds",
   BUFFER_SIZE_IN_MB: "BufferSizeInMBs",
   COMPRESSION_FORMAT: "CompressionFormat",
+  DATA_MESSAGE_EXTRACTION: "DataMessageExtraction",
   Delimiter: "Delimiter",
   JSON_PARSING_ENGINE: "JsonParsingEngine",
   LAMBDA_ARN: "LambdaArn",
@@ -14,6 +15,7 @@ export const ProcessorParameterName = {
 };
 export const ProcessorType = {
   AppendDelimiterToRecord: "AppendDelimiterToRecord",
+  CloudWatchLogProcessing: "CloudWatchLogProcessing",
   Decompression: "Decompression",
   Lambda: "Lambda",
   MetadataExtraction: "MetadataExtraction",
@@ -375,10 +375,12 @@ const se_ExtendedS3DestinationConfiguration = (input, context) => {
   BufferingHints: _json,
   CloudWatchLoggingOptions: _json,
   CompressionFormat: [],
+  CustomTimeZone: [],
   DataFormatConversionConfiguration: (_) => se_DataFormatConversionConfiguration(_, context),
   DynamicPartitioningConfiguration: _json,
   EncryptionConfiguration: _json,
   ErrorOutputPrefix: [],
+  FileExtension: [],
   Prefix: [],
   ProcessingConfiguration: _json,
   RoleARN: [],
@@ -392,10 +394,12 @@ const se_ExtendedS3DestinationUpdate = (input, context) => {
   BufferingHints: _json,
   CloudWatchLoggingOptions: _json,
   CompressionFormat: [],
+  CustomTimeZone: [],
   DataFormatConversionConfiguration: (_) => se_DataFormatConversionConfiguration(_, context),
   DynamicPartitioningConfiguration: _json,
   EncryptionConfiguration: _json,
   ErrorOutputPrefix: [],
+  FileExtension: [],
   Prefix: [],
   ProcessingConfiguration: _json,
   RoleARN: [],
@@ -525,10 +529,12 @@ const de_ExtendedS3DestinationDescription = (output, context) => {
   BufferingHints: _json,
   CloudWatchLoggingOptions: _json,
   CompressionFormat: __expectString,
+  CustomTimeZone: __expectString,
   DataFormatConversionConfiguration: (_) => de_DataFormatConversionConfiguration(_, context),
   DynamicPartitioningConfiguration: _json,
   EncryptionConfiguration: _json,
   ErrorOutputPrefix: __expectString,
+  FileExtension: __expectString,
   Prefix: __expectString,
   ProcessingConfiguration: _json,
   RoleARN: __expectString,
@@ -88,8 +88,8 @@ export interface Firehose {
 }
 /**
  * @public
- * <fullname>Amazon Kinesis Data Firehose API Reference</fullname>
- * <p>Amazon Kinesis Data Firehose is a fully managed service that delivers real-time
+ * <fullname>Amazon Data Firehose</fullname>
+ * <p>Amazon Data Firehose is a fully managed service that delivers real-time
  * streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon
  * OpenSearch Service, Amazon Redshift, Splunk, and various other supportd
  * destinations.</p>
@@ -164,8 +164,8 @@ export interface FirehoseClientResolvedConfig extends FirehoseClientResolvedConf
 }
 /**
  * @public
- * <fullname>Amazon Kinesis Data Firehose API Reference</fullname>
- * <p>Amazon Kinesis Data Firehose is a fully managed service that delivers real-time
+ * <fullname>Amazon Data Firehose</fullname>
+ * <p>Amazon Data Firehose is a fully managed service that delivers real-time
  * streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon
  * OpenSearch Service, Amazon Redshift, Splunk, and various other supportd
  * destinations.</p>
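These docstring updates apply to both the aggregated `Firehose` client and the modular `FirehoseClient`. A minimal sketch of constructing the modular client; the region is a placeholder and the listed command is just one example of a call you might send:

```javascript
import { FirehoseClient, ListDeliveryStreamsCommand } from "@aws-sdk/client-firehose";

// Bare-bones modular client: import only the commands you need.
const client = new FirehoseClient({ region: "us-east-1" }); // placeholder region
const { DeliveryStreamNames } = await client.send(new ListDeliveryStreamsCommand({}));
console.log(DeliveryStreamNames);
```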
@@ -26,7 +26,7 @@ declare const CreateDeliveryStreamCommand_base: {
 };
 /**
  * @public
- * <p>Creates a Kinesis Data Firehose delivery stream.</p>
+ * <p>Creates a Firehose delivery stream.</p>
  * <p>By default, you can create up to 50 delivery streams per Amazon Web Services
  * Region.</p>
  * <p>This is an asynchronous operation that immediately returns. The initial status of the
@@ -39,7 +39,7 @@ declare const CreateDeliveryStreamCommand_base: {
  * doesn't change, and you can't invoke <code>CreateDeliveryStream</code> again on it.
  * However, you can invoke the <a>DeleteDeliveryStream</a> operation to delete
  * it.</p>
- * <p>A Kinesis Data Firehose delivery stream can be configured to receive records directly
+ * <p>A Firehose delivery stream can be configured to receive records directly
  * from providers using <a>PutRecord</a> or <a>PutRecordBatch</a>, or it
  * can be configured to use an existing Kinesis stream as its source. To specify a Kinesis
  * data stream as input, set the <code>DeliveryStreamType</code> parameter to
@@ -62,7 +62,7 @@ declare const CreateDeliveryStreamCommand_base: {
  * <p>When you specify <code>S3DestinationConfiguration</code>, you can also provide the
  * following optional values: BufferingHints, <code>EncryptionConfiguration</code>, and
  * <code>CompressionFormat</code>. By default, if no <code>BufferingHints</code> value is
- * provided, Kinesis Data Firehose buffers data up to 5 MB or for 5 minutes, whichever
+ * provided, Firehose buffers data up to 5 MB or for 5 minutes, whichever
  * condition is satisfied first. <code>BufferingHints</code> is a hint, so there are some
  * cases where the service cannot adhere to these conditions strictly. For example, record
  * boundaries might be such that the size is a little over or under the configured buffering
@@ -72,7 +72,7 @@ declare const CreateDeliveryStreamCommand_base: {
  * <ul>
  * <li>
  * <p>An Amazon Redshift destination requires an S3 bucket as intermediate location.
- * Kinesis Data Firehose first delivers data to Amazon S3 and then uses
+ * Firehose first delivers data to Amazon S3 and then uses
  * <code>COPY</code> syntax to load data into an Amazon Redshift table. This is
  * specified in the <code>RedshiftDestinationConfiguration.S3Configuration</code>
  * parameter.</p>
@@ -85,16 +85,14 @@ declare const CreateDeliveryStreamCommand_base: {
  * </li>
  * <li>
  * <p>We strongly recommend that you use the user name and password you provide
- * exclusively with Kinesis Data Firehose, and that the permissions for the account are
+ * exclusively with Firehose, and that the permissions for the account are
  * restricted for Amazon Redshift <code>INSERT</code> permissions.</p>
  * </li>
  * </ul>
- * <p>Kinesis Data Firehose assumes the IAM role that is configured as part of the
- * destination. The role should allow the Kinesis Data Firehose principal to assume the role,
+ * <p>Firehose assumes the IAM role that is configured as part of the
+ * destination. The role should allow the Firehose principal to assume the role,
  * and the role should have permissions that allow the service to deliver the data. For more
- * information, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3">Grant Kinesis Data
- * Firehose Access to an Amazon S3 Destination</a> in the <i>Amazon Kinesis Data
- * Firehose Developer Guide</i>.</p>
+ * information, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3">Grant Firehose Access to an Amazon S3 Destination</a> in the <i>Amazon Firehose Developer Guide</i>.</p>
  * @example
  * Use a bare-bones client and the command you need to make an API call.
  * ```javascript
@@ -159,10 +157,10 @@ declare const CreateDeliveryStreamCommand_base: {
  * Enabled: true || false,
  * Processors: [ // ProcessorList
  * { // Processor
- * Type: "RecordDeAggregation" || "Decompression" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
+ * Type: "RecordDeAggregation" || "Decompression" || "CloudWatchLogProcessing" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
  * Parameters: [ // ProcessorParameterList
  * { // ProcessorParameter
- * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat", // required
+ * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat" || "DataMessageExtraction", // required
  * ParameterValue: "STRING_VALUE", // required
  * },
  * ],
@@ -251,6 +249,8 @@ declare const CreateDeliveryStreamCommand_base: {
  * },
  * Enabled: true || false,
  * },
+ * FileExtension: "STRING_VALUE",
+ * CustomTimeZone: "STRING_VALUE",
  * },
  * RedshiftDestinationConfiguration: { // RedshiftDestinationConfiguration
  * RoleARN: "STRING_VALUE", // required
@@ -291,10 +291,10 @@ declare const CreateDeliveryStreamCommand_base: {
  * Enabled: true || false,
  * Processors: [
  * {
- * Type: "RecordDeAggregation" || "Decompression" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
+ * Type: "RecordDeAggregation" || "Decompression" || "CloudWatchLogProcessing" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
  * Parameters: [
  * {
- * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat", // required
+ * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat" || "DataMessageExtraction", // required
  * ParameterValue: "STRING_VALUE", // required
  * },
  * ],
@@ -355,10 +355,10 @@ declare const CreateDeliveryStreamCommand_base: {
  * Enabled: true || false,
  * Processors: [
  * {
- * Type: "RecordDeAggregation" || "Decompression" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
+ * Type: "RecordDeAggregation" || "Decompression" || "CloudWatchLogProcessing" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
  * Parameters: [
  * {
- * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat", // required
+ * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat" || "DataMessageExtraction", // required
  * ParameterValue: "STRING_VALUE", // required
  * },
  * ],
@@ -399,10 +399,10 @@ declare const CreateDeliveryStreamCommand_base: {
  * Enabled: true || false,
  * Processors: [
  * {
- * Type: "RecordDeAggregation" || "Decompression" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
+ * Type: "RecordDeAggregation" || "Decompression" || "CloudWatchLogProcessing" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
  * Parameters: [
  * {
- * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat", // required
+ * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat" || "DataMessageExtraction", // required
  * ParameterValue: "STRING_VALUE", // required
  * },
  * ],
@@ -437,10 +437,10 @@ declare const CreateDeliveryStreamCommand_base: {
  * Enabled: true || false,
  * Processors: [
  * {
- * Type: "RecordDeAggregation" || "Decompression" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
+ * Type: "RecordDeAggregation" || "Decompression" || "CloudWatchLogProcessing" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
  * Parameters: [
  * {
- * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat", // required
+ * ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat" || "DataMessageExtraction", // required
  * ParameterValue: "STRING_VALUE", // required
  * },
  * ],
@@ -566,7 +566,7 @@ declare const CreateDeliveryStreamCommand_base: {
  * <p>The specified input parameter has a value that is not valid.</p>
  *
  * @throws {@link InvalidKMSResourceException} (client fault)
- * <p>Kinesis Data Firehose throws this exception when an attempt to put records or to start
+ * <p>Firehose throws this exception when an attempt to put records or to start
  * or stop delivery stream encryption fails. This happens when the KMS service throws one of
  * the following exception types: <code>AccessDeniedException</code>,
  * <code>InvalidStateException</code>, <code>DisabledException</code>, or
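The updated input shape for `CreateDeliveryStreamCommand` includes the new `FileExtension` and `CustomTimeZone` fields on the extended S3 destination. A minimal sketch of a call that exercises them; the stream name, ARNs, and field values below are placeholders, not values from this diff:

```javascript
import { FirehoseClient, CreateDeliveryStreamCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({ region: "us-east-1" }); // placeholder region

// Hypothetical stream writing gzipped JSON objects with a custom file extension
// and a custom time zone (both fields are new in 3.516.0).
const response = await client.send(
  new CreateDeliveryStreamCommand({
    DeliveryStreamName: "example-stream",                        // placeholder
    DeliveryStreamType: "DirectPut",
    ExtendedS3DestinationConfiguration: {
      RoleARN: "arn:aws:iam::123456789012:role/firehose-role",   // placeholder
      BucketARN: "arn:aws:s3:::example-bucket",                  // placeholder
      CompressionFormat: "GZIP",
      FileExtension: ".json.gz",
      CustomTimeZone: "America/Los_Angeles",
    },
  })
);
console.log(response.DeliveryStreamARN);
```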
@@ -27,15 +27,18 @@ declare const DeleteDeliveryStreamCommand_base: {
 /**
  * @public
  * <p>Deletes a delivery stream and its data.</p>
- * <p>To check the state of a delivery stream, use <a>DescribeDeliveryStream</a>. You can delete a delivery stream only if it is in one of the following states:
+ * <p>You can delete a delivery stream only if it is in one of the following states:
  * <code>ACTIVE</code>, <code>DELETING</code>, <code>CREATING_FAILED</code>, or
  * <code>DELETING_FAILED</code>. You can't delete a delivery stream that is in the
- * <code>CREATING</code> state. While the deletion request is in process, the delivery
- * stream is in the <code>DELETING</code> state.</p>
- * <p>While the delivery stream is in the <code>DELETING</code> state, the service might
+ * <code>CREATING</code> state. To check the state of a delivery stream, use <a>DescribeDeliveryStream</a>. </p>
+ * <p>DeleteDeliveryStream is an asynchronous API. When an API request to DeleteDeliveryStream succeeds, the delivery stream is marked for deletion, and it goes into the
+ * <code>DELETING</code> state.While the delivery stream is in the <code>DELETING</code> state, the service might
  * continue to accept records, but it doesn't make any guarantees with respect to delivering
  * the data. Therefore, as a best practice, first stop any applications that are sending
  * records before you delete a delivery stream.</p>
+ * <p>Removal of a delivery stream that is in the <code>DELETING</code> state is a low priority operation for the service. A stream may remain in the
+ * <code>DELETING</code> state for several minutes. Therefore, as a best practice, applications should not wait for streams in the <code>DELETING</code> state
+ * to be removed. </p>
  * @example
  * Use a bare-bones client and the command you need to make an API call.
  * ```javascript
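Per the updated guidance above, DeleteDeliveryStream is asynchronous and callers should not block until the stream disappears. A hedged sketch, with the stream name and region as placeholders:

```javascript
import {
  FirehoseClient,
  DeleteDeliveryStreamCommand,
  DescribeDeliveryStreamCommand,
} from "@aws-sdk/client-firehose";

const client = new FirehoseClient({ region: "us-east-1" }); // placeholder region

// Mark the stream for deletion; the call returns before removal completes.
await client.send(new DeleteDeliveryStreamCommand({ DeliveryStreamName: "example-stream" }));

// Optional single status check; avoid polling until the stream is gone,
// since removal of a DELETING stream can take several minutes.
const { DeliveryStreamDescription } = await client.send(
  new DescribeDeliveryStreamCommand({ DeliveryStreamName: "example-stream" })
);
console.log(DeliveryStreamDescription?.DeliveryStreamStatus); // e.g. "DELETING"
```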
@@ -134,10 +134,10 @@ declare const DescribeDeliveryStreamCommand_base: {
  * // Enabled: true || false,
  * // Processors: [ // ProcessorList
  * // { // Processor
- * // Type: "RecordDeAggregation" || "Decompression" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
+ * // Type: "RecordDeAggregation" || "Decompression" || "CloudWatchLogProcessing" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
  * // Parameters: [ // ProcessorParameterList
  * // { // ProcessorParameter
- * // ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat", // required
+ * // ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat" || "DataMessageExtraction", // required
  * // ParameterValue: "STRING_VALUE", // required
  * // },
  * // ],
@@ -226,6 +226,8 @@ declare const DescribeDeliveryStreamCommand_base: {
  * // },
  * // Enabled: true || false,
  * // },
+ * // FileExtension: "STRING_VALUE",
+ * // CustomTimeZone: "STRING_VALUE",
  * // },
  * // RedshiftDestinationDescription: { // RedshiftDestinationDescription
  * // RoleARN: "STRING_VALUE", // required
@@ -265,10 +267,10 @@ declare const DescribeDeliveryStreamCommand_base: {
  * // Enabled: true || false,
  * // Processors: [
  * // {
- * // Type: "RecordDeAggregation" || "Decompression" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
+ * // Type: "RecordDeAggregation" || "Decompression" || "CloudWatchLogProcessing" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
  * // Parameters: [
  * // {
- * // ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat", // required
+ * // ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat" || "DataMessageExtraction", // required
  * // ParameterValue: "STRING_VALUE", // required
  * // },
  * // ],
@@ -329,10 +331,10 @@ declare const DescribeDeliveryStreamCommand_base: {
  * // Enabled: true || false,
  * // Processors: [
  * // {
- * // Type: "RecordDeAggregation" || "Decompression" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
+ * // Type: "RecordDeAggregation" || "Decompression" || "CloudWatchLogProcessing" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
  * // Parameters: [
  * // {
- * // ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat", // required
+ * // ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat" || "DataMessageExtraction", // required
  * // ParameterValue: "STRING_VALUE", // required
  * // },
  * // ],
@@ -374,10 +376,10 @@ declare const DescribeDeliveryStreamCommand_base: {
  * // Enabled: true || false,
  * // Processors: [
  * // {
- * // Type: "RecordDeAggregation" || "Decompression" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
+ * // Type: "RecordDeAggregation" || "Decompression" || "CloudWatchLogProcessing" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
  * // Parameters: [
  * // {
- * // ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat", // required
+ * // ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat" || "DataMessageExtraction", // required
  * // ParameterValue: "STRING_VALUE", // required
  * // },
  * // ],
@@ -413,10 +415,10 @@ declare const DescribeDeliveryStreamCommand_base: {
  * // Enabled: true || false,
  * // Processors: [
  * // {
- * // Type: "RecordDeAggregation" || "Decompression" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
+ * // Type: "RecordDeAggregation" || "Decompression" || "CloudWatchLogProcessing" || "Lambda" || "MetadataExtraction" || "AppendDelimiterToRecord", // required
  * // Parameters: [
  * // {
- * // ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat", // required
+ * // ParameterName: "LambdaArn" || "NumberOfRetries" || "MetadataExtractionQuery" || "JsonParsingEngine" || "RoleArn" || "BufferSizeInMBs" || "BufferIntervalInSeconds" || "SubRecordType" || "Delimiter" || "CompressionFormat" || "DataMessageExtraction", // required
  * // ParameterValue: "STRING_VALUE", // required
  * // },
  * // ],
@@ -413,10 +415,10 @@
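The DescribeDeliveryStream output example now surfaces `FileExtension` and `CustomTimeZone` on the extended S3 destination description. A hedged sketch of reading them back, assuming the first destination of the stream is an extended S3 destination; stream name and region are placeholders:

```javascript
import { FirehoseClient, DescribeDeliveryStreamCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({ region: "us-east-1" }); // placeholder region

const { DeliveryStreamDescription } = await client.send(
  new DescribeDeliveryStreamCommand({ DeliveryStreamName: "example-stream" }) // placeholder
);

// Assumes the first destination is an extended S3 destination.
const s3Destination =
  DeliveryStreamDescription?.Destinations?.[0]?.ExtendedS3DestinationDescription;
console.log(s3Destination?.FileExtension, s3Destination?.CustomTimeZone);
```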
@@ -30,8 +30,8 @@ declare const PutRecordBatchCommand_base: {
  * achieve higher throughput per producer than when writing single records. To write single
  * data records into a delivery stream, use <a>PutRecord</a>. Applications using
  * these operations are referred to as producers.</p>
- * <p>Kinesis Data Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.</p>
- * <p>For information about service quota, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/limits.html">Amazon Kinesis Data Firehose
+ * <p>Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.</p>
+ * <p>For information about service quota, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/limits.html">Amazon Firehose
  * Quota</a>.</p>
  * <p>Each <a>PutRecordBatch</a> request supports up to 500 records. Each record
  * in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB
@@ -39,7 +39,7 @@ declare const PutRecordBatchCommand_base: {
  * <p>You must specify the name of the delivery stream and the data record when using <a>PutRecord</a>. The data record consists of a data blob that can be up to 1,000
  * KB in size, and any kind of data. For example, it could be a segment from a log file,
  * geographic location data, website clickstream data, and so on.</p>
- * <p>Kinesis Data Firehose buffers records before delivering them to the destination. To
+ * <p>Firehose buffers records before delivering them to the destination. To
  * disambiguate the data blobs at the destination, a common solution is to use delimiters in
  * the data, such as a newline (<code>\n</code>) or some other character unique within the
  * data. This allows the consumer application to parse individual data items when reading the
@@ -53,7 +53,7 @@ declare const PutRecordBatchCommand_base: {
  * record in the request array using the same ordering, from the top to the bottom. The
  * response array always includes the same number of records as the request array.
  * <code>RequestResponses</code> includes both successfully and unsuccessfully processed
- * records. Kinesis Data Firehose tries to process all records in each <a>PutRecordBatch</a> request. A single record failure does not stop the processing
+ * records. Firehose tries to process all records in each <a>PutRecordBatch</a> request. A single record failure does not stop the processing
  * of subsequent records. </p>
  * <p>A successfully processed record includes a <code>RecordId</code> value, which is
  * unique for the record. An unsuccessfully processed record includes <code>ErrorCode</code>
@@ -72,7 +72,7 @@ declare const PutRecordBatchCommand_base: {
  * <p>Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can
  * result in data duplicates. For larger data assets, allow for a longer time out before
  * retrying Put API operations.</p>
- * <p>Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they
+ * <p>Data records sent to Firehose are stored for 24 hours from the time they
  * are added to a delivery stream as it attempts to send the records to the destination. If
  * the destination is unreachable for more than 24 hours, the data is no longer
  * available.</p>
@@ -120,7 +120,7 @@ declare const PutRecordBatchCommand_base: {
  * <p>The specified input parameter has a value that is not valid.</p>
  *
  * @throws {@link InvalidKMSResourceException} (client fault)
- * <p>Kinesis Data Firehose throws this exception when an attempt to put records or to start
+ * <p>Firehose throws this exception when an attempt to put records or to start
  * or stop delivery stream encryption fails. This happens when the KMS service throws one of
  * the following exception types: <code>AccessDeniedException</code>,
  * <code>InvalidStateException</code>, <code>DisabledException</code>, or
@@ -135,7 +135,7 @@ declare const PutRecordBatchCommand_base: {
  * @throws {@link ServiceUnavailableException} (server fault)
  * <p>The service is unavailable. Back off and retry the operation. If you continue to see
  * the exception, throughput limits for the delivery stream may have been exceeded. For more
- * information about limits and how to request an increase, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/limits.html">Amazon Kinesis Data Firehose
+ * information about limits and how to request an increase, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/limits.html">Amazon Firehose
  * Limits</a>.</p>
  *
  * @throws {@link FirehoseServiceException}
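The docstring above recommends delimiting blobs (for example with a newline) so consumers can split records at the destination. A minimal sketch of a batched put that follows that advice; the stream name, region, and sample events are placeholders:

```javascript
import { FirehoseClient, PutRecordBatchCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({ region: "us-east-1" }); // placeholder region

// Newline-delimited JSON records so the destination can disambiguate blobs.
const events = [{ level: "info", msg: "started" }, { level: "warn", msg: "retrying" }];
const { FailedPutCount, RequestResponses } = await client.send(
  new PutRecordBatchCommand({
    DeliveryStreamName: "example-stream", // placeholder
    Records: events.map((e) => ({
      Data: new TextEncoder().encode(JSON.stringify(e) + "\n"),
    })),
  })
);
// FailedPutCount > 0 means some records need to be retried individually.
console.log(FailedPutCount, RequestResponses?.length);
```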
@@ -26,7 +26,7 @@ declare const PutRecordCommand_base: {
 };
 /**
  * @public
- * <p>Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To
+ * <p>Writes a single data record into an Amazon Firehose delivery stream. To
  * write multiple data records into a delivery stream, use <a>PutRecordBatch</a>.
  * Applications using these operations are referred to as producers.</p>
  * <p>By default, each delivery stream can take in up to 2,000 transactions per second,
@@ -34,12 +34,12 @@ declare const PutRecordCommand_base: {
  * <a>PutRecordBatch</a>, the limits are an aggregate across these two
  * operations for each delivery stream. For more information about limits and how to request
  * an increase, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/limits.html">Amazon
- * Kinesis Data Firehose Limits</a>. </p>
- * <p>Kinesis Data Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.</p>
+ * Firehose Limits</a>. </p>
+ * <p>Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.</p>
  * <p>You must specify the name of the delivery stream and the data record when using <a>PutRecord</a>. The data record consists of a data blob that can be up to 1,000
  * KiB in size, and any kind of data. For example, it can be a segment from a log file,
  * geographic location data, website clickstream data, and so on.</p>
- * <p>Kinesis Data Firehose buffers records before delivering them to the destination. To
+ * <p>Firehose buffers records before delivering them to the destination. To
  * disambiguate the data blobs at the destination, a common solution is to use delimiters in
  * the data, such as a newline (<code>\n</code>) or some other character unique within the
  * data. This allows the consumer application to parse individual data items when reading the
@@ -54,7 +54,7 @@ declare const PutRecordCommand_base: {
  * <p>Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can
  * result in data duplicates. For larger data assets, allow for a longer time out before
  * retrying Put API operations.</p>
- * <p>Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they
+ * <p>Data records sent to Firehose are stored for 24 hours from the time they
  * are added to a delivery stream as it tries to send the records to the destination. If the
  * destination is unreachable for more than 24 hours, the data is no longer
  * available.</p>
@@ -93,7 +93,7 @@ declare const PutRecordCommand_base: {
  * <p>The specified input parameter has a value that is not valid.</p>
  *
  * @throws {@link InvalidKMSResourceException} (client fault)
- * <p>Kinesis Data Firehose throws this exception when an attempt to put records or to start
+ * <p>Firehose throws this exception when an attempt to put records or to start
  * or stop delivery stream encryption fails. This happens when the KMS service throws one of
  * the following exception types: <code>AccessDeniedException</code>,
  * <code>InvalidStateException</code>, <code>DisabledException</code>, or
@@ -108,7 +108,7 @@ declare const PutRecordCommand_base: {
  * @throws {@link ServiceUnavailableException} (server fault)
  * <p>The service is unavailable. Back off and retry the operation. If you continue to see
  * the exception, throughput limits for the delivery stream may have been exceeded. For more
- * information about limits and how to request an increase, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/limits.html">Amazon Kinesis Data Firehose
+ * information about limits and how to request an increase, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/limits.html">Amazon Firehose
  * Limits</a>.</p>
  *
  * @throws {@link FirehoseServiceException}
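A minimal sketch of the single-record counterpart, again newline-terminating the blob as the docstring suggests; the stream name and payload are placeholders:

```javascript
import { FirehoseClient, PutRecordCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({ region: "us-east-1" }); // placeholder region

// Single record, newline-terminated so downstream consumers can split blobs.
const { RecordId, Encrypted } = await client.send(
  new PutRecordCommand({
    DeliveryStreamName: "example-stream", // placeholder
    Record: { Data: new TextEncoder().encode(JSON.stringify({ msg: "hello" }) + "\n") },
  })
);
console.log(RecordId, Encrypted);
```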
@@ -27,8 +27,7 @@ declare const StartDeliveryStreamEncryptionCommand_base: {
 /**
  * @public
  * <p>Enables server-side encryption (SSE) for the delivery stream. </p>
- * <p>This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data
- * Firehose first sets the encryption status of the stream to <code>ENABLING</code>, and then
+ * <p>This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to <code>ENABLING</code>, and then
  * to <code>ENABLED</code>. The encryption status of a delivery stream is the
  * <code>Status</code> property in <a>DeliveryStreamEncryptionConfiguration</a>.
  * If the operation fails, the encryption status changes to <code>ENABLING_FAILED</code>. You
@@ -42,18 +41,18 @@ declare const StartDeliveryStreamEncryptionCommand_base: {
  * <p>Even if encryption is currently enabled for a delivery stream, you can still invoke this
  * operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this
  * method to change the CMK, and the old CMK is of type <code>CUSTOMER_MANAGED_CMK</code>,
- * Kinesis Data Firehose schedules the grant it had on the old CMK for retirement. If the new
- * CMK is of type <code>CUSTOMER_MANAGED_CMK</code>, Kinesis Data Firehose creates a grant
+ * Firehose schedules the grant it had on the old CMK for retirement. If the new
+ * CMK is of type <code>CUSTOMER_MANAGED_CMK</code>, Firehose creates a grant
  * that enables it to use the new CMK to encrypt and decrypt data and to manage the
  * grant.</p>
- * <p>For the KMS grant creation to be successful, Kinesis Data Firehose APIs <code>StartDeliveryStreamEncryption</code> and <code>CreateDeliveryStream</code> should not be called with session credentials that are more than 6 hours old.</p>
+ * <p>For the KMS grant creation to be successful, Firehose APIs <code>StartDeliveryStreamEncryption</code> and <code>CreateDeliveryStream</code> should not be called with session credentials that are more than 6 hours old.</p>
  * <p>If a delivery stream already has encryption enabled and then you invoke this operation
  * to change the ARN of the CMK or both its type and ARN and you get
  * <code>ENABLING_FAILED</code>, this only means that the attempt to change the CMK failed.
  * In this case, encryption remains enabled with the old CMK.</p>
  * <p>If the encryption status of your delivery stream is <code>ENABLING_FAILED</code>, you
  * can invoke this operation again with a valid CMK. The CMK must be enabled and the key
- * policy mustn't explicitly deny the permission for Kinesis Data Firehose to invoke KMS
+ * policy mustn't explicitly deny the permission for Firehose to invoke KMS
  * encrypt and decrypt operations.</p>
  * <p>You can enable SSE for a delivery stream only if it's a delivery stream that uses
  * <code>DirectPut</code> as its source. </p>
@@ -92,7 +91,7 @@ declare const StartDeliveryStreamEncryptionCommand_base: {
  * <p>The specified input parameter has a value that is not valid.</p>
  *
  * @throws {@link InvalidKMSResourceException} (client fault)
- * <p>Kinesis Data Firehose throws this exception when an attempt to put records or to start
+ * <p>Firehose throws this exception when an attempt to put records or to start
  * or stop delivery stream encryption fails. This happens when the KMS service throws one of
  * the following exception types: <code>AccessDeniedException</code>,
  * <code>InvalidStateException</code>, <code>DisabledException</code>, or
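A hedged sketch of enabling SSE with a customer managed CMK, as the docstring describes; the stream name, region, key ARN, and the exact input member names are assumptions drawn from the client's model rather than from this diff:

```javascript
import { FirehoseClient, StartDeliveryStreamEncryptionCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({ region: "us-east-1" }); // placeholder region

// Asynchronous: the stream moves through ENABLING to ENABLED (or ENABLING_FAILED).
await client.send(
  new StartDeliveryStreamEncryptionCommand({
    DeliveryStreamName: "example-stream", // placeholder
    DeliveryStreamEncryptionConfigurationInput: {
      KeyType: "CUSTOMER_MANAGED_CMK",
      KeyARN: "arn:aws:kms:us-east-1:123456789012:key/00000000-0000-0000-0000-000000000000", // placeholder
    },
  })
);
```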
@@ -27,8 +27,7 @@ declare const StopDeliveryStreamEncryptionCommand_base: {
 /**
  * @public
  * <p>Disables server-side encryption (SSE) for the delivery stream. </p>
- * <p>This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data
- * Firehose first sets the encryption status of the stream to <code>DISABLING</code>, and then
+ * <p>This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to <code>DISABLING</code>, and then
  * to <code>DISABLED</code>. You can continue to read and write data to your stream while its
  * status is <code>DISABLING</code>. It can take up to 5 seconds after the encryption status
  * changes to <code>DISABLED</code> before all records written to the delivery stream are no
@@ -37,7 +36,7 @@ declare const StopDeliveryStreamEncryptionCommand_base: {
  * <a>PutRecordBatchOutput$Encrypted</a>, respectively.</p>
  * <p>To check the encryption state of a delivery stream, use <a>DescribeDeliveryStream</a>. </p>
  * <p>If SSE is enabled using a customer managed CMK and then you invoke
- * <code>StopDeliveryStreamEncryption</code>, Kinesis Data Firehose schedules the related
+ * <code>StopDeliveryStreamEncryption</code>, Firehose schedules the related
  * KMS grant for retirement and then retires it after it ensures that it is finished
  * delivering records to the destination.</p>
  * <p>The <code>StartDeliveryStreamEncryption</code> and
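And the matching sketch for disabling SSE, with the stream name and region as placeholders:

```javascript
import { FirehoseClient, StopDeliveryStreamEncryptionCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({ region: "us-east-1" }); // placeholder region

// Asynchronous: the stream transitions through DISABLING to DISABLED.
await client.send(
  new StopDeliveryStreamEncryptionCommand({ DeliveryStreamName: "example-stream" }) // placeholder
);
```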