aws-sdk-firehose 1.63.0 → 1.64.0

@@ -398,7 +398,7 @@ module Aws::Firehose

  # @!group API Operations

- # Creates a Kinesis Data Firehose delivery stream.
+ # Creates a Firehose delivery stream.
  #
  # By default, you can create up to 50 delivery streams per Amazon Web
  # Services Region.
@@ -416,12 +416,12 @@ module Aws::Firehose
  # it. However, you can invoke the DeleteDeliveryStream operation to
  # delete it.
  #
- # A Kinesis Data Firehose delivery stream can be configured to receive
- # records directly from providers using PutRecord or PutRecordBatch, or
- # it can be configured to use an existing Kinesis stream as its source.
- # To specify a Kinesis data stream as input, set the
- # `DeliveryStreamType` parameter to `KinesisStreamAsSource`, and provide
- # the Kinesis stream Amazon Resource Name (ARN) and role ARN in the
+ # A Firehose delivery stream can be configured to receive records
+ # directly from providers using PutRecord or PutRecordBatch, or it can
+ # be configured to use an existing Kinesis stream as its source. To
+ # specify a Kinesis data stream as input, set the `DeliveryStreamType`
+ # parameter to `KinesisStreamAsSource`, and provide the Kinesis stream
+ # Amazon Resource Name (ARN) and role ARN in the
  # `KinesisStreamSourceConfiguration` parameter.
  #
  # To create a delivery stream with server-side encryption (SSE) enabled,
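
The rename above is cosmetic, but the `KinesisStreamAsSource` wiring it describes is easy to get wrong. A minimal Ruby sketch of that call, with hypothetical stream, role, and bucket ARNs:

    require "aws-sdk-firehose"

    client = Aws::Firehose::Client.new(region: "us-east-1")

    # Read from an existing Kinesis data stream instead of direct PutRecord calls.
    client.create_delivery_stream(
      delivery_stream_name: "example-stream",
      delivery_stream_type: "KinesisStreamAsSource",
      kinesis_stream_source_configuration: {
        kinesis_stream_arn: "arn:aws:kinesis:us-east-1:111122223333:stream/source-stream",
        role_arn: "arn:aws:iam::111122223333:role/firehose-source-role",
      },
      extended_s3_destination_configuration: {
        role_arn: "arn:aws:iam::111122223333:role/firehose-delivery-role",
        bucket_arn: "arn:aws:s3:::example-destination-bucket",
      },
    )
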
@@ -445,8 +445,8 @@ module Aws::Firehose
  # When you specify `S3DestinationConfiguration`, you can also provide
  # the following optional values: BufferingHints,
  # `EncryptionConfiguration`, and `CompressionFormat`. By default, if no
- # `BufferingHints` value is provided, Kinesis Data Firehose buffers data
- # up to 5 MB or for 5 minutes, whichever condition is satisfied first.
+ # `BufferingHints` value is provided, Firehose buffers data up to 5 MB
+ # or for 5 minutes, whichever condition is satisfied first.
  # `BufferingHints` is a hint, so there are some cases where the service
  # cannot adhere to these conditions strictly. For example, record
  # boundaries might be such that the size is a little over or under the
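
For callers who want explicit flushing behavior rather than relying on the default described above, a sketch of spelled-out hints (ARNs hypothetical; the values shown simply restate the 5 MB / 5 minute defaults):

    extended_s3_destination_configuration = {
      role_arn: "arn:aws:iam::111122223333:role/firehose-delivery-role",
      bucket_arn: "arn:aws:s3:::example-destination-bucket",
      buffering_hints: {
        size_in_mbs: 5,           # deliver once roughly 5 MB is buffered...
        interval_in_seconds: 300, # ...or after 5 minutes, whichever comes first
      },
    }
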
@@ -457,10 +457,10 @@ module Aws::Firehose
  # A few notes about Amazon Redshift as a destination:
  #
  # * An Amazon Redshift destination requires an S3 bucket as intermediate
- # location. Kinesis Data Firehose first delivers data to Amazon S3 and
- # then uses `COPY` syntax to load data into an Amazon Redshift table.
- # This is specified in the
- # `RedshiftDestinationConfiguration.S3Configuration` parameter.
+ # location. Firehose first delivers data to Amazon S3 and then uses
+ # `COPY` syntax to load data into an Amazon Redshift table. This is
+ # specified in the `RedshiftDestinationConfiguration.S3Configuration`
+ # parameter.
  #
  # * The compression formats `SNAPPY` or `ZIP` cannot be specified in
  # `RedshiftDestinationConfiguration.S3Configuration` because the
@@ -468,16 +468,15 @@ module Aws::Firehose
  # doesn't support these compression formats.
  #
  # * We strongly recommend that you use the user name and password you
- # provide exclusively with Kinesis Data Firehose, and that the
- # permissions for the account are restricted for Amazon Redshift
- # `INSERT` permissions.
- #
- # Kinesis Data Firehose assumes the IAM role that is configured as part
- # of the destination. The role should allow the Kinesis Data Firehose
- # principal to assume the role, and the role should have permissions
- # that allow the service to deliver the data. For more information, see
- # [Grant Kinesis Data Firehose Access to an Amazon S3 Destination][1] in
- # the *Amazon Kinesis Data Firehose Developer Guide*.
+ # provide exclusively with Firehose, and that the permissions for the
+ # account are restricted for Amazon Redshift `INSERT` permissions.
+ #
+ # Firehose assumes the IAM role that is configured as part of the
+ # destination. The role should allow the Firehose principal to assume
+ # the role, and the role should have permissions that allow the service
+ # to deliver the data. For more information, see [Grant Firehose Access
+ # to an Amazon S3 Destination][1] in the *Amazon Firehose Developer
+ # Guide*.
  #
  #
  #
@@ -624,10 +623,10 @@ module Aws::Firehose
  # enabled: false,
  # processors: [
  # {
- # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, Lambda, MetadataExtraction, AppendDelimiterToRecord
+ # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, CloudWatchLogProcessing, Lambda, MetadataExtraction, AppendDelimiterToRecord
  # parameters: [
  # {
- # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat
+ # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction
  # parameter_value: "ProcessorParameterValue", # required
  # },
  # ],
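
The two new enum values (`CloudWatchLogProcessing` and `DataMessageExtraction`) are the visible surface of this release's CloudWatch Logs processing support. A hedged sketch of how they might be combined; pairing `DataMessageExtraction` with the `CloudWatchLogProcessing` processor is an assumption drawn from the enum names, not something this diff states:

    processing_configuration = {
      enabled: true,
      processors: [
        # Decompress gzip-delivered CloudWatch Logs records first.
        { type: "Decompression",
          parameters: [{ parameter_name: "CompressionFormat", parameter_value: "GZIP" }] },
        # Then extract the raw log message from the CloudWatch Logs envelope (assumed pairing).
        { type: "CloudWatchLogProcessing",
          parameters: [{ parameter_name: "DataMessageExtraction", parameter_value: "true" }] },
      ],
    }
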
@@ -712,6 +711,8 @@ module Aws::Firehose
  # },
  # enabled: false,
  # },
+ # file_extension: "FileExtension",
+ # custom_time_zone: "CustomTimeZone",
  # },
  # redshift_destination_configuration: {
  # role_arn: "RoleARN", # required
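
`file_extension` and `custom_time_zone` are new in 1.64.0 for the extended S3 destination. A sketch with assumed values (AWS expects the extension to begin with a period, and the time zone to be a tz-database name):

    extended_s3_destination_configuration = {
      role_arn: "arn:aws:iam::111122223333:role/firehose-delivery-role",
      bucket_arn: "arn:aws:s3:::example-destination-bucket",
      custom_time_zone: "America/Los_Angeles", # time zone used when expanding the S3 prefix
      file_extension: ".json.gz",              # overrides the default object extension
    }
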
@@ -752,10 +753,10 @@ module Aws::Firehose
  # enabled: false,
  # processors: [
  # {
- # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, Lambda, MetadataExtraction, AppendDelimiterToRecord
+ # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, CloudWatchLogProcessing, Lambda, MetadataExtraction, AppendDelimiterToRecord
  # parameters: [
  # {
- # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat
+ # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction
  # parameter_value: "ProcessorParameterValue", # required
  # },
  # ],
@@ -832,10 +833,10 @@ module Aws::Firehose
  # enabled: false,
  # processors: [
  # {
- # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, Lambda, MetadataExtraction, AppendDelimiterToRecord
+ # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, CloudWatchLogProcessing, Lambda, MetadataExtraction, AppendDelimiterToRecord
  # parameters: [
  # {
- # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat
+ # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction
  # parameter_value: "ProcessorParameterValue", # required
  # },
  # ],
@@ -897,10 +898,10 @@ module Aws::Firehose
  # enabled: false,
  # processors: [
  # {
- # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, Lambda, MetadataExtraction, AppendDelimiterToRecord
+ # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, CloudWatchLogProcessing, Lambda, MetadataExtraction, AppendDelimiterToRecord
  # parameters: [
  # {
- # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat
+ # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction
  # parameter_value: "ProcessorParameterValue", # required
  # },
  # ],
@@ -956,10 +957,10 @@ module Aws::Firehose
  # enabled: false,
  # processors: [
  # {
- # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, Lambda, MetadataExtraction, AppendDelimiterToRecord
+ # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, CloudWatchLogProcessing, Lambda, MetadataExtraction, AppendDelimiterToRecord
  # parameters: [
  # {
- # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat
+ # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction
  # parameter_value: "ProcessorParameterValue", # required
  # },
  # ],
@@ -1004,10 +1005,10 @@ module Aws::Firehose
  # enabled: false,
  # processors: [
  # {
- # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, Lambda, MetadataExtraction, AppendDelimiterToRecord
+ # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, CloudWatchLogProcessing, Lambda, MetadataExtraction, AppendDelimiterToRecord
  # parameters: [
  # {
- # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat
+ # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction
  # parameter_value: "ProcessorParameterValue", # required
  # },
  # ],
@@ -1086,10 +1087,10 @@ module Aws::Firehose
  # enabled: false,
  # processors: [
  # {
- # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, Lambda, MetadataExtraction, AppendDelimiterToRecord
+ # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, CloudWatchLogProcessing, Lambda, MetadataExtraction, AppendDelimiterToRecord
  # parameters: [
  # {
- # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat
+ # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction
  # parameter_value: "ProcessorParameterValue", # required
  # },
  # ],
@@ -1142,10 +1143,10 @@ module Aws::Firehose
  # enabled: false,
  # processors: [
  # {
- # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, Lambda, MetadataExtraction, AppendDelimiterToRecord
+ # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, CloudWatchLogProcessing, Lambda, MetadataExtraction, AppendDelimiterToRecord
  # parameters: [
  # {
- # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat
+ # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction
  # parameter_value: "ProcessorParameterValue", # required
  # },
  # ],
@@ -1197,31 +1198,38 @@ module Aws::Firehose

  # Deletes a delivery stream and its data.
  #
- # To check the state of a delivery stream, use DescribeDeliveryStream.
  # You can delete a delivery stream only if it is in one of the following
  # states: `ACTIVE`, `DELETING`, `CREATING_FAILED`, or `DELETING_FAILED`.
  # You can't delete a delivery stream that is in the `CREATING` state.
- # While the deletion request is in process, the delivery stream is in
- # the `DELETING` state.
+ # To check the state of a delivery stream, use DescribeDeliveryStream.
  #
- # While the delivery stream is in the `DELETING` state, the service
- # might continue to accept records, but it doesn't make any guarantees
- # with respect to delivering the data. Therefore, as a best practice,
- # first stop any applications that are sending records before you delete
- # a delivery stream.
+ # DeleteDeliveryStream is an asynchronous API. When an API request to
+ # DeleteDeliveryStream succeeds, the delivery stream is marked for
+ # deletion, and it goes into the `DELETING` state.While the delivery
+ # stream is in the `DELETING` state, the service might continue to
+ # accept records, but it doesn't make any guarantees with respect to
+ # delivering the data. Therefore, as a best practice, first stop any
+ # applications that are sending records before you delete a delivery
+ # stream.
+ #
+ # Removal of a delivery stream that is in the `DELETING` state is a low
+ # priority operation for the service. A stream may remain in the
+ # `DELETING` state for several minutes. Therefore, as a best practice,
+ # applications should not wait for streams in the `DELETING` state to be
+ # removed.
  #
  # @option params [required, String] :delivery_stream_name
  # The name of the delivery stream.
  #
  # @option params [Boolean] :allow_force_delete
  # Set this to true if you want to delete the delivery stream even if
- # Kinesis Data Firehose is unable to retire the grant for the CMK.
- # Kinesis Data Firehose might be unable to retire the grant due to a
- # customer error, such as when the CMK or the grant are in an invalid
- # state. If you force deletion, you can then use the [RevokeGrant][1]
- # operation to revoke the grant you gave to Kinesis Data Firehose. If a
- # failure to retire the grant happens due to an Amazon Web Services KMS
- # issue, Kinesis Data Firehose keeps retrying the delete operation.
+ # Firehose is unable to retire the grant for the CMK. Firehose might be
+ # unable to retire the grant due to a customer error, such as when the
+ # CMK or the grant are in an invalid state. If you force deletion, you
+ # can then use the [RevokeGrant][1] operation to revoke the grant you
+ # gave to Firehose. If a failure to retire the grant happens due to an
+ # Amazon Web Services KMS issue, Firehose keeps retrying the delete
+ # operation.
  #
  # The default value is false.
  #
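
A minimal sketch of the force-delete path described above (stream name hypothetical). Because deletion is asynchronous and low priority, the follow-up describe only observes the `DELETING` state; don't block on the stream disappearing:

    client.delete_delivery_stream(
      delivery_stream_name: "example-stream",
      allow_force_delete: true, # proceed even if the KMS grant cannot be retired
    )

    client.describe_delivery_stream(delivery_stream_name: "example-stream")
          .delivery_stream_description
          .delivery_stream_status #=> "DELETING"
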
@@ -1268,8 +1276,7 @@ module Aws::Firehose
  #
  # @option params [String] :exclusive_start_destination_id
  # The ID of the destination to start returning the destination
- # information. Kinesis Data Firehose supports one destination per
- # delivery stream.
+ # information. Firehose supports one destination per delivery stream.
  #
  # @return [Types::DescribeDeliveryStreamOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
  #
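
In context, a sketch of the option (destination ID hypothetical; since Firehose supports one destination per delivery stream, the parameter is rarely needed in practice):

    resp = client.describe_delivery_stream(
      delivery_stream_name: "example-stream",
      limit: 1,
      exclusive_start_destination_id: "destinationId-000000000001",
    )
    resp.delivery_stream_description.destinations.map(&:destination_id)
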
@@ -1335,9 +1342,9 @@ module Aws::Firehose
  # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.cloud_watch_logging_options.log_stream_name #=> String
  # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.processing_configuration.enabled #=> Boolean
  # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.processing_configuration.processors #=> Array
- # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.processing_configuration.processors[0].type #=> String, one of "RecordDeAggregation", "Decompression", "Lambda", "MetadataExtraction", "AppendDelimiterToRecord"
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.processing_configuration.processors[0].type #=> String, one of "RecordDeAggregation", "Decompression", "CloudWatchLogProcessing", "Lambda", "MetadataExtraction", "AppendDelimiterToRecord"
  # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.processing_configuration.processors[0].parameters #=> Array
- # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.processing_configuration.processors[0].parameters[0].parameter_name #=> String, one of "LambdaArn", "NumberOfRetries", "MetadataExtractionQuery", "JsonParsingEngine", "RoleArn", "BufferSizeInMBs", "BufferIntervalInSeconds", "SubRecordType", "Delimiter", "CompressionFormat"
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.processing_configuration.processors[0].parameters[0].parameter_name #=> String, one of "LambdaArn", "NumberOfRetries", "MetadataExtractionQuery", "JsonParsingEngine", "RoleArn", "BufferSizeInMBs", "BufferIntervalInSeconds", "SubRecordType", "Delimiter", "CompressionFormat", "DataMessageExtraction"
  # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.processing_configuration.processors[0].parameters[0].parameter_value #=> String
  # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.s3_backup_mode #=> String, one of "Disabled", "Enabled"
  # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.s3_backup_description.role_arn #=> String
@@ -1384,6 +1391,8 @@ module Aws::Firehose
  # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.enabled #=> Boolean
  # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.dynamic_partitioning_configuration.retry_options.duration_in_seconds #=> Integer
  # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.dynamic_partitioning_configuration.enabled #=> Boolean
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.file_extension #=> String
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.custom_time_zone #=> String
  # resp.delivery_stream_description.destinations[0].redshift_destination_description.role_arn #=> String
  # resp.delivery_stream_description.destinations[0].redshift_destination_description.cluster_jdbc_url #=> String
  # resp.delivery_stream_description.destinations[0].redshift_destination_description.copy_command.data_table_name #=> String
@@ -1405,9 +1414,9 @@ module Aws::Firehose
  # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.cloud_watch_logging_options.log_stream_name #=> String
  # resp.delivery_stream_description.destinations[0].redshift_destination_description.processing_configuration.enabled #=> Boolean
  # resp.delivery_stream_description.destinations[0].redshift_destination_description.processing_configuration.processors #=> Array
- # resp.delivery_stream_description.destinations[0].redshift_destination_description.processing_configuration.processors[0].type #=> String, one of "RecordDeAggregation", "Decompression", "Lambda", "MetadataExtraction", "AppendDelimiterToRecord"
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.processing_configuration.processors[0].type #=> String, one of "RecordDeAggregation", "Decompression", "CloudWatchLogProcessing", "Lambda", "MetadataExtraction", "AppendDelimiterToRecord"
  # resp.delivery_stream_description.destinations[0].redshift_destination_description.processing_configuration.processors[0].parameters #=> Array
- # resp.delivery_stream_description.destinations[0].redshift_destination_description.processing_configuration.processors[0].parameters[0].parameter_name #=> String, one of "LambdaArn", "NumberOfRetries", "MetadataExtractionQuery", "JsonParsingEngine", "RoleArn", "BufferSizeInMBs", "BufferIntervalInSeconds", "SubRecordType", "Delimiter", "CompressionFormat"
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.processing_configuration.processors[0].parameters[0].parameter_name #=> String, one of "LambdaArn", "NumberOfRetries", "MetadataExtractionQuery", "JsonParsingEngine", "RoleArn", "BufferSizeInMBs", "BufferIntervalInSeconds", "SubRecordType", "Delimiter", "CompressionFormat", "DataMessageExtraction"
  # resp.delivery_stream_description.destinations[0].redshift_destination_description.processing_configuration.processors[0].parameters[0].parameter_value #=> String
  # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_backup_mode #=> String, one of "Disabled", "Enabled"
  # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_backup_description.role_arn #=> String
@@ -1449,9 +1458,9 @@ module Aws::Firehose
  # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.cloud_watch_logging_options.log_stream_name #=> String
  # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.processing_configuration.enabled #=> Boolean
  # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.processing_configuration.processors #=> Array
- # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.processing_configuration.processors[0].type #=> String, one of "RecordDeAggregation", "Decompression", "Lambda", "MetadataExtraction", "AppendDelimiterToRecord"
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.processing_configuration.processors[0].type #=> String, one of "RecordDeAggregation", "Decompression", "CloudWatchLogProcessing", "Lambda", "MetadataExtraction", "AppendDelimiterToRecord"
  # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.processing_configuration.processors[0].parameters #=> Array
- # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.processing_configuration.processors[0].parameters[0].parameter_name #=> String, one of "LambdaArn", "NumberOfRetries", "MetadataExtractionQuery", "JsonParsingEngine", "RoleArn", "BufferSizeInMBs", "BufferIntervalInSeconds", "SubRecordType", "Delimiter", "CompressionFormat"
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.processing_configuration.processors[0].parameters[0].parameter_name #=> String, one of "LambdaArn", "NumberOfRetries", "MetadataExtractionQuery", "JsonParsingEngine", "RoleArn", "BufferSizeInMBs", "BufferIntervalInSeconds", "SubRecordType", "Delimiter", "CompressionFormat", "DataMessageExtraction"
  # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.processing_configuration.processors[0].parameters[0].parameter_value #=> String
  # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.cloud_watch_logging_options.enabled #=> Boolean
  # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.cloud_watch_logging_options.log_group_name #=> String
@@ -1487,9 +1496,9 @@ module Aws::Firehose
  # resp.delivery_stream_description.destinations[0].amazonopensearchservice_destination_description.s3_destination_description.cloud_watch_logging_options.log_stream_name #=> String
  # resp.delivery_stream_description.destinations[0].amazonopensearchservice_destination_description.processing_configuration.enabled #=> Boolean
  # resp.delivery_stream_description.destinations[0].amazonopensearchservice_destination_description.processing_configuration.processors #=> Array
- # resp.delivery_stream_description.destinations[0].amazonopensearchservice_destination_description.processing_configuration.processors[0].type #=> String, one of "RecordDeAggregation", "Decompression", "Lambda", "MetadataExtraction", "AppendDelimiterToRecord"
+ # resp.delivery_stream_description.destinations[0].amazonopensearchservice_destination_description.processing_configuration.processors[0].type #=> String, one of "RecordDeAggregation", "Decompression", "CloudWatchLogProcessing", "Lambda", "MetadataExtraction", "AppendDelimiterToRecord"
  # resp.delivery_stream_description.destinations[0].amazonopensearchservice_destination_description.processing_configuration.processors[0].parameters #=> Array
- # resp.delivery_stream_description.destinations[0].amazonopensearchservice_destination_description.processing_configuration.processors[0].parameters[0].parameter_name #=> String, one of "LambdaArn", "NumberOfRetries", "MetadataExtractionQuery", "JsonParsingEngine", "RoleArn", "BufferSizeInMBs", "BufferIntervalInSeconds", "SubRecordType", "Delimiter", "CompressionFormat"
+ # resp.delivery_stream_description.destinations[0].amazonopensearchservice_destination_description.processing_configuration.processors[0].parameters[0].parameter_name #=> String, one of "LambdaArn", "NumberOfRetries", "MetadataExtractionQuery", "JsonParsingEngine", "RoleArn", "BufferSizeInMBs", "BufferIntervalInSeconds", "SubRecordType", "Delimiter", "CompressionFormat", "DataMessageExtraction"
  # resp.delivery_stream_description.destinations[0].amazonopensearchservice_destination_description.processing_configuration.processors[0].parameters[0].parameter_value #=> String
  # resp.delivery_stream_description.destinations[0].amazonopensearchservice_destination_description.cloud_watch_logging_options.enabled #=> Boolean
  # resp.delivery_stream_description.destinations[0].amazonopensearchservice_destination_description.cloud_watch_logging_options.log_group_name #=> String
@@ -1521,9 +1530,9 @@ module Aws::Firehose
  # resp.delivery_stream_description.destinations[0].splunk_destination_description.s3_destination_description.cloud_watch_logging_options.log_stream_name #=> String
  # resp.delivery_stream_description.destinations[0].splunk_destination_description.processing_configuration.enabled #=> Boolean
  # resp.delivery_stream_description.destinations[0].splunk_destination_description.processing_configuration.processors #=> Array
- # resp.delivery_stream_description.destinations[0].splunk_destination_description.processing_configuration.processors[0].type #=> String, one of "RecordDeAggregation", "Decompression", "Lambda", "MetadataExtraction", "AppendDelimiterToRecord"
+ # resp.delivery_stream_description.destinations[0].splunk_destination_description.processing_configuration.processors[0].type #=> String, one of "RecordDeAggregation", "Decompression", "CloudWatchLogProcessing", "Lambda", "MetadataExtraction", "AppendDelimiterToRecord"
  # resp.delivery_stream_description.destinations[0].splunk_destination_description.processing_configuration.processors[0].parameters #=> Array
- # resp.delivery_stream_description.destinations[0].splunk_destination_description.processing_configuration.processors[0].parameters[0].parameter_name #=> String, one of "LambdaArn", "NumberOfRetries", "MetadataExtractionQuery", "JsonParsingEngine", "RoleArn", "BufferSizeInMBs", "BufferIntervalInSeconds", "SubRecordType", "Delimiter", "CompressionFormat"
+ # resp.delivery_stream_description.destinations[0].splunk_destination_description.processing_configuration.processors[0].parameters[0].parameter_name #=> String, one of "LambdaArn", "NumberOfRetries", "MetadataExtractionQuery", "JsonParsingEngine", "RoleArn", "BufferSizeInMBs", "BufferIntervalInSeconds", "SubRecordType", "Delimiter", "CompressionFormat", "DataMessageExtraction"
  # resp.delivery_stream_description.destinations[0].splunk_destination_description.processing_configuration.processors[0].parameters[0].parameter_value #=> String
  # resp.delivery_stream_description.destinations[0].splunk_destination_description.cloud_watch_logging_options.enabled #=> Boolean
  # resp.delivery_stream_description.destinations[0].splunk_destination_description.cloud_watch_logging_options.log_group_name #=> String
@@ -1543,9 +1552,9 @@ module Aws::Firehose
  # resp.delivery_stream_description.destinations[0].http_endpoint_destination_description.request_configuration.common_attributes[0].attribute_value #=> String
  # resp.delivery_stream_description.destinations[0].http_endpoint_destination_description.processing_configuration.enabled #=> Boolean
  # resp.delivery_stream_description.destinations[0].http_endpoint_destination_description.processing_configuration.processors #=> Array
- # resp.delivery_stream_description.destinations[0].http_endpoint_destination_description.processing_configuration.processors[0].type #=> String, one of "RecordDeAggregation", "Decompression", "Lambda", "MetadataExtraction", "AppendDelimiterToRecord"
+ # resp.delivery_stream_description.destinations[0].http_endpoint_destination_description.processing_configuration.processors[0].type #=> String, one of "RecordDeAggregation", "Decompression", "CloudWatchLogProcessing", "Lambda", "MetadataExtraction", "AppendDelimiterToRecord"
  # resp.delivery_stream_description.destinations[0].http_endpoint_destination_description.processing_configuration.processors[0].parameters #=> Array
- # resp.delivery_stream_description.destinations[0].http_endpoint_destination_description.processing_configuration.processors[0].parameters[0].parameter_name #=> String, one of "LambdaArn", "NumberOfRetries", "MetadataExtractionQuery", "JsonParsingEngine", "RoleArn", "BufferSizeInMBs", "BufferIntervalInSeconds", "SubRecordType", "Delimiter", "CompressionFormat"
+ # resp.delivery_stream_description.destinations[0].http_endpoint_destination_description.processing_configuration.processors[0].parameters[0].parameter_name #=> String, one of "LambdaArn", "NumberOfRetries", "MetadataExtractionQuery", "JsonParsingEngine", "RoleArn", "BufferSizeInMBs", "BufferIntervalInSeconds", "SubRecordType", "Delimiter", "CompressionFormat", "DataMessageExtraction"
  # resp.delivery_stream_description.destinations[0].http_endpoint_destination_description.processing_configuration.processors[0].parameters[0].parameter_value #=> String
  # resp.delivery_stream_description.destinations[0].http_endpoint_destination_description.role_arn #=> String
  # resp.delivery_stream_description.destinations[0].http_endpoint_destination_description.retry_options.duration_in_seconds #=> Integer
@@ -1578,9 +1587,9 @@ module Aws::Firehose
  # resp.delivery_stream_description.destinations[0].snowflake_destination_description.cloud_watch_logging_options.log_stream_name #=> String
  # resp.delivery_stream_description.destinations[0].snowflake_destination_description.processing_configuration.enabled #=> Boolean
  # resp.delivery_stream_description.destinations[0].snowflake_destination_description.processing_configuration.processors #=> Array
- # resp.delivery_stream_description.destinations[0].snowflake_destination_description.processing_configuration.processors[0].type #=> String, one of "RecordDeAggregation", "Decompression", "Lambda", "MetadataExtraction", "AppendDelimiterToRecord"
+ # resp.delivery_stream_description.destinations[0].snowflake_destination_description.processing_configuration.processors[0].type #=> String, one of "RecordDeAggregation", "Decompression", "CloudWatchLogProcessing", "Lambda", "MetadataExtraction", "AppendDelimiterToRecord"
  # resp.delivery_stream_description.destinations[0].snowflake_destination_description.processing_configuration.processors[0].parameters #=> Array
- # resp.delivery_stream_description.destinations[0].snowflake_destination_description.processing_configuration.processors[0].parameters[0].parameter_name #=> String, one of "LambdaArn", "NumberOfRetries", "MetadataExtractionQuery", "JsonParsingEngine", "RoleArn", "BufferSizeInMBs", "BufferIntervalInSeconds", "SubRecordType", "Delimiter", "CompressionFormat"
+ # resp.delivery_stream_description.destinations[0].snowflake_destination_description.processing_configuration.processors[0].parameters[0].parameter_name #=> String, one of "LambdaArn", "NumberOfRetries", "MetadataExtractionQuery", "JsonParsingEngine", "RoleArn", "BufferSizeInMBs", "BufferIntervalInSeconds", "SubRecordType", "Delimiter", "CompressionFormat", "DataMessageExtraction"
  # resp.delivery_stream_description.destinations[0].snowflake_destination_description.processing_configuration.processors[0].parameters[0].parameter_value #=> String
  # resp.delivery_stream_description.destinations[0].snowflake_destination_description.role_arn #=> String
  # resp.delivery_stream_description.destinations[0].snowflake_destination_description.retry_options.duration_in_seconds #=> Integer
@@ -1618,9 +1627,9 @@ module Aws::Firehose
  # resp.delivery_stream_description.destinations[0].amazon_open_search_serverless_destination_description.s3_destination_description.cloud_watch_logging_options.log_stream_name #=> String
  # resp.delivery_stream_description.destinations[0].amazon_open_search_serverless_destination_description.processing_configuration.enabled #=> Boolean
  # resp.delivery_stream_description.destinations[0].amazon_open_search_serverless_destination_description.processing_configuration.processors #=> Array
- # resp.delivery_stream_description.destinations[0].amazon_open_search_serverless_destination_description.processing_configuration.processors[0].type #=> String, one of "RecordDeAggregation", "Decompression", "Lambda", "MetadataExtraction", "AppendDelimiterToRecord"
+ # resp.delivery_stream_description.destinations[0].amazon_open_search_serverless_destination_description.processing_configuration.processors[0].type #=> String, one of "RecordDeAggregation", "Decompression", "CloudWatchLogProcessing", "Lambda", "MetadataExtraction", "AppendDelimiterToRecord"
  # resp.delivery_stream_description.destinations[0].amazon_open_search_serverless_destination_description.processing_configuration.processors[0].parameters #=> Array
- # resp.delivery_stream_description.destinations[0].amazon_open_search_serverless_destination_description.processing_configuration.processors[0].parameters[0].parameter_name #=> String, one of "LambdaArn", "NumberOfRetries", "MetadataExtractionQuery", "JsonParsingEngine", "RoleArn", "BufferSizeInMBs", "BufferIntervalInSeconds", "SubRecordType", "Delimiter", "CompressionFormat"
+ # resp.delivery_stream_description.destinations[0].amazon_open_search_serverless_destination_description.processing_configuration.processors[0].parameters[0].parameter_name #=> String, one of "LambdaArn", "NumberOfRetries", "MetadataExtractionQuery", "JsonParsingEngine", "RoleArn", "BufferSizeInMBs", "BufferIntervalInSeconds", "SubRecordType", "Delimiter", "CompressionFormat", "DataMessageExtraction"
  # resp.delivery_stream_description.destinations[0].amazon_open_search_serverless_destination_description.processing_configuration.processors[0].parameters[0].parameter_value #=> String
  # resp.delivery_stream_description.destinations[0].amazon_open_search_serverless_destination_description.cloud_watch_logging_options.enabled #=> Boolean
  # resp.delivery_stream_description.destinations[0].amazon_open_search_serverless_destination_description.cloud_watch_logging_options.log_group_name #=> String
@@ -1642,6 +1651,36 @@ module Aws::Firehose
  req.send_request(options)
  end

+ # @option params [required, String] :delivery_stream_arn
+ #
+ # @return [Types::GetKinesisStreamOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::GetKinesisStreamOutput#kinesis_stream_arn #kinesis_stream_arn} => String
+ # * {Types::GetKinesisStreamOutput#credentials_for_reading_kinesis_stream #credentials_for_reading_kinesis_stream} => Types::SessionCredentials
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.get_kinesis_stream({
+ # delivery_stream_arn: "DeliveryStreamARN", # required
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.kinesis_stream_arn #=> String
+ # resp.credentials_for_reading_kinesis_stream.access_key_id #=> String
+ # resp.credentials_for_reading_kinesis_stream.secret_access_key #=> String
+ # resp.credentials_for_reading_kinesis_stream.session_token #=> String
+ # resp.credentials_for_reading_kinesis_stream.expiration #=> Time
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/GetKinesisStream AWS API Documentation
+ #
+ # @overload get_kinesis_stream(params = {})
+ # @param [Hash] params ({})
+ def get_kinesis_stream(params = {}, options = {})
+ req = build_request(:get_kinesis_stream, params)
+ req.send_request(options)
+ end
+
  # Lists your delivery streams in alphabetical order of their names.
  #
  # The number of delivery streams might be too large to return using a
@@ -1749,24 +1788,23 @@ module Aws::Firehose
  req.send_request(options)
  end

- # Writes a single data record into an Amazon Kinesis Data Firehose
- # delivery stream. To write multiple data records into a delivery
- # stream, use PutRecordBatch. Applications using these operations are
- # referred to as producers.
+ # Writes a single data record into an Amazon Firehose delivery stream.
+ # To write multiple data records into a delivery stream, use
+ # PutRecordBatch. Applications using these operations are referred to as
+ # producers.
  #
  # By default, each delivery stream can take in up to 2,000 transactions
  # per second, 5,000 records per second, or 5 MB per second. If you use
  # PutRecord and PutRecordBatch, the limits are an aggregate across these
  # two operations for each delivery stream. For more information about
- # limits and how to request an increase, see [Amazon Kinesis Data
- # Firehose Limits][1].
+ # limits and how to request an increase, see [Amazon Firehose
+ # Limits][1].
  #
- # Kinesis Data Firehose accumulates and publishes a particular metric
- # for a customer account in one minute intervals. It is possible that
- # the bursts of incoming bytes/records ingested to a delivery stream
- # last only for a few seconds. Due to this, the actual spikes in the
- # traffic might not be fully visible in the customer's 1 minute
- # CloudWatch metrics.
+ # Firehose accumulates and publishes a particular metric for a customer
+ # account in one minute intervals. It is possible that the bursts of
+ # incoming bytes/records ingested to a delivery stream last only for a
+ # few seconds. Due to this, the actual spikes in the traffic might not
+ # be fully visible in the customer's 1 minute CloudWatch metrics.
  #
  # You must specify the name of the delivery stream and the data record
  # when using PutRecord. The data record consists of a data blob that can
@@ -1774,12 +1812,12 @@ module Aws::Firehose
  # be a segment from a log file, geographic location data, website
  # clickstream data, and so on.
  #
- # Kinesis Data Firehose buffers records before delivering them to the
- # destination. To disambiguate the data blobs at the destination, a
- # common solution is to use delimiters in the data, such as a newline
- # (`\n`) or some other character unique within the data. This allows the
- # consumer application to parse individual data items when reading the
- # data from the destination.
+ # Firehose buffers records before delivering them to the destination. To
+ # disambiguate the data blobs at the destination, a common solution is
+ # to use delimiters in the data, such as a newline (`\n`) or some other
+ # character unique within the data. This allows the consumer application
+ # to parse individual data items when reading the data from the
+ # destination.
  #
  # The `PutRecord` operation returns a `RecordId`, which is a unique
  # string assigned to each record. Producer applications can use this ID
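
A sketch of the delimiter convention described above, with a hypothetical stream name:

    require "json"

    # Terminate each blob with a newline so consumers can split the concatenated objects.
    payload = { event: "page_view", at: Time.now.to_i }.to_json + "\n"

    resp = client.put_record(
      delivery_stream_name: "example-stream",
      record: { data: payload },
    )
    resp.record_id # unique ID assigned to this record
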
@@ -1794,10 +1832,10 @@ module Aws::Firehose
  # PutRecordBatch) can result in data duplicates. For larger data assets,
  # allow for a longer time out before retrying Put API operations.
  #
- # Data records sent to Kinesis Data Firehose are stored for 24 hours
- # from the time they are added to a delivery stream as it tries to send
- # the records to the destination. If the destination is unreachable for
- # more than 24 hours, the data is no longer available.
+ # Data records sent to Firehose are stored for 24 hours from the time
+ # they are added to a delivery stream as it tries to send the records to
+ # the destination. If the destination is unreachable for more than 24
+ # hours, the data is no longer available.
  #
  # Don't concatenate two or more base64 strings to form the data fields
  # of your records. Instead, concatenate the raw data, then perform
@@ -1847,15 +1885,13 @@ module Aws::Firehose
  # use PutRecord. Applications using these operations are referred to as
  # producers.
  #
- # Kinesis Data Firehose accumulates and publishes a particular metric
- # for a customer account in one minute intervals. It is possible that
- # the bursts of incoming bytes/records ingested to a delivery stream
- # last only for a few seconds. Due to this, the actual spikes in the
- # traffic might not be fully visible in the customer's 1 minute
- # CloudWatch metrics.
+ # Firehose accumulates and publishes a particular metric for a customer
+ # account in one minute intervals. It is possible that the bursts of
+ # incoming bytes/records ingested to a delivery stream last only for a
+ # few seconds. Due to this, the actual spikes in the traffic might not
+ # be fully visible in the customer's 1 minute CloudWatch metrics.
  #
- # For information about service quota, see [Amazon Kinesis Data Firehose
- # Quota][1].
+ # For information about service quota, see [Amazon Firehose Quota][1].
  #
  # Each PutRecordBatch request supports up to 500 records. Each record in
  # the request can be as large as 1,000 KB (before base64 encoding), up
@@ -1868,12 +1904,12 @@ module Aws::Firehose
  # be a segment from a log file, geographic location data, website
  # clickstream data, and so on.
  #
- # Kinesis Data Firehose buffers records before delivering them to the
- # destination. To disambiguate the data blobs at the destination, a
- # common solution is to use delimiters in the data, such as a newline
- # (`\n`) or some other character unique within the data. This allows the
- # consumer application to parse individual data items when reading the
- # data from the destination.
+ # Firehose buffers records before delivering them to the destination. To
+ # disambiguate the data blobs at the destination, a common solution is
+ # to use delimiters in the data, such as a newline (`\n`) or some other
+ # character unique within the data. This allows the consumer application
+ # to parse individual data items when reading the data from the
+ # destination.
  #
  # The PutRecordBatch response includes a count of failed records,
  # `FailedPutCount`, and an array of responses, `RequestResponses`. Even
@@ -1885,9 +1921,9 @@ module Aws::Firehose
  # ordering, from the top to the bottom. The response array always
  # includes the same number of records as the request array.
  # `RequestResponses` includes both successfully and unsuccessfully
- # processed records. Kinesis Data Firehose tries to process all records
- # in each PutRecordBatch request. A single record failure does not stop
- # the processing of subsequent records.
+ # processed records. Firehose tries to process all records in each
+ # PutRecordBatch request. A single record failure does not stop the
+ # processing of subsequent records.
  #
  # A successfully processed record includes a `RecordId` value, which is
  # unique for the record. An unsuccessfully processed record includes
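
Because `RequestResponses` is positional, retry logic can zip it against the request array. A sketch (stream name and `events` array hypothetical):

    records = events.map { |e| { data: e + "\n" } }

    resp = client.put_record_batch(
      delivery_stream_name: "example-stream",
      records: records,
    )

    if resp.failed_put_count > 0
      # Entry i of request_responses corresponds to records[i]; resend only failures.
      retryable = records.zip(resp.request_responses)
                         .select { |_, r| r.error_code }
                         .map(&:first)
    end
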
@@ -1912,10 +1948,10 @@ module Aws::Firehose
  # PutRecordBatch) can result in data duplicates. For larger data assets,
  # allow for a longer time out before retrying Put API operations.
  #
- # Data records sent to Kinesis Data Firehose are stored for 24 hours
- # from the time they are added to a delivery stream as it attempts to
- # send the records to the destination. If the destination is unreachable
- # for more than 24 hours, the data is no longer available.
+ # Data records sent to Firehose are stored for 24 hours from the time
+ # they are added to a delivery stream as it attempts to send the records
+ # to the destination. If the destination is unreachable for more than 24
+ # hours, the data is no longer available.
  #
  # Don't concatenate two or more base64 strings to form the data fields
  # of your records. Instead, concatenate the raw data, then perform
@@ -1969,9 +2005,9 @@ module Aws::Firehose
  # Enables server-side encryption (SSE) for the delivery stream.
  #
  # This operation is asynchronous. It returns immediately. When you
- # invoke it, Kinesis Data Firehose first sets the encryption status of
- # the stream to `ENABLING`, and then to `ENABLED`. The encryption status
- # of a delivery stream is the `Status` property in
+ # invoke it, Firehose first sets the encryption status of the stream to
+ # `ENABLING`, and then to `ENABLED`. The encryption status of a delivery
+ # stream is the `Status` property in
  # DeliveryStreamEncryptionConfiguration. If the operation fails, the
  # encryption status changes to `ENABLING_FAILED`. You can continue to
  # read and write data to your delivery stream while the encryption
@@ -1988,15 +2024,14 @@ module Aws::Firehose
  # Even if encryption is currently enabled for a delivery stream, you can
  # still invoke this operation on it to change the ARN of the CMK or both
  # its type and ARN. If you invoke this method to change the CMK, and the
- # old CMK is of type `CUSTOMER_MANAGED_CMK`, Kinesis Data Firehose
- # schedules the grant it had on the old CMK for retirement. If the new
- # CMK is of type `CUSTOMER_MANAGED_CMK`, Kinesis Data Firehose creates a
- # grant that enables it to use the new CMK to encrypt and decrypt data
- # and to manage the grant.
+ # old CMK is of type `CUSTOMER_MANAGED_CMK`, Firehose schedules the
+ # grant it had on the old CMK for retirement. If the new CMK is of type
+ # `CUSTOMER_MANAGED_CMK`, Firehose creates a grant that enables it to
+ # use the new CMK to encrypt and decrypt data and to manage the grant.
  #
- # For the KMS grant creation to be successful, Kinesis Data Firehose
- # APIs `StartDeliveryStreamEncryption` and `CreateDeliveryStream` should
- # not be called with session credentials that are more than 6 hours old.
+ # For the KMS grant creation to be successful, Firehose APIs
+ # `StartDeliveryStreamEncryption` and `CreateDeliveryStream` should not
+ # be called with session credentials that are more than 6 hours old.
  #
  # If a delivery stream already has encryption enabled and then you
  # invoke this operation to change the ARN of the CMK or both its type
@@ -2007,7 +2042,7 @@ module Aws::Firehose
  # If the encryption status of your delivery stream is `ENABLING_FAILED`,
  # you can invoke this operation again with a valid CMK. The CMK must be
  # enabled and the key policy mustn't explicitly deny the permission for
- # Kinesis Data Firehose to invoke KMS encrypt and decrypt operations.
+ # Firehose to invoke KMS encrypt and decrypt operations.
  #
  # You can enable SSE for a delivery stream only if it's a delivery
  # stream that uses `DirectPut` as its source.
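
Putting the SSE workflow above into code, with a hypothetical customer managed key:

    client.start_delivery_stream_encryption(
      delivery_stream_name: "example-stream",
      delivery_stream_encryption_configuration_input: {
        key_type: "CUSTOMER_MANAGED_CMK",
        key_arn: "arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab",
      },
    )

    # The call returns immediately; poll the Status property afterwards.
    client.describe_delivery_stream(delivery_stream_name: "example-stream")
          .delivery_stream_description
          .delivery_stream_encryption_configuration
          .status #=> "ENABLING", then "ENABLED" (or "ENABLING_FAILED")
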
@@ -2051,13 +2086,13 @@ module Aws::Firehose
  # Disables server-side encryption (SSE) for the delivery stream.
  #
  # This operation is asynchronous. It returns immediately. When you
- # invoke it, Kinesis Data Firehose first sets the encryption status of
- # the stream to `DISABLING`, and then to `DISABLED`. You can continue to
- # read and write data to your stream while its status is `DISABLING`. It
- # can take up to 5 seconds after the encryption status changes to
- # `DISABLED` before all records written to the delivery stream are no
- # longer subject to encryption. To find out whether a record or a batch
- # of records was encrypted, check the response elements
+ # invoke it, Firehose first sets the encryption status of the stream to
+ # `DISABLING`, and then to `DISABLED`. You can continue to read and
+ # write data to your stream while its status is `DISABLING`. It can take
+ # up to 5 seconds after the encryption status changes to `DISABLED`
+ # before all records written to the delivery stream are no longer
+ # subject to encryption. To find out whether a record or a batch of
+ # records was encrypted, check the response elements
  # PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted,
  # respectively.
  #
@@ -2065,9 +2100,9 @@ module Aws::Firehose
  # DescribeDeliveryStream.
  #
  # If SSE is enabled using a customer managed CMK and then you invoke
- # `StopDeliveryStreamEncryption`, Kinesis Data Firehose schedules the
- # related KMS grant for retirement and then retires it after it ensures
- # that it is finished delivering records to the destination.
+ # `StopDeliveryStreamEncryption`, Firehose schedules the related KMS
+ # grant for retirement and then retires it after it ensures that it is
+ # finished delivering records to the destination.
  #
  # The `StartDeliveryStreamEncryption` and `StopDeliveryStreamEncryption`
  # operations have a combined limit of 25 calls per delivery stream per
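
The per-call `Encrypted` flags mentioned above are the reliable signal during the transition. A short sketch:

    client.stop_delivery_stream_encryption(delivery_stream_name: "example-stream")

    # Records can remain encrypted briefly after the status reaches DISABLED.
    resp = client.put_record(
      delivery_stream_name: "example-stream",
      record: { data: "sample\n" },
    )
    resp.encrypted #=> true while records are still encrypted, false afterwards
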
@@ -2194,25 +2229,24 @@ module Aws::Firehose
  # supported. For an Amazon OpenSearch Service destination, you can only
  # update to another Amazon OpenSearch Service destination.
  #
- # If the destination type is the same, Kinesis Data Firehose merges the
- # configuration parameters specified with the destination configuration
- # that already exists on the delivery stream. If any of the parameters
- # are not specified in the call, the existing values are retained. For
- # example, in the Amazon S3 destination, if EncryptionConfiguration is
- # not specified, then the existing `EncryptionConfiguration` is
- # maintained on the destination.
+ # If the destination type is the same, Firehose merges the configuration
+ # parameters specified with the destination configuration that already
+ # exists on the delivery stream. If any of the parameters are not
+ # specified in the call, the existing values are retained. For example,
+ # in the Amazon S3 destination, if EncryptionConfiguration is not
+ # specified, then the existing `EncryptionConfiguration` is maintained
+ # on the destination.
  #
  # If the destination type is not the same, for example, changing the
- # destination from Amazon S3 to Amazon Redshift, Kinesis Data Firehose
- # does not merge any parameters. In this case, all parameters must be
- # specified.
- #
- # Kinesis Data Firehose uses `CurrentDeliveryStreamVersionId` to avoid
- # race conditions and conflicting merges. This is a required field, and
- # the service updates the configuration only if the existing
- # configuration has a version ID that matches. After the update is
- # applied successfully, the version ID is updated, and can be retrieved
- # using DescribeDeliveryStream. Use the new version ID to set
+ # destination from Amazon S3 to Amazon Redshift, Firehose does not merge
+ # any parameters. In this case, all parameters must be specified.
+ #
+ # Firehose uses `CurrentDeliveryStreamVersionId` to avoid race
+ # conditions and conflicting merges. This is a required field, and the
+ # service updates the configuration only if the existing configuration
+ # has a version ID that matches. After the update is applied
+ # successfully, the version ID is updated, and can be retrieved using
+ # DescribeDeliveryStream. Use the new version ID to set
  # `CurrentDeliveryStreamVersionId` in the next call.
  #
  # @option params [required, String] :delivery_stream_name
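
The version-ID handshake described above, sketched end to end (stream name hypothetical; the update shown flips one of the fields added in this release):

    desc = client.describe_delivery_stream(delivery_stream_name: "example-stream")
                 .delivery_stream_description

    client.update_destination(
      delivery_stream_name: "example-stream",
      current_delivery_stream_version_id: desc.version_id, # optimistic-concurrency token
      destination_id: desc.destinations[0].destination_id,
      extended_s3_destination_update: {
        file_extension: ".ndjson",
      },
    )
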
@@ -2313,10 +2347,10 @@ module Aws::Firehose
  # enabled: false,
  # processors: [
  # {
- # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, Lambda, MetadataExtraction, AppendDelimiterToRecord
+ # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, CloudWatchLogProcessing, Lambda, MetadataExtraction, AppendDelimiterToRecord
  # parameters: [
  # {
- # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat
+ # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction
  # parameter_value: "ProcessorParameterValue", # required
  # },
  # ],
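
The two enum additions in this hunk look designed to pair up: the `CloudWatchLogProcessing` processor type with the `DataMessageExtraction` parameter name. A minimal sketch of how they would slot into a processing configuration; the pairing and the string value are inferred from the names in this diff, not from documented behavior:

    processing_configuration = {
      enabled: true,
      processors: [
        {
          # New in 1.64.0; presumably extracts the data message from
          # CloudWatch Logs subscription records (pairing inferred).
          type: 'CloudWatchLogProcessing',
          parameters: [
            { parameter_name: 'DataMessageExtraction', parameter_value: 'true' }
          ]
        }
      ]
    }
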
@@ -2401,6 +2435,8 @@ module Aws::Firehose
  # },
  # enabled: false,
  # },
+ # file_extension: "FileExtension",
+ # custom_time_zone: "CustomTimeZone",
  # },
  # redshift_destination_update: {
  # role_arn: "RoleARN",
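
The two new extended-S3 fields give callers control over output naming that was previously fixed: `file_extension` overrides the extension Firehose derives from the compression format, and `custom_time_zone` changes the time zone applied to timestamps in S3 prefixes (UTC by default). A sketch with illustrative values, dropped into the `extended_s3_destination_update` shape shown above:

    extended_s3_destination_update: {
      file_extension: '.json.gz',          # override; expected to begin with a period
      custom_time_zone: 'America/New_York' # IANA time zone name; default is UTC
    }
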
@@ -2441,10 +2477,10 @@ module Aws::Firehose
  # enabled: false,
  # processors: [
  # {
- # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, Lambda, MetadataExtraction, AppendDelimiterToRecord
+ # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, CloudWatchLogProcessing, Lambda, MetadataExtraction, AppendDelimiterToRecord
  # parameters: [
  # {
- # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat
+ # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction
  # parameter_value: "ProcessorParameterValue", # required
  # },
  # ],
@@ -2520,10 +2556,10 @@ module Aws::Firehose
  # enabled: false,
  # processors: [
  # {
- # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, Lambda, MetadataExtraction, AppendDelimiterToRecord
+ # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, CloudWatchLogProcessing, Lambda, MetadataExtraction, AppendDelimiterToRecord
  # parameters: [
  # {
- # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat
+ # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction
  # parameter_value: "ProcessorParameterValue", # required
  # },
  # ],
@@ -2579,10 +2615,10 @@ module Aws::Firehose
  # enabled: false,
  # processors: [
  # {
- # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, Lambda, MetadataExtraction, AppendDelimiterToRecord
+ # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, CloudWatchLogProcessing, Lambda, MetadataExtraction, AppendDelimiterToRecord
  # parameters: [
  # {
- # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat
+ # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction
  # parameter_value: "ProcessorParameterValue", # required
  # },
  # ],
@@ -2633,10 +2669,10 @@ module Aws::Firehose
  # enabled: false,
  # processors: [
  # {
- # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, Lambda, MetadataExtraction, AppendDelimiterToRecord
+ # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, CloudWatchLogProcessing, Lambda, MetadataExtraction, AppendDelimiterToRecord
  # parameters: [
  # {
- # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat
+ # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction
  # parameter_value: "ProcessorParameterValue", # required
  # },
  # ],
@@ -2681,10 +2717,10 @@ module Aws::Firehose
  # enabled: false,
  # processors: [
  # {
- # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, Lambda, MetadataExtraction, AppendDelimiterToRecord
+ # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, CloudWatchLogProcessing, Lambda, MetadataExtraction, AppendDelimiterToRecord
  # parameters: [
  # {
- # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat
+ # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction
  # parameter_value: "ProcessorParameterValue", # required
  # },
  # ],
@@ -2756,10 +2792,10 @@ module Aws::Firehose
  # enabled: false,
  # processors: [
  # {
- # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, Lambda, MetadataExtraction, AppendDelimiterToRecord
+ # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, CloudWatchLogProcessing, Lambda, MetadataExtraction, AppendDelimiterToRecord
  # parameters: [
  # {
- # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat
+ # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction
  # parameter_value: "ProcessorParameterValue", # required
  # },
  # ],
@@ -2796,10 +2832,10 @@ module Aws::Firehose
  # enabled: false,
  # processors: [
  # {
- # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, Lambda, MetadataExtraction, AppendDelimiterToRecord
+ # type: "RecordDeAggregation", # required, accepts RecordDeAggregation, Decompression, CloudWatchLogProcessing, Lambda, MetadataExtraction, AppendDelimiterToRecord
  # parameters: [
  # {
- # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat
+ # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction
  # parameter_value: "ProcessorParameterValue", # required
  # },
  # ],
@@ -2845,6 +2881,39 @@ module Aws::Firehose
  req.send_request(options)
  end

+ # @option params [required, Array<Types::TagrisSweepListItem>] :tagris_sweep_list
+ #
+ # @return [Types::TagrisVerifyResourcesExistOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::TagrisVerifyResourcesExistOutput#tagris_sweep_list_result #tagris_sweep_list_result} => Hash<String,String>
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.verify_resources_exist_for_tagris({
+ # tagris_sweep_list: [ # required
+ # {
+ # tagris_account_id: "TagrisAccountId",
+ # tagris_amazon_resource_name: "TagrisAmazonResourceName",
+ # tagris_internal_id: "TagrisInternalId",
+ # tagris_version: 1,
+ # },
+ # ],
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.tagris_sweep_list_result #=> Hash
+ # resp.tagris_sweep_list_result["TagrisAmazonResourceName"] #=> String, one of "ACTIVE", "NOT_ACTIVE"
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/VerifyResourcesExistForTagris AWS API Documentation
+ #
+ # @overload verify_resources_exist_for_tagris(params = {})
+ # @param [Hash] params ({})
+ def verify_resources_exist_for_tagris(params = {}, options = {})
+ req = build_request(:verify_resources_exist_for_tagris, params)
+ req.send_request(options)
+ end
+
  # @!endgroup

  # @param params ({})
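
`verify_resources_exist_for_tagris` appears to be AWS-internal tag-verification plumbing (Tagris is the internal tagging service) that leaked into the public API model in this release; it is not described in the Firehose documentation, and application code has no evident reason to call it. For completeness, a sketch of invoking the generated method (the ARN and account ID are placeholders):

    resp = client.verify_resources_exist_for_tagris(
      tagris_sweep_list: [
        # Placeholder ARN and account; the real expected values are undocumented.
        { tagris_amazon_resource_name: 'arn:aws:firehose:us-east-1:111122223333:deliverystream/my-stream' }
      ]
    )
    resp.tagris_sweep_list_result.each do |arn, status|
      puts "#{arn} => #{status}" # "ACTIVE" or "NOT_ACTIVE"
    end
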
@@ -2858,7 +2927,7 @@ module Aws::Firehose
  params: params,
  config: config)
  context[:gem_name] = 'aws-sdk-firehose'
- context[:gem_version] = '1.63.0'
+ context[:gem_version] = '1.64.0'
  Seahorse::Client::Request.new(handlers, context)
  end