aws-sdk-firehose 1.2.0 → 1.3.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: f4af5896c9f34d011f0c5a4ab5737d5ba18205f8
- data.tar.gz: a5e8625fcbf53b9d69d2832b70537cd4c221eeeb
+ metadata.gz: '00903556a3b249e45ec2a5fcdb6c377e90e000bf'
+ data.tar.gz: 29934883c12ab8145ae727680897c61f5b96f47e
  SHA512:
- metadata.gz: 83f24cc8b997436c6e9ea9821c6d3b2c202cd2b2330f6fab8b59b31960ec5b618d113eb417ddfd781a98d8fa53b1cf421384a60f891a43992255e2fc68f389cf
- data.tar.gz: 4aa07cee5e527b231bd5dcbf230bc6eda2e10bdffbac71cdfb2ae284c173bd91ecaf35944c504c1070829dd27217d73bdebd96ed5b22e88377e9dfe6518c46aa
+ metadata.gz: 6a853e532686ea330229e25e8bbcb7809880b9db30a82a8fb93d5058779739ba96c10055fb9f9f9c2f7fb5115691502a3e098531b9c923e55dbea317965fd417
+ data.tar.gz: 36d74e68e191718debcc2611f8c5868ed331a472de23ec674a671014cb89651c3932ad8fceb3bca941c418f7413d9d69f805519cd188a31e7f2b41abf3a872d0
@@ -42,6 +42,6 @@ require_relative 'aws-sdk-firehose/customizations'
  # @service
  module Aws::Firehose
 
- GEM_VERSION = '1.2.0'
+ GEM_VERSION = '1.3.0'
 
  end
@@ -155,7 +155,7 @@ module Aws::Firehose
 
  # @!group API Operations
 
- # Creates a delivery stream.
+ # Creates a Kinesis Data Firehose delivery stream.
  #
  # By default, you can create up to 50 delivery streams per AWS Region.
  #
@@ -168,56 +168,57 @@ module Aws::Firehose
  #
  # A Kinesis Data Firehose delivery stream can be configured to receive
  # records directly from providers using PutRecord or PutRecordBatch, or
- # it can be configured to use an existing Kinesis data stream as its
- # source. To specify a Kinesis data stream as input, set the
+ # it can be configured to use an existing Kinesis stream as its source.
+ # To specify a Kinesis data stream as input, set the
  # `DeliveryStreamType` parameter to `KinesisStreamAsSource`, and provide
- # the Kinesis data stream Amazon Resource Name (ARN) and role ARN in the
+ # the Kinesis stream Amazon Resource Name (ARN) and role ARN in the
  # `KinesisStreamSourceConfiguration` parameter.
  #
  # A delivery stream is configured with a single destination: Amazon S3,
- # Amazon ES, Amazon Redshift, or Splunk. Specify only one of the
- # following destination configuration parameters:
- # `ExtendedS3DestinationConfiguration`, `S3DestinationConfiguration`,
- # `ElasticsearchDestinationConfiguration`,
- # `RedshiftDestinationConfiguration`, or
- # `SplunkDestinationConfiguration`.
- #
- # When you specify `S3DestinationConfiguration`, you can also provide
- # the following optional values: `BufferingHints`,
- # `EncryptionConfiguration`, and `CompressionFormat`. By default, if no
- # `BufferingHints` value is provided, Kinesis Data Firehose buffers data
- # up to 5 MB or for 5 minutes, whichever condition is satisfied first.
- # `BufferingHints` is a hint, so there are some cases where the service
- # cannot adhere to these conditions strictly. For example, record
- # boundaries are such that the size is a little over or under the
- # configured buffering size. By default, no encryption is performed. We
- # strongly recommend that you enable encryption to ensure secure data
- # storage in Amazon S3.
+ # Amazon ES, Amazon Redshift, or Splunk. You must specify only one of
+ # the following destination configuration parameters:
+ # **ExtendedS3DestinationConfiguration**,
+ # **S3DestinationConfiguration**,
+ # **ElasticsearchDestinationConfiguration**,
+ # **RedshiftDestinationConfiguration**, or
+ # **SplunkDestinationConfiguration**.
+ #
+ # When you specify **S3DestinationConfiguration**, you can also provide
+ # the following optional values: **BufferingHints**,
+ # **EncryptionConfiguration**, and **CompressionFormat**. By default, if
+ # no **BufferingHints** value is provided, Kinesis Data Firehose buffers
+ # data up to 5 MB or for 5 minutes, whichever condition is satisfied
+ # first. **BufferingHints** is a hint, so there are some cases where the
+ # service cannot adhere to these conditions strictly. For example,
+ # record boundaries might be such that the size is a little over or
+ # under the configured buffering size. By default, no encryption is
+ # performed. We strongly recommend that you enable encryption to ensure
+ # secure data storage in Amazon S3.
  #
  # A few notes about Amazon Redshift as a destination:
  #
  # * An Amazon Redshift destination requires an S3 bucket as intermediate
- # location. This is because Kinesis Data Firehose first delivers data
- # to Amazon S3 and then uses `COPY` syntax to load data into an Amazon
- # Redshift table. This is specified in the
- # `RedshiftDestinationConfiguration.S3Configuration` parameter.
+ # location. Kinesis Data Firehose first delivers data to Amazon S3 and
+ # then uses `COPY` syntax to load data into an Amazon Redshift table.
+ # This is specified in the
+ # **RedshiftDestinationConfiguration.S3Configuration** parameter.
  #
  # * The compression formats `SNAPPY` or `ZIP` cannot be specified in
  # `RedshiftDestinationConfiguration.S3Configuration` because the
  # Amazon Redshift `COPY` operation that reads from the S3 bucket
  # doesn't support these compression formats.
  #
- # * We strongly recommend that you use the user name and password that
- # you provide exclusively with Kinesis Data Firehose. In addition, the
- # permissions for the account should be restricted for Amazon Redshift
+ # * We strongly recommend that you use the user name and password you
+ # provide exclusively with Kinesis Data Firehose, and that the
+ # permissions for the account are restricted for Amazon Redshift
  # `INSERT` permissions.
  #
  # Kinesis Data Firehose assumes the IAM role that is configured as part
  # of the destination. The role should allow the Kinesis Data Firehose
  # principal to assume the role, and the role should have permissions
  # that allow the service to deliver the data. For more information, see
- # [Grant Kinesis Firehose Access to an Amazon S3 Destination][1] in the
- # *Amazon Kinesis Data Firehose Developer Guide*.
+ # [Grant Kinesis Data Firehose Access to an Amazon S3 Destination][1] in
+ # the *Amazon Kinesis Data Firehose Developer Guide*.
  #
  #
  #
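A minimal sketch of the direct-put S3 case described above, written against the 1.3.0 client. The stream name, role ARN, and bucket ARN are placeholders; BufferingHints is left out, so the 5 MB / 5 minute defaults apply.

require 'aws-sdk-firehose'

client = Aws::Firehose::Client.new(region: 'us-east-1')

# Direct-PUT stream with an S3 destination; all names below are placeholders.
client.create_delivery_stream(
  delivery_stream_name: 'example-stream',
  delivery_stream_type: 'DirectPut',
  s3_destination_configuration: {
    role_arn: 'arn:aws:iam::123456789012:role/firehose-delivery-role',
    bucket_arn: 'arn:aws:s3:::example-bucket'
    # BufferingHints omitted: the service buffers up to 5 MB or 5 minutes.
  }
)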
@@ -225,9 +226,9 @@ module Aws::Firehose
  #
  # @option params [required, String] :delivery_stream_name
  # The name of the delivery stream. This name must be unique per AWS
- # account in the same Region. If the delivery streams are in different
- # accounts or different Regions, you can have multiple delivery streams
- # with the same name.
+ # account in the same AWS Region. If the delivery streams are in
+ # different accounts or different Regions, you can have multiple
+ # delivery streams with the same name.
  #
  # @option params [String] :delivery_stream_type
  # The delivery stream type. This parameter can be one of the following
@@ -352,6 +353,55 @@ module Aws::Firehose
  # log_stream_name: "LogStreamName",
  # },
  # },
+ # data_format_conversion_configuration: {
+ # schema_configuration: {
+ # role_arn: "NonEmptyStringWithoutWhitespace",
+ # catalog_id: "NonEmptyStringWithoutWhitespace",
+ # database_name: "NonEmptyStringWithoutWhitespace",
+ # table_name: "NonEmptyStringWithoutWhitespace",
+ # region: "NonEmptyStringWithoutWhitespace",
+ # version_id: "NonEmptyStringWithoutWhitespace",
+ # },
+ # input_format_configuration: {
+ # deserializer: {
+ # open_x_json_ser_de: {
+ # convert_dots_in_json_keys_to_underscores: false,
+ # case_insensitive: false,
+ # column_to_json_key_mappings: {
+ # "NonEmptyStringWithoutWhitespace" => "NonEmptyString",
+ # },
+ # },
+ # hive_json_ser_de: {
+ # timestamp_formats: ["NonEmptyString"],
+ # },
+ # },
+ # },
+ # output_format_configuration: {
+ # serializer: {
+ # parquet_ser_de: {
+ # block_size_bytes: 1,
+ # page_size_bytes: 1,
+ # compression: "UNCOMPRESSED", # accepts UNCOMPRESSED, GZIP, SNAPPY
+ # enable_dictionary_compression: false,
+ # max_padding_bytes: 1,
+ # writer_version: "V1", # accepts V1, V2
+ # },
+ # orc_ser_de: {
+ # stripe_size_bytes: 1,
+ # block_size_bytes: 1,
+ # row_index_stride: 1,
+ # enable_padding: false,
+ # padding_tolerance: 1.0,
+ # compression: "NONE", # accepts NONE, ZLIB, SNAPPY
+ # bloom_filter_columns: ["NonEmptyStringWithoutWhitespace"],
+ # bloom_filter_false_positive_probability: 1.0,
+ # dictionary_key_threshold: 1.0,
+ # format_version: "V0_11", # accepts V0_11, V0_12
+ # },
+ # },
+ # },
+ # enabled: false,
+ # },
  # },
  # redshift_destination_configuration: {
  # role_arn: "RoleARN", # required
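The data_format_conversion_configuration block added above is the headline 1.3.0 change (record format conversion). A sketch of how the new keys might be filled in to turn JSON input into SNAPPY-compressed Parquet, with the schema read from an AWS Glue table, follows; the role ARN, database, and table names are placeholders. The hash would be passed as the data_format_conversion_configuration key of extended_s3_destination_configuration in create_delivery_stream.

# Hypothetical conversion settings: JSON in via OpenXJsonSerDe, Parquet out via
# ParquetSerDe, schema taken from an AWS Glue table.
data_format_conversion_configuration = {
  enabled: true,
  schema_configuration: {
    role_arn: 'arn:aws:iam::123456789012:role/firehose-glue-role', # placeholder
    database_name: 'example_db',                                   # placeholder
    table_name: 'example_table',                                   # placeholder
    region: 'us-east-1'
  },
  input_format_configuration: {
    deserializer: { open_x_json_ser_de: { case_insensitive: true } }
  },
  output_format_configuration: {
    serializer: { parquet_ser_de: { compression: 'SNAPPY' } }
  }
}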
@@ -557,11 +607,11 @@ module Aws::Firehose
  #
  # To check the state of a delivery stream, use DescribeDeliveryStream.
  #
- # While the delivery stream is `DELETING` state, the service may
- # continue to accept the records, but the service doesn't make any
- # guarantees with respect to delivering the data. Therefore, as a best
- # practice, you should first stop any applications that are sending
- # records before deleting a delivery stream.
+ # While the delivery stream is `DELETING` state, the service might
+ # continue to accept the records, but it doesn't make any guarantees
+ # with respect to delivering the data. Therefore, as a best practice,
+ # you should first stop any applications that are sending records before
+ # deleting a delivery stream.
  #
  # @option params [required, String] :delivery_stream_name
  # The name of the delivery stream.
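A sketch of the delete sequence recommended above, assuming producers have already been stopped; the stream name is a placeholder.

require 'aws-sdk-firehose'

client = Aws::Firehose::Client.new(region: 'us-east-1')

client.delete_delivery_stream(delivery_stream_name: 'example-stream')

# Optionally check progress: the stream reports DELETING until it is gone.
begin
  desc = client.describe_delivery_stream(delivery_stream_name: 'example-stream')
  puts desc.delivery_stream_description.delivery_stream_status # => "DELETING"
rescue Aws::Firehose::Errors::ResourceNotFoundException
  puts 'delivery stream deleted'
end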
@@ -592,13 +642,13 @@ module Aws::Firehose
  # The name of the delivery stream.
  #
  # @option params [Integer] :limit
- # The limit on the number of destinations to return. Currently, you can
- # have one destination per delivery stream.
+ # The limit on the number of destinations to return. You can have one
+ # destination per delivery stream.
  #
  # @option params [String] :exclusive_start_destination_id
  # The ID of the destination to start returning the destination
- # information. Currently, Kinesis Data Firehose supports one destination
- # per delivery stream.
+ # information. Kinesis Data Firehose supports one destination per
+ # delivery stream.
  #
  # @return [Types::DescribeDeliveryStreamOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
  #
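A sketch of a DescribeDeliveryStream call using the options documented above; 'example-stream' is a placeholder, and :limit is effectively 1 because Kinesis Data Firehose supports one destination per delivery stream.

require 'aws-sdk-firehose'

client = Aws::Firehose::Client.new(region: 'us-east-1')

resp = client.describe_delivery_stream(
  delivery_stream_name: 'example-stream', # placeholder
  limit: 1                                # one destination per delivery stream
)
desc = resp.delivery_stream_description
puts desc.delivery_stream_status
puts desc.version_id                      # needed later for update_destination
puts desc.destinations[0].destination_id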
@@ -666,6 +716,36 @@ module Aws::Firehose
  # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.s3_backup_description.cloud_watch_logging_options.enabled #=> Boolean
  # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.s3_backup_description.cloud_watch_logging_options.log_group_name #=> String
  # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.s3_backup_description.cloud_watch_logging_options.log_stream_name #=> String
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.schema_configuration.role_arn #=> String
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.schema_configuration.catalog_id #=> String
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.schema_configuration.database_name #=> String
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.schema_configuration.table_name #=> String
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.schema_configuration.region #=> String
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.schema_configuration.version_id #=> String
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.input_format_configuration.deserializer.open_x_json_ser_de.convert_dots_in_json_keys_to_underscores #=> Boolean
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.input_format_configuration.deserializer.open_x_json_ser_de.case_insensitive #=> Boolean
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.input_format_configuration.deserializer.open_x_json_ser_de.column_to_json_key_mappings #=> Hash
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.input_format_configuration.deserializer.open_x_json_ser_de.column_to_json_key_mappings["NonEmptyStringWithoutWhitespace"] #=> String
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.input_format_configuration.deserializer.hive_json_ser_de.timestamp_formats #=> Array
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.input_format_configuration.deserializer.hive_json_ser_de.timestamp_formats[0] #=> String
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.output_format_configuration.serializer.parquet_ser_de.block_size_bytes #=> Integer
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.output_format_configuration.serializer.parquet_ser_de.page_size_bytes #=> Integer
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.output_format_configuration.serializer.parquet_ser_de.compression #=> String, one of "UNCOMPRESSED", "GZIP", "SNAPPY"
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.output_format_configuration.serializer.parquet_ser_de.enable_dictionary_compression #=> Boolean
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.output_format_configuration.serializer.parquet_ser_de.max_padding_bytes #=> Integer
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.output_format_configuration.serializer.parquet_ser_de.writer_version #=> String, one of "V1", "V2"
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.output_format_configuration.serializer.orc_ser_de.stripe_size_bytes #=> Integer
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.output_format_configuration.serializer.orc_ser_de.block_size_bytes #=> Integer
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.output_format_configuration.serializer.orc_ser_de.row_index_stride #=> Integer
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.output_format_configuration.serializer.orc_ser_de.enable_padding #=> Boolean
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.output_format_configuration.serializer.orc_ser_de.padding_tolerance #=> Float
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.output_format_configuration.serializer.orc_ser_de.compression #=> String, one of "NONE", "ZLIB", "SNAPPY"
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.output_format_configuration.serializer.orc_ser_de.bloom_filter_columns #=> Array
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.output_format_configuration.serializer.orc_ser_de.bloom_filter_columns[0] #=> String
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.output_format_configuration.serializer.orc_ser_de.bloom_filter_false_positive_probability #=> Float
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.output_format_configuration.serializer.orc_ser_de.dictionary_key_threshold #=> Float
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.output_format_configuration.serializer.orc_ser_de.format_version #=> String, one of "V0_11", "V0_12"
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.data_format_conversion_configuration.enabled #=> Boolean
  # resp.delivery_stream_description.destinations[0].redshift_destination_description.role_arn #=> String
  # resp.delivery_stream_description.destinations[0].redshift_destination_description.cluster_jdbc_url #=> String
  # resp.delivery_stream_description.destinations[0].redshift_destination_description.copy_command.data_table_name #=> String
@@ -881,16 +961,16 @@ module Aws::Firehose
  # referred to as producers.
  #
  # By default, each delivery stream can take in up to 2,000 transactions
- # per second, 5,000 records per second, or 5 MB per second. Note that if
- # you use `PutRecord` and PutRecordBatch, the limits are an aggregate
- # across these two operations for each delivery stream. For more
- # information about limits and how to request an increase, see [Amazon
- # Kinesis Data Firehose Limits][1].
+ # per second, 5,000 records per second, or 5 MB per second. If you use
+ # PutRecord and PutRecordBatch, the limits are an aggregate across these
+ # two operations for each delivery stream. For more information about
+ # limits and how to request an increase, see [Amazon Kinesis Data
+ # Firehose Limits][1].
  #
  # You must specify the name of the delivery stream and the data record
- # when using `PutRecord`. The data record consists of a data blob that
- # can be up to 1,000 KB in size and any kind of data. For example, it
- # can be a segment from a log file, geographic location data, website
+ # when using PutRecord. The data record consists of a data blob that can
+ # be up to 1,000 KB in size, and any kind of data. For example, it can
+ # be a segment from a log file, geographic location data, website
  # clickstream data, and so on.
  #
  # Kinesis Data Firehose buffers records before delivering them to the
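A PutRecord sketch matching the description above; the stream name and payload are placeholders, and a trailing newline is added so consumers can split records at the destination.

require 'aws-sdk-firehose'

client = Aws::Firehose::Client.new(region: 'us-east-1')

# The blob can be any data up to 1,000 KB; names below are placeholders.
resp = client.put_record(
  delivery_stream_name: 'example-stream',
  record: { data: %({"event":"page_view","ts":#{Time.now.to_i}}\n) }
)
puts resp.record_id # unique ID assigned by the service to this record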
@@ -909,9 +989,9 @@ module Aws::Firehose
  # throughput limits have been exceeded for the delivery stream.
  #
  # Data records sent to Kinesis Data Firehose are stored for 24 hours
- # from the time they are added to a delivery stream as it attempts to
- # send the records to the destination. If the destination is unreachable
- # for more than 24 hours, the data is no longer available.
+ # from the time they are added to a delivery stream as it tries to send
+ # the records to the destination. If the destination is unreachable for
+ # more than 24 hours, the data is no longer available.
  #
  #
  #
@@ -957,18 +1037,18 @@ module Aws::Firehose
  #
  # By default, each delivery stream can take in up to 2,000 transactions
  # per second, 5,000 records per second, or 5 MB per second. If you use
- # PutRecord and `PutRecordBatch`, the limits are an aggregate across
- # these two operations for each delivery stream. For more information
- # about limits, see [Amazon Kinesis Data Firehose Limits][1].
+ # PutRecord and PutRecordBatch, the limits are an aggregate across these
+ # two operations for each delivery stream. For more information about
+ # limits, see [Amazon Kinesis Data Firehose Limits][1].
  #
- # Each `PutRecordBatch` request supports up to 500 records. Each record
- # in the request can be as large as 1,000 KB (before 64-bit encoding),
- # up to a limit of 4 MB for the entire request. These limits cannot be
+ # Each PutRecordBatch request supports up to 500 records. Each record in
+ # the request can be as large as 1,000 KB (before 64-bit encoding), up
+ # to a limit of 4 MB for the entire request. These limits cannot be
  # changed.
  #
  # You must specify the name of the delivery stream and the data record
  # when using PutRecord. The data record consists of a data blob that can
- # be up to 1,000 KB in size and any kind of data. For example, it could
+ # be up to 1,000 KB in size, and any kind of data. For example, it could
  # be a segment from a log file, geographic location data, website
  # clickstream data, and so on.
  #
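A sketch of batching under the limits described above: hypothetical events sent 500 records at a time to a placeholder stream.

require 'aws-sdk-firehose'

client = Aws::Firehose::Client.new(region: 'us-east-1')

# Hypothetical events; each call may carry at most 500 records / 4 MB total.
events = (1..1200).map { |i| %({"event_id":#{i}}\n) }

events.each_slice(500) do |batch|
  client.put_record_batch(
    delivery_stream_name: 'example-stream',
    records: batch.map { |data| { data: data } }
  )
end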
@@ -979,32 +1059,34 @@ module Aws::Firehose
  # consumer application to parse individual data items when reading the
  # data from the destination.
  #
- # The `PutRecordBatch` response includes a count of failed records,
- # `FailedPutCount`, and an array of responses, `RequestResponses`. Each
- # entry in the `RequestResponses` array provides additional information
- # about the processed record. It directly correlates with a record in
- # the request array using the same ordering, from the top to the bottom.
- # The response array always includes the same number of records as the
- # request array. `RequestResponses` includes both successfully and
- # unsuccessfully processed records. Kinesis Data Firehose attempts to
- # process all records in each `PutRecordBatch` request. A single record
- # failure does not stop the processing of subsequent records.
- #
- # A successfully processed record includes a `RecordId` value, which is
- # unique for the record. An unsuccessfully processed record includes
- # `ErrorCode` and `ErrorMessage` values. `ErrorCode` reflects the type
- # of error, and is one of the following values: `ServiceUnavailable` or
- # `InternalFailure`. `ErrorMessage` provides more detailed information
- # about the error.
+ # The PutRecordBatch response includes a count of failed records,
+ # **FailedPutCount**, and an array of responses, **RequestResponses**.
+ # Each entry in the **RequestResponses** array provides additional
+ # information about the processed record. It directly correlates with a
+ # record in the request array using the same ordering, from the top to
+ # the bottom. The response array always includes the same number of
+ # records as the request array. **RequestResponses** includes both
+ # successfully and unsuccessfully processed records. Kinesis Data
+ # Firehose tries to process all records in each PutRecordBatch request.
+ # A single record failure does not stop the processing of subsequent
+ # records.
+ #
+ # A successfully processed record includes a **RecordId** value, which
+ # is unique for the record. An unsuccessfully processed record includes
+ # **ErrorCode** and **ErrorMessage** values. **ErrorCode** reflects the
+ # type of error, and is one of the following values:
+ # `ServiceUnavailable` or `InternalFailure`. **ErrorMessage** provides
+ # more detailed information about the error.
  #
  # If there is an internal server error or a timeout, the write might
- # have completed or it might have failed. If `FailedPutCount` is greater
- # than 0, retry the request, resending only those records that might
- # have failed processing. This minimizes the possible duplicate records
- # and also reduces the total bytes sent (and corresponding charges). We
- # recommend that you handle any duplicates at the destination.
+ # have completed or it might have failed. If **FailedPutCount** is
+ # greater than 0, retry the request, resending only those records that
+ # might have failed processing. This minimizes the possible duplicate
+ # records and also reduces the total bytes sent (and corresponding
+ # charges). We recommend that you handle any duplicates at the
+ # destination.
  #
- # If `PutRecordBatch` throws `ServiceUnavailableException`, back off and
+ # If PutRecordBatch throws **ServiceUnavailableException**, back off and
  # retry. If the exception persists, it is possible that the throughput
  # limits have been exceeded for the delivery stream.
  #
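A sketch of the retry pattern described above: pair each request record with its RequestResponses entry and resend only the ones that carry an error_code. Names are placeholders; a production version would also back off between attempts and deduplicate at the destination.

require 'aws-sdk-firehose'

client = Aws::Firehose::Client.new(region: 'us-east-1')

records = [{ data: "a\n" }, { data: "b\n" }, { data: "c\n" }]
resp = client.put_record_batch(
  delivery_stream_name: 'example-stream', # placeholder
  records: records
)

if resp.failed_put_count > 0
  # request_responses mirrors the request order, so pair the two arrays and
  # keep only the entries that report an error_code.
  failed = records.zip(resp.request_responses)
                  .select { |_record, result| result.error_code }
                  .map(&:first)
  client.put_record_batch(delivery_stream_name: 'example-stream', records: failed)
end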
@@ -1166,20 +1248,20 @@ module Aws::Firehose
  # does not merge any parameters. In this case, all parameters must be
  # specified.
  #
- # Kinesis Data Firehose uses `CurrentDeliveryStreamVersionId` to avoid
+ # Kinesis Data Firehose uses **CurrentDeliveryStreamVersionId** to avoid
  # race conditions and conflicting merges. This is a required field, and
  # the service updates the configuration only if the existing
  # configuration has a version ID that matches. After the update is
- # applied successfully, the version ID is updated, and you can retrieve
- # it using DescribeDeliveryStream. Use the new version ID to set
- # `CurrentDeliveryStreamVersionId` in the next call.
+ # applied successfully, the version ID is updated, and can be retrieved
+ # using DescribeDeliveryStream. Use the new version ID to set
+ # **CurrentDeliveryStreamVersionId** in the next call.
  #
  # @option params [required, String] :delivery_stream_name
  # The name of the delivery stream.
  #
  # @option params [required, String] :current_delivery_stream_version_id
- # Obtain this value from the `VersionId` result of
- # DeliveryStreamDescription. This value is required, and it helps the
+ # Obtain this value from the **VersionId** result of
+ # DeliveryStreamDescription. This value is required, and helps the
  # service perform conditional operations. For example, if there is an
  # interleaving update and this value is null, then the update
  # destination fails. After the update is successful, the `VersionId`
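A sketch of the versioned update flow described above: read VersionId and the destination ID with DescribeDeliveryStream, then pass them to UpdateDestination. The stream name and the changed setting are placeholders.

require 'aws-sdk-firehose'

client = Aws::Firehose::Client.new(region: 'us-east-1')

desc = client.describe_delivery_stream(delivery_stream_name: 'example-stream')
             .delivery_stream_description

client.update_destination(
  delivery_stream_name: 'example-stream',
  current_delivery_stream_version_id: desc.version_id, # optimistic-lock token
  destination_id: desc.destinations[0].destination_id,
  extended_s3_destination_update: {
    compression_format: 'GZIP' # the single setting being changed in this sketch
  }
)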
@@ -1289,6 +1371,55 @@ module Aws::Firehose
  # log_stream_name: "LogStreamName",
  # },
  # },
+ # data_format_conversion_configuration: {
+ # schema_configuration: {
+ # role_arn: "NonEmptyStringWithoutWhitespace",
+ # catalog_id: "NonEmptyStringWithoutWhitespace",
+ # database_name: "NonEmptyStringWithoutWhitespace",
+ # table_name: "NonEmptyStringWithoutWhitespace",
+ # region: "NonEmptyStringWithoutWhitespace",
+ # version_id: "NonEmptyStringWithoutWhitespace",
+ # },
+ # input_format_configuration: {
+ # deserializer: {
+ # open_x_json_ser_de: {
+ # convert_dots_in_json_keys_to_underscores: false,
+ # case_insensitive: false,
+ # column_to_json_key_mappings: {
+ # "NonEmptyStringWithoutWhitespace" => "NonEmptyString",
+ # },
+ # },
+ # hive_json_ser_de: {
+ # timestamp_formats: ["NonEmptyString"],
+ # },
+ # },
+ # },
+ # output_format_configuration: {
+ # serializer: {
+ # parquet_ser_de: {
+ # block_size_bytes: 1,
+ # page_size_bytes: 1,
+ # compression: "UNCOMPRESSED", # accepts UNCOMPRESSED, GZIP, SNAPPY
+ # enable_dictionary_compression: false,
+ # max_padding_bytes: 1,
+ # writer_version: "V1", # accepts V1, V2
+ # },
+ # orc_ser_de: {
+ # stripe_size_bytes: 1,
+ # block_size_bytes: 1,
+ # row_index_stride: 1,
+ # enable_padding: false,
+ # padding_tolerance: 1.0,
+ # compression: "NONE", # accepts NONE, ZLIB, SNAPPY
+ # bloom_filter_columns: ["NonEmptyStringWithoutWhitespace"],
+ # bloom_filter_false_positive_probability: 1.0,
+ # dictionary_key_threshold: 1.0,
+ # format_version: "V0_11", # accepts V0_11, V0_12
+ # },
+ # },
+ # },
+ # enabled: false,
+ # },
  # },
  # redshift_destination_update: {
  # role_arn: "RoleARN",
@@ -1494,7 +1625,7 @@ module Aws::Firehose
  params: params,
  config: config)
  context[:gem_name] = 'aws-sdk-firehose'
- context[:gem_version] = '1.2.0'
+ context[:gem_version] = '1.3.0'
  Seahorse::Client::Request.new(handlers, context)
  end