aws-sdk-firehose 1.0.0.rc1 → 1.0.0.rc2

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
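For orientation, here is a minimal sketch of how the rc2 client documented in the diff below might be constructed and used. This is not part of the released package; the region, credentials, ARNs, and stream name are placeholder values:

    require 'aws-sdk-firehose'

    # Placeholder credentials and region; any standard AWS credential
    # source documented below (ENV, shared config, IMDS) also works.
    client = Aws::Firehose::Client.new(
      region: 'us-east-1',
      credentials: Aws::Credentials.new('AKID', 'SECRET')
    )

    # Create an S3-backed delivery stream (hypothetical role and bucket
    # ARNs), then check its status with DescribeDeliveryStream.
    client.create_delivery_stream({
      delivery_stream_name: 'example-stream',
      s3_destination_configuration: {
        role_arn: 'arn:aws:iam::123456789012:role/firehose-delivery-role',
        bucket_arn: 'arn:aws:s3:::example-bucket',
      },
    })
    resp = client.describe_delivery_stream(delivery_stream_name: 'example-stream')
    resp.delivery_stream_description.delivery_stream_status #=> "CREATING"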
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: e67c5b56405613265e7334a0c8a2331a3f7ad36c
- data.tar.gz: bf741ff986044cd5a9a8590c2d2641d818b45ec7
+ metadata.gz: 91bebaf0ce2a6190efc9c4f851139ee07ac21806
+ data.tar.gz: 4bce1c6d51dbca2a525a88e796ef28d851ffabba
  SHA512:
- metadata.gz: 81f6b9ef90ea28e2b41c1753cabca4f1203ec93a474129a42e45dc70015d6c95d8d744282fecf65a193e6b4a66593bb43523b9a84d000622359af9f8a806d4d6
- data.tar.gz: aac6e75335e0b5eebef2a8b7ca138c19f31295b6aeee88ffdba315620d2a4b7642bb337fc718cc6dcdf34a46094444133faa31ffd71292ea234b0599ff71123a
+ metadata.gz: e311433ad3ac3f597cacc3688685579e30e7f00f6677ea87bf42aa83c45bb722ac69e9ef197708e26d30aeae44cb4e8187fd32c03a753f0b29a34ff2a0567af6
+ data.tar.gz: 418e618d5aa99b15159c774c9f712143181112fb6c22810dd05253f3393fd4465211670102eb39834835fcf6f499611af34e37919ba7a0e7c3035847ee2a18e8
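These checksums can be used to verify a downloaded copy of the gem's archives. A minimal sketch, assuming the archive has been extracted locally under the file name used above:

    require 'digest'

    # 'metadata.gz' is an assumed local file name; compare its digest
    # against the rc2 SHA512 entry recorded in checksums.yaml above.
    expected = 'e311433ad3ac3f597cacc3688685579e30e7f00f6677ea87bf42aa83c45bb722ac69e9ef197708e26d30aeae44cb4e8187fd32c03a753f0b29a34ff2a0567af6'
    actual = Digest::SHA512.file('metadata.gz').hexdigest
    abort 'checksum mismatch for metadata.gz' unless actual == expected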
@@ -1,6 +1,6 @@
  # WARNING ABOUT GENERATED CODE
  #
- # This file is generated. See the contributing for info on making contributions:
+ # This file is generated. See the contributing guide for more information:
  # https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
  #
  # WARNING ABOUT GENERATED CODE
@@ -1,6 +1,6 @@
  # WARNING ABOUT GENERATED CODE
  #
- # This file is generated. See the contributing for info on making contributions:
+ # This file is generated. See the contributing guide for more information:
  # https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
  #
  # WARNING ABOUT GENERATED CODE
@@ -23,848 +23,1172 @@ require 'aws-sdk-core/plugins/protocols/json_rpc.rb'
 
  Aws::Plugins::GlobalConfiguration.add_identifier(:firehose)
 
- module Aws
- module Firehose
- class Client < Seahorse::Client::Base
+ module Aws::Firehose
+ class Client < Seahorse::Client::Base
 
- include Aws::ClientStubs
+ include Aws::ClientStubs
 
- @identifier = :firehose
+ @identifier = :firehose
 
- set_api(ClientApi::API)
+ set_api(ClientApi::API)
 
- add_plugin(Seahorse::Client::Plugins::ContentLength)
- add_plugin(Aws::Plugins::CredentialsConfiguration)
- add_plugin(Aws::Plugins::Logging)
- add_plugin(Aws::Plugins::ParamConverter)
- add_plugin(Aws::Plugins::ParamValidator)
- add_plugin(Aws::Plugins::UserAgent)
- add_plugin(Aws::Plugins::HelpfulSocketErrors)
- add_plugin(Aws::Plugins::RetryErrors)
- add_plugin(Aws::Plugins::GlobalConfiguration)
- add_plugin(Aws::Plugins::RegionalEndpoint)
- add_plugin(Aws::Plugins::ResponsePaging)
- add_plugin(Aws::Plugins::StubResponses)
- add_plugin(Aws::Plugins::IdempotencyToken)
- add_plugin(Aws::Plugins::SignatureV4)
- add_plugin(Aws::Plugins::Protocols::JsonRpc)
+ add_plugin(Seahorse::Client::Plugins::ContentLength)
+ add_plugin(Aws::Plugins::CredentialsConfiguration)
+ add_plugin(Aws::Plugins::Logging)
+ add_plugin(Aws::Plugins::ParamConverter)
+ add_plugin(Aws::Plugins::ParamValidator)
+ add_plugin(Aws::Plugins::UserAgent)
+ add_plugin(Aws::Plugins::HelpfulSocketErrors)
+ add_plugin(Aws::Plugins::RetryErrors)
+ add_plugin(Aws::Plugins::GlobalConfiguration)
+ add_plugin(Aws::Plugins::RegionalEndpoint)
+ add_plugin(Aws::Plugins::ResponsePaging)
+ add_plugin(Aws::Plugins::StubResponses)
+ add_plugin(Aws::Plugins::IdempotencyToken)
+ add_plugin(Aws::Plugins::SignatureV4)
+ add_plugin(Aws::Plugins::Protocols::JsonRpc)
 
- # @option options [required, Aws::CredentialProvider] :credentials
- # Your AWS credentials. This can be an instance of any one of the
- # following classes:
- #
- # * `Aws::Credentials` - Used for configuring static, non-refreshing
- # credentials.
- #
- # * `Aws::InstanceProfileCredentials` - Used for loading credentials
- # from an EC2 IMDS on an EC2 instance.
- #
- # * `Aws::SharedCredentials` - Used for loading credentials from a
- # shared file, such as `~/.aws/config`.
- #
- # * `Aws::AssumeRoleCredentials` - Used when you need to assume a role.
- #
- # When `:credentials` are not configured directly, the following
- # locations will be searched for credentials:
- #
- # * `Aws.config[:credentials]`
- # * The `:access_key_id`, `:secret_access_key`, and `:session_token` options.
- # * ENV['AWS_ACCESS_KEY_ID'], ENV['AWS_SECRET_ACCESS_KEY']
- # * `~/.aws/credentials`
- # * `~/.aws/config`
- # * EC2 IMDS instance profile - When used by default, the timeouts are
- # very aggressive. Construct and pass an instance of
- # `Aws::InstanceProfileCredentails` to enable retries and extended
- # timeouts.
- # @option options [required, String] :region
- # The AWS region to connect to. The configured `:region` is
- # used to determine the service `:endpoint`. When not passed,
- # a default `:region` is search for in the following locations:
- #
- # * `Aws.config[:region]`
- # * `ENV['AWS_REGION']`
- # * `ENV['AMAZON_REGION']`
- # * `ENV['AWS_DEFAULT_REGION']`
- # * `~/.aws/credentials`
- # * `~/.aws/config`
- # @option options [String] :access_key_id
- # @option options [Boolean] :convert_params (true)
- # When `true`, an attempt is made to coerce request parameters into
- # the required types.
- # @option options [String] :endpoint
- # The client endpoint is normally constructed from the `:region`
- # option. You should only configure an `:endpoint` when connecting
- # to test endpoints. This should be avalid HTTP(S) URI.
- # @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default)
- # The log formatter.
- # @option options [Symbol] :log_level (:info)
- # The log level to send messages to the `:logger` at.
- # @option options [Logger] :logger
- # The Logger instance to send log messages to. If this option
- # is not set, logging will be disabled.
- # @option options [String] :profile ("default")
- # Used when loading credentials from the shared credentials file
- # at HOME/.aws/credentials. When not specified, 'default' is used.
- # @option options [Integer] :retry_limit (3)
- # The maximum number of times to retry failed requests. Only
- # ~ 500 level server errors and certain ~ 400 level client errors
- # are retried. Generally, these are throttling errors, data
- # checksum errors, networking errors, timeout errors and auth
- # errors from expired credentials.
- # @option options [String] :secret_access_key
- # @option options [String] :session_token
- # @option options [Boolean] :simple_json (false)
- # Disables request parameter conversion, validation, and formatting.
- # Also disable response data type conversions. This option is useful
- # when you want to ensure the highest level of performance by
- # avoiding overhead of walking request parameters and response data
- # structures.
- #
- # When `:simple_json` is enabled, the request parameters hash must
- # be formatted exactly as the DynamoDB API expects.
- # @option options [Boolean] :stub_responses (false)
- # Causes the client to return stubbed responses. By default
- # fake responses are generated and returned. You can specify
- # the response data to return or errors to raise by calling
- # {ClientStubs#stub_responses}. See {ClientStubs} for more information.
- #
- # ** Please note ** When response stubbing is enabled, no HTTP
- # requests are made, and retries are disabled.
- # @option options [Boolean] :validate_params (true)
- # When `true`, request parameters are validated before
- # sending the request.
- def initialize(*args)
- super
- end
-
- # @!group API Operations
+ # @option options [required, Aws::CredentialProvider] :credentials
+ # Your AWS credentials. This can be an instance of any one of the
+ # following classes:
+ #
+ # * `Aws::Credentials` - Used for configuring static, non-refreshing
+ # credentials.
+ #
+ # * `Aws::InstanceProfileCredentials` - Used for loading credentials
+ # from an EC2 IMDS on an EC2 instance.
+ #
+ # * `Aws::SharedCredentials` - Used for loading credentials from a
+ # shared file, such as `~/.aws/config`.
+ #
+ # * `Aws::AssumeRoleCredentials` - Used when you need to assume a role.
+ #
+ # When `:credentials` are not configured directly, the following
+ # locations will be searched for credentials:
+ #
+ # * `Aws.config[:credentials]`
+ # * The `:access_key_id`, `:secret_access_key`, and `:session_token` options.
+ # * ENV['AWS_ACCESS_KEY_ID'], ENV['AWS_SECRET_ACCESS_KEY']
+ # * `~/.aws/credentials`
+ # * `~/.aws/config`
+ # * EC2 IMDS instance profile - When used by default, the timeouts are
+ # very aggressive. Construct and pass an instance of
+ # `Aws::InstanceProfileCredentials` to enable retries and extended
+ # timeouts.
+ #
+ # @option options [required, String] :region
+ # The AWS region to connect to. The configured `:region` is
+ # used to determine the service `:endpoint`. When not passed,
+ # a default `:region` is searched for in the following locations:
+ #
+ # * `Aws.config[:region]`
+ # * `ENV['AWS_REGION']`
+ # * `ENV['AMAZON_REGION']`
+ # * `ENV['AWS_DEFAULT_REGION']`
+ # * `~/.aws/credentials`
+ # * `~/.aws/config`
+ #
+ # @option options [String] :access_key_id
+ #
+ # @option options [Boolean] :convert_params (true)
+ # When `true`, an attempt is made to coerce request parameters into
+ # the required types.
+ #
+ # @option options [String] :endpoint
+ # The client endpoint is normally constructed from the `:region`
+ # option. You should only configure an `:endpoint` when connecting
+ # to test endpoints. This should be a valid HTTP(S) URI.
+ #
+ # @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default)
+ # The log formatter.
+ #
+ # @option options [Symbol] :log_level (:info)
+ # The log level to send messages to the `:logger` at.
+ #
+ # @option options [Logger] :logger
+ # The Logger instance to send log messages to. If this option
+ # is not set, logging will be disabled.
+ #
+ # @option options [String] :profile ("default")
+ # Used when loading credentials from the shared credentials file
+ # at HOME/.aws/credentials. When not specified, 'default' is used.
+ #
+ # @option options [Integer] :retry_limit (3)
+ # The maximum number of times to retry failed requests. Only
+ # ~ 500 level server errors and certain ~ 400 level client errors
+ # are retried. Generally, these are throttling errors, data
+ # checksum errors, networking errors, timeout errors and auth
+ # errors from expired credentials.
+ #
+ # @option options [String] :secret_access_key
+ #
+ # @option options [String] :session_token
+ #
+ # @option options [Boolean] :simple_json (false)
+ # Disables request parameter conversion, validation, and formatting.
+ # Also disables response data type conversions. This option is useful
+ # when you want to ensure the highest level of performance by
+ # avoiding overhead of walking request parameters and response data
+ # structures.
+ #
+ # When `:simple_json` is enabled, the request parameters hash must
+ # be formatted exactly as the DynamoDB API expects.
+ #
+ # @option options [Boolean] :stub_responses (false)
+ # Causes the client to return stubbed responses. By default,
+ # fake responses are generated and returned. You can specify
+ # the response data to return or errors to raise by calling
+ # {ClientStubs#stub_responses}. See {ClientStubs} for more information.
+ #
+ # ** Please note ** When response stubbing is enabled, no HTTP
+ # requests are made, and retries are disabled.
+ #
+ # @option options [Boolean] :validate_params (true)
+ # When `true`, request parameters are validated before
+ # sending the request.
+ #
+ def initialize(*args)
+ super
+ end
 
- # Creates a delivery stream.
- #
- # CreateDeliveryStream is an asynchronous operation that immediately
- # returns. The initial status of the delivery stream is `CREATING`.
- # After the delivery stream is created, its status is `ACTIVE` and it
- # now accepts data. Attempts to send data to a delivery stream that is
- # not in the `ACTIVE` state cause an exception. To check the state of a
- # delivery stream, use DescribeDeliveryStream.
- #
- # The name of a delivery stream identifies it. You can't have two
- # delivery streams with the same name in the same region. Two delivery
- # streams in different AWS accounts or different regions in the same AWS
- # account can have the same name.
- #
- # By default, you can create up to 20 delivery streams per region.
- #
- # A delivery stream can only be configured with a single destination,
- # Amazon S3, Amazon Elasticsearch Service, or Amazon Redshift. For
- # correct CreateDeliveryStream request syntax, specify only one
- # destination configuration parameter: either
- # **S3DestinationConfiguration**,
- # **ElasticsearchDestinationConfiguration**, or
- # **RedshiftDestinationConfiguration**.
- #
- # As part of **S3DestinationConfiguration**, optional values
- # **BufferingHints**, **EncryptionConfiguration**, and
- # **CompressionFormat** can be provided. By default, if no
- # **BufferingHints** value is provided, Firehose buffers data up to 5 MB
- # or for 5 minutes, whichever condition is satisfied first. Note that
- # **BufferingHints** is a hint, so there are some cases where the
- # service cannot adhere to these conditions strictly; for example,
- # record boundaries are such that the size is a little over or under the
- # configured buffering size. By default, no encryption is performed. We
- # strongly recommend that you enable encryption to ensure secure data
- # storage in Amazon S3.
- #
- # A few notes about **RedshiftDestinationConfiguration**\:
- #
- # * An Amazon Redshift destination requires an S3 bucket as intermediate
- # location, as Firehose first delivers data to S3 and then uses `COPY`
- # syntax to load data into an Amazon Redshift table. This is specified
- # in the **RedshiftDestinationConfiguration.S3Configuration**
- # parameter element.
- #
- # * The compression formats `SNAPPY` or `ZIP` cannot be specified in
- # **RedshiftDestinationConfiguration.S3Configuration** because the
- # Amazon Redshift `COPY` operation that reads from the S3 bucket
- # doesn't support these compression formats.
- #
- # * We strongly recommend that the username and password provided is
- # used exclusively for Firehose purposes, and that the permissions for
- # the account are restricted for Amazon Redshift `INSERT` permissions.
- #
- # Firehose assumes the IAM role that is configured as part of
- # destinations. The IAM role should allow the Firehose principal to
- # assume the role, and the role should have permissions that allows the
- # service to deliver the data. For more information, see [Amazon S3
- # Bucket Access][1] in the *Amazon Kinesis Firehose Developer Guide*.
- #
- #
- #
- # [1]: http://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3
- # @option params [required, String] :delivery_stream_name
- # The name of the delivery stream.
- # @option params [Types::S3DestinationConfiguration] :s3_destination_configuration
- # The destination in Amazon S3. This value must be specified if
- # **ElasticsearchDestinationConfiguration** or
- # **RedshiftDestinationConfiguration** is specified (see restrictions
- # listed above).
- # @option params [Types::RedshiftDestinationConfiguration] :redshift_destination_configuration
- # The destination in Amazon Redshift. This value cannot be specified if
- # Amazon S3 or Amazon Elasticsearch is the desired destination (see
- # restrictions listed above).
- # @option params [Types::ElasticsearchDestinationConfiguration] :elasticsearch_destination_configuration
- # The destination in Amazon ES. This value cannot be specified if Amazon
- # S3 or Amazon Redshift is the desired destination (see restrictions
- # listed above).
- # @return [Types::CreateDeliveryStreamOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::CreateDeliveryStreamOutput#delivery_stream_arn #DeliveryStreamARN} => String
- #
- # @example Request syntax with placeholder values
- # resp = client.create_delivery_stream({
- # delivery_stream_name: "DeliveryStreamName", # required
- # s3_destination_configuration: {
- # role_arn: "RoleARN", # required
- # bucket_arn: "BucketARN", # required
- # prefix: "Prefix",
- # buffering_hints: {
- # size_in_m_bs: 1,
- # interval_in_seconds: 1,
- # },
- # compression_format: "UNCOMPRESSED", # accepts UNCOMPRESSED, GZIP, ZIP, Snappy
- # encryption_configuration: {
- # no_encryption_config: "NoEncryption", # accepts NoEncryption
- # kms_encryption_config: {
- # awskms_key_arn: "AWSKMSKeyARN", # required
- # },
- # },
- # cloud_watch_logging_options: {
- # enabled: false,
- # log_group_name: "LogGroupName",
- # log_stream_name: "LogStreamName",
- # },
- # },
- # redshift_destination_configuration: {
- # role_arn: "RoleARN", # required
- # cluster_jdbcurl: "ClusterJDBCURL", # required
- # copy_command: { # required
- # data_table_name: "DataTableName", # required
- # data_table_columns: "DataTableColumns",
- # copy_options: "CopyOptions",
- # },
- # username: "Username", # required
- # password: "Password", # required
- # retry_options: {
- # duration_in_seconds: 1,
- # },
- # s3_configuration: { # required
- # role_arn: "RoleARN", # required
- # bucket_arn: "BucketARN", # required
- # prefix: "Prefix",
- # buffering_hints: {
- # size_in_m_bs: 1,
- # interval_in_seconds: 1,
- # },
- # compression_format: "UNCOMPRESSED", # accepts UNCOMPRESSED, GZIP, ZIP, Snappy
- # encryption_configuration: {
- # no_encryption_config: "NoEncryption", # accepts NoEncryption
- # kms_encryption_config: {
- # awskms_key_arn: "AWSKMSKeyARN", # required
- # },
- # },
- # cloud_watch_logging_options: {
- # enabled: false,
- # log_group_name: "LogGroupName",
- # log_stream_name: "LogStreamName",
- # },
- # },
- # cloud_watch_logging_options: {
- # enabled: false,
- # log_group_name: "LogGroupName",
- # log_stream_name: "LogStreamName",
- # },
- # },
- # elasticsearch_destination_configuration: {
- # role_arn: "RoleARN", # required
- # domain_arn: "ElasticsearchDomainARN", # required
- # index_name: "ElasticsearchIndexName", # required
- # type_name: "ElasticsearchTypeName", # required
- # index_rotation_period: "NoRotation", # accepts NoRotation, OneHour, OneDay, OneWeek, OneMonth
- # buffering_hints: {
- # interval_in_seconds: 1,
- # size_in_m_bs: 1,
- # },
- # retry_options: {
- # duration_in_seconds: 1,
- # },
- # s3_backup_mode: "FailedDocumentsOnly", # accepts FailedDocumentsOnly, AllDocuments
- # s3_configuration: { # required
- # role_arn: "RoleARN", # required
- # bucket_arn: "BucketARN", # required
- # prefix: "Prefix",
- # buffering_hints: {
- # size_in_m_bs: 1,
- # interval_in_seconds: 1,
- # },
- # compression_format: "UNCOMPRESSED", # accepts UNCOMPRESSED, GZIP, ZIP, Snappy
- # encryption_configuration: {
- # no_encryption_config: "NoEncryption", # accepts NoEncryption
- # kms_encryption_config: {
- # awskms_key_arn: "AWSKMSKeyARN", # required
- # },
- # },
- # cloud_watch_logging_options: {
- # enabled: false,
- # log_group_name: "LogGroupName",
- # log_stream_name: "LogStreamName",
- # },
- # },
- # cloud_watch_logging_options: {
- # enabled: false,
- # log_group_name: "LogGroupName",
- # log_stream_name: "LogStreamName",
- # },
- # },
- # })
- #
- # @example Response structure
- # resp.delivery_stream_arn #=> String
- # @overload create_delivery_stream(params = {})
- # @param [Hash] params ({})
- def create_delivery_stream(params = {}, options = {})
- req = build_request(:create_delivery_stream, params)
- req.send_request(options)
- end
+ # @!group API Operations
 
- # Deletes a delivery stream and its data.
- #
- # You can delete a delivery stream only if it is in `ACTIVE` or
- # `DELETING` state, and not in the `CREATING` state. While the deletion
- # request is in process, the delivery stream is in the `DELETING` state.
- #
- # To check the state of a delivery stream, use DescribeDeliveryStream.
- #
- # While the delivery stream is `DELETING` state, the service may
- # continue to accept the records, but the service doesn't make any
- # guarantees with respect to delivering the data. Therefore, as a best
- # practice, you should first stop any applications that are sending
- # records before deleting a delivery stream.
- # @option params [required, String] :delivery_stream_name
- # The name of the delivery stream.
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
- #
- # @example Request syntax with placeholder values
- # resp = client.delete_delivery_stream({
- # delivery_stream_name: "DeliveryStreamName", # required
- # })
- # @overload delete_delivery_stream(params = {})
- # @param [Hash] params ({})
- def delete_delivery_stream(params = {}, options = {})
- req = build_request(:delete_delivery_stream, params)
- req.send_request(options)
- end
+ # Creates a delivery stream.
+ #
+ # By default, you can create up to 20 delivery streams per region.
+ #
+ # This is an asynchronous operation that immediately returns. The
+ # initial status of the delivery stream is `CREATING`. After the
+ # delivery stream is created, its status is `ACTIVE` and it now accepts
+ # data. Attempts to send data to a delivery stream that is not in the
+ # `ACTIVE` state cause an exception. To check the state of a delivery
+ # stream, use DescribeDeliveryStream.
+ #
+ # A delivery stream is configured with a single destination: Amazon S3,
+ # Amazon Elasticsearch Service, or Amazon Redshift. You must specify
+ # only one of the following destination configuration parameters:
+ # **ExtendedS3DestinationConfiguration**,
+ # **S3DestinationConfiguration**,
+ # **ElasticsearchDestinationConfiguration**, or
+ # **RedshiftDestinationConfiguration**.
+ #
+ # When you specify **S3DestinationConfiguration**, you can also provide
+ # the following optional values: **BufferingHints**,
+ # **EncryptionConfiguration**, and **CompressionFormat**. By default, if
+ # no **BufferingHints** value is provided, Firehose buffers data up to 5
+ # MB or for 5 minutes, whichever condition is satisfied first. Note that
+ # **BufferingHints** is a hint, so there are some cases where the
+ # service cannot adhere to these conditions strictly; for example,
+ # record boundaries are such that the size is a little over or under the
+ # configured buffering size. By default, no encryption is performed. We
+ # strongly recommend that you enable encryption to ensure secure data
+ # storage in Amazon S3.
+ #
+ # A few notes about Amazon Redshift as a destination:
+ #
+ # * An Amazon Redshift destination requires an S3 bucket as an intermediate
+ # location, as Firehose first delivers data to S3 and then uses `COPY`
+ # syntax to load data into an Amazon Redshift table. This is specified
+ # in the **RedshiftDestinationConfiguration.S3Configuration**
+ # parameter.
+ #
+ # * The compression formats `SNAPPY` or `ZIP` cannot be specified in
+ # **RedshiftDestinationConfiguration.S3Configuration** because the
+ # Amazon Redshift `COPY` operation that reads from the S3 bucket
+ # doesn't support these compression formats.
+ #
+ # * We strongly recommend that you use the user name and password you
+ # provide exclusively with Firehose, and that the permissions for the
+ # account are restricted for Amazon Redshift `INSERT` permissions.
+ #
+ # Firehose assumes the IAM role that is configured as part of the
+ # destination. The role should allow the Firehose principal to assume
+ # the role, and the role should have permissions that allow the service
+ # to deliver the data. For more information, see [Amazon S3 Bucket
+ # Access][1] in the *Amazon Kinesis Firehose Developer Guide*.
+ #
+ #
+ #
+ # [1]: http://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3
+ #
+ # @option params [required, String] :delivery_stream_name
+ # The name of the delivery stream. This name must be unique per AWS
+ # account in the same region. You can have multiple delivery streams
+ # with the same name if they are in different accounts or different
+ # regions.
+ #
+ # @option params [Types::S3DestinationConfiguration] :s3_destination_configuration
+ # \[Deprecated\] The destination in Amazon S3. You can specify only one
+ # destination.
+ #
+ # @option params [Types::ExtendedS3DestinationConfiguration] :extended_s3_destination_configuration
+ # The destination in Amazon S3. You can specify only one destination.
+ #
+ # @option params [Types::RedshiftDestinationConfiguration] :redshift_destination_configuration
+ # The destination in Amazon Redshift. You can specify only one
+ # destination.
+ #
+ # @option params [Types::ElasticsearchDestinationConfiguration] :elasticsearch_destination_configuration
+ # The destination in Amazon ES. You can specify only one destination.
+ #
+ # @return [Types::CreateDeliveryStreamOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::CreateDeliveryStreamOutput#delivery_stream_arn #delivery_stream_arn} => String
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.create_delivery_stream({
+ # delivery_stream_name: "DeliveryStreamName", # required
+ # s3_destination_configuration: {
+ # role_arn: "RoleARN", # required
+ # bucket_arn: "BucketARN", # required
+ # prefix: "Prefix",
+ # buffering_hints: {
+ # size_in_m_bs: 1,
+ # interval_in_seconds: 1,
+ # },
+ # compression_format: "UNCOMPRESSED", # accepts UNCOMPRESSED, GZIP, ZIP, Snappy
+ # encryption_configuration: {
+ # no_encryption_config: "NoEncryption", # accepts NoEncryption
+ # kms_encryption_config: {
+ # awskms_key_arn: "AWSKMSKeyARN", # required
+ # },
+ # },
+ # cloud_watch_logging_options: {
+ # enabled: false,
+ # log_group_name: "LogGroupName",
+ # log_stream_name: "LogStreamName",
+ # },
+ # },
+ # extended_s3_destination_configuration: {
+ # role_arn: "RoleARN", # required
+ # bucket_arn: "BucketARN", # required
+ # prefix: "Prefix",
+ # buffering_hints: {
+ # size_in_m_bs: 1,
+ # interval_in_seconds: 1,
+ # },
+ # compression_format: "UNCOMPRESSED", # accepts UNCOMPRESSED, GZIP, ZIP, Snappy
+ # encryption_configuration: {
+ # no_encryption_config: "NoEncryption", # accepts NoEncryption
+ # kms_encryption_config: {
+ # awskms_key_arn: "AWSKMSKeyARN", # required
+ # },
+ # },
+ # cloud_watch_logging_options: {
+ # enabled: false,
+ # log_group_name: "LogGroupName",
+ # log_stream_name: "LogStreamName",
+ # },
+ # processing_configuration: {
+ # enabled: false,
+ # processors: [
+ # {
+ # type: "Lambda", # required, accepts Lambda
+ # parameters: [
+ # {
+ # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries
+ # parameter_value: "ProcessorParameterValue", # required
+ # },
+ # ],
+ # },
+ # ],
+ # },
+ # s3_backup_mode: "Disabled", # accepts Disabled, Enabled
+ # s3_backup_configuration: {
+ # role_arn: "RoleARN", # required
+ # bucket_arn: "BucketARN", # required
+ # prefix: "Prefix",
+ # buffering_hints: {
+ # size_in_m_bs: 1,
+ # interval_in_seconds: 1,
+ # },
+ # compression_format: "UNCOMPRESSED", # accepts UNCOMPRESSED, GZIP, ZIP, Snappy
+ # encryption_configuration: {
+ # no_encryption_config: "NoEncryption", # accepts NoEncryption
+ # kms_encryption_config: {
+ # awskms_key_arn: "AWSKMSKeyARN", # required
+ # },
+ # },
+ # cloud_watch_logging_options: {
+ # enabled: false,
+ # log_group_name: "LogGroupName",
+ # log_stream_name: "LogStreamName",
+ # },
+ # },
+ # },
+ # redshift_destination_configuration: {
+ # role_arn: "RoleARN", # required
+ # cluster_jdbcurl: "ClusterJDBCURL", # required
+ # copy_command: { # required
+ # data_table_name: "DataTableName", # required
+ # data_table_columns: "DataTableColumns",
+ # copy_options: "CopyOptions",
+ # },
+ # username: "Username", # required
+ # password: "Password", # required
+ # retry_options: {
+ # duration_in_seconds: 1,
+ # },
+ # s3_configuration: { # required
+ # role_arn: "RoleARN", # required
+ # bucket_arn: "BucketARN", # required
+ # prefix: "Prefix",
+ # buffering_hints: {
+ # size_in_m_bs: 1,
+ # interval_in_seconds: 1,
+ # },
+ # compression_format: "UNCOMPRESSED", # accepts UNCOMPRESSED, GZIP, ZIP, Snappy
+ # encryption_configuration: {
+ # no_encryption_config: "NoEncryption", # accepts NoEncryption
+ # kms_encryption_config: {
+ # awskms_key_arn: "AWSKMSKeyARN", # required
+ # },
+ # },
+ # cloud_watch_logging_options: {
+ # enabled: false,
+ # log_group_name: "LogGroupName",
+ # log_stream_name: "LogStreamName",
+ # },
+ # },
+ # processing_configuration: {
+ # enabled: false,
+ # processors: [
+ # {
+ # type: "Lambda", # required, accepts Lambda
+ # parameters: [
+ # {
+ # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries
+ # parameter_value: "ProcessorParameterValue", # required
+ # },
+ # ],
+ # },
+ # ],
+ # },
+ # s3_backup_mode: "Disabled", # accepts Disabled, Enabled
+ # s3_backup_configuration: {
+ # role_arn: "RoleARN", # required
+ # bucket_arn: "BucketARN", # required
+ # prefix: "Prefix",
+ # buffering_hints: {
+ # size_in_m_bs: 1,
+ # interval_in_seconds: 1,
+ # },
+ # compression_format: "UNCOMPRESSED", # accepts UNCOMPRESSED, GZIP, ZIP, Snappy
+ # encryption_configuration: {
+ # no_encryption_config: "NoEncryption", # accepts NoEncryption
+ # kms_encryption_config: {
+ # awskms_key_arn: "AWSKMSKeyARN", # required
+ # },
+ # },
+ # cloud_watch_logging_options: {
+ # enabled: false,
+ # log_group_name: "LogGroupName",
+ # log_stream_name: "LogStreamName",
+ # },
+ # },
+ # cloud_watch_logging_options: {
+ # enabled: false,
+ # log_group_name: "LogGroupName",
+ # log_stream_name: "LogStreamName",
+ # },
+ # },
+ # elasticsearch_destination_configuration: {
+ # role_arn: "RoleARN", # required
+ # domain_arn: "ElasticsearchDomainARN", # required
+ # index_name: "ElasticsearchIndexName", # required
+ # type_name: "ElasticsearchTypeName", # required
+ # index_rotation_period: "NoRotation", # accepts NoRotation, OneHour, OneDay, OneWeek, OneMonth
+ # buffering_hints: {
+ # interval_in_seconds: 1,
+ # size_in_m_bs: 1,
+ # },
+ # retry_options: {
+ # duration_in_seconds: 1,
+ # },
+ # s3_backup_mode: "FailedDocumentsOnly", # accepts FailedDocumentsOnly, AllDocuments
+ # s3_configuration: { # required
+ # role_arn: "RoleARN", # required
+ # bucket_arn: "BucketARN", # required
+ # prefix: "Prefix",
+ # buffering_hints: {
+ # size_in_m_bs: 1,
+ # interval_in_seconds: 1,
+ # },
+ # compression_format: "UNCOMPRESSED", # accepts UNCOMPRESSED, GZIP, ZIP, Snappy
+ # encryption_configuration: {
+ # no_encryption_config: "NoEncryption", # accepts NoEncryption
+ # kms_encryption_config: {
+ # awskms_key_arn: "AWSKMSKeyARN", # required
+ # },
+ # },
+ # cloud_watch_logging_options: {
+ # enabled: false,
+ # log_group_name: "LogGroupName",
+ # log_stream_name: "LogStreamName",
+ # },
+ # },
+ # processing_configuration: {
+ # enabled: false,
+ # processors: [
+ # {
+ # type: "Lambda", # required, accepts Lambda
+ # parameters: [
+ # {
+ # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries
+ # parameter_value: "ProcessorParameterValue", # required
+ # },
+ # ],
+ # },
+ # ],
+ # },
+ # cloud_watch_logging_options: {
+ # enabled: false,
+ # log_group_name: "LogGroupName",
+ # log_stream_name: "LogStreamName",
+ # },
+ # },
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.delivery_stream_arn #=> String
+ #
+ # @overload create_delivery_stream(params = {})
+ # @param [Hash] params ({})
+ def create_delivery_stream(params = {}, options = {})
+ req = build_request(:create_delivery_stream, params)
+ req.send_request(options)
+ end
 
- # Describes the specified delivery stream and gets the status. For
- # example, after your delivery stream is created, call
- # DescribeDeliveryStream to see if the delivery stream is `ACTIVE` and
- # therefore ready for data to be sent to it.
- # @option params [required, String] :delivery_stream_name
- # The name of the delivery stream.
- # @option params [Integer] :limit
- # The limit on the number of destinations to return. Currently, you can
- # have one destination per delivery stream.
- # @option params [String] :exclusive_start_destination_id
- # Specifies the destination ID to start returning the destination
- # information. Currently Firehose supports one destination per delivery
- # stream.
- # @return [Types::DescribeDeliveryStreamOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::DescribeDeliveryStreamOutput#delivery_stream_description #DeliveryStreamDescription} => Types::DeliveryStreamDescription
- #
- # @example Request syntax with placeholder values
- # resp = client.describe_delivery_stream({
- # delivery_stream_name: "DeliveryStreamName", # required
- # limit: 1,
- # exclusive_start_destination_id: "DestinationId",
- # })
- #
- # @example Response structure
- # resp.delivery_stream_description.delivery_stream_name #=> String
- # resp.delivery_stream_description.delivery_stream_arn #=> String
- # resp.delivery_stream_description.delivery_stream_status #=> String, one of "CREATING", "DELETING", "ACTIVE"
- # resp.delivery_stream_description.version_id #=> String
- # resp.delivery_stream_description.create_timestamp #=> Time
- # resp.delivery_stream_description.last_update_timestamp #=> Time
- # resp.delivery_stream_description.destinations #=> Array
- # resp.delivery_stream_description.destinations[0].destination_id #=> String
- # resp.delivery_stream_description.destinations[0].s3_destination_description.role_arn #=> String
- # resp.delivery_stream_description.destinations[0].s3_destination_description.bucket_arn #=> String
- # resp.delivery_stream_description.destinations[0].s3_destination_description.prefix #=> String
- # resp.delivery_stream_description.destinations[0].s3_destination_description.buffering_hints.size_in_m_bs #=> Integer
- # resp.delivery_stream_description.destinations[0].s3_destination_description.buffering_hints.interval_in_seconds #=> Integer
- # resp.delivery_stream_description.destinations[0].s3_destination_description.compression_format #=> String, one of "UNCOMPRESSED", "GZIP", "ZIP", "Snappy"
- # resp.delivery_stream_description.destinations[0].s3_destination_description.encryption_configuration.no_encryption_config #=> String, one of "NoEncryption"
- # resp.delivery_stream_description.destinations[0].s3_destination_description.encryption_configuration.kms_encryption_config.awskms_key_arn #=> String
- # resp.delivery_stream_description.destinations[0].s3_destination_description.cloud_watch_logging_options.enabled #=> Boolean
- # resp.delivery_stream_description.destinations[0].s3_destination_description.cloud_watch_logging_options.log_group_name #=> String
- # resp.delivery_stream_description.destinations[0].s3_destination_description.cloud_watch_logging_options.log_stream_name #=> String
- # resp.delivery_stream_description.destinations[0].redshift_destination_description.role_arn #=> String
- # resp.delivery_stream_description.destinations[0].redshift_destination_description.cluster_jdbcurl #=> String
- # resp.delivery_stream_description.destinations[0].redshift_destination_description.copy_command.data_table_name #=> String
- # resp.delivery_stream_description.destinations[0].redshift_destination_description.copy_command.data_table_columns #=> String
- # resp.delivery_stream_description.destinations[0].redshift_destination_description.copy_command.copy_options #=> String
- # resp.delivery_stream_description.destinations[0].redshift_destination_description.username #=> String
- # resp.delivery_stream_description.destinations[0].redshift_destination_description.retry_options.duration_in_seconds #=> Integer
- # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.role_arn #=> String
- # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.bucket_arn #=> String
- # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.prefix #=> String
- # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.buffering_hints.size_in_m_bs #=> Integer
- # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.buffering_hints.interval_in_seconds #=> Integer
- # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.compression_format #=> String, one of "UNCOMPRESSED", "GZIP", "ZIP", "Snappy"
- # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.encryption_configuration.no_encryption_config #=> String, one of "NoEncryption"
- # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.encryption_configuration.kms_encryption_config.awskms_key_arn #=> String
- # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.cloud_watch_logging_options.enabled #=> Boolean
- # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.cloud_watch_logging_options.log_group_name #=> String
- # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.cloud_watch_logging_options.log_stream_name #=> String
- # resp.delivery_stream_description.destinations[0].redshift_destination_description.cloud_watch_logging_options.enabled #=> Boolean
- # resp.delivery_stream_description.destinations[0].redshift_destination_description.cloud_watch_logging_options.log_group_name #=> String
- # resp.delivery_stream_description.destinations[0].redshift_destination_description.cloud_watch_logging_options.log_stream_name #=> String
- # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.role_arn #=> String
- # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.domain_arn #=> String
- # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.index_name #=> String
- # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.type_name #=> String
- # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.index_rotation_period #=> String, one of "NoRotation", "OneHour", "OneDay", "OneWeek", "OneMonth"
- # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.buffering_hints.interval_in_seconds #=> Integer
- # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.buffering_hints.size_in_m_bs #=> Integer
- # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.retry_options.duration_in_seconds #=> Integer
- # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_backup_mode #=> String, one of "FailedDocumentsOnly", "AllDocuments"
- # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.role_arn #=> String
- # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.bucket_arn #=> String
- # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.prefix #=> String
- # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.buffering_hints.size_in_m_bs #=> Integer
- # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.buffering_hints.interval_in_seconds #=> Integer
- # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.compression_format #=> String, one of "UNCOMPRESSED", "GZIP", "ZIP", "Snappy"
- # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.encryption_configuration.no_encryption_config #=> String, one of "NoEncryption"
- # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.encryption_configuration.kms_encryption_config.awskms_key_arn #=> String
- # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.cloud_watch_logging_options.enabled #=> Boolean
- # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.cloud_watch_logging_options.log_group_name #=> String
- # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.cloud_watch_logging_options.log_stream_name #=> String
- # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.cloud_watch_logging_options.enabled #=> Boolean
- # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.cloud_watch_logging_options.log_group_name #=> String
- # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.cloud_watch_logging_options.log_stream_name #=> String
- # resp.delivery_stream_description.has_more_destinations #=> Boolean
- # @overload describe_delivery_stream(params = {})
- # @param [Hash] params ({})
- def describe_delivery_stream(params = {}, options = {})
- req = build_request(:describe_delivery_stream, params)
- req.send_request(options)
- end
+ # Deletes a delivery stream and its data.
+ #
+ # You can delete a delivery stream only if it is in `ACTIVE` or
+ # `DELETING` state, and not in the `CREATING` state. While the deletion
+ # request is in process, the delivery stream is in the `DELETING` state.
+ #
+ # To check the state of a delivery stream, use DescribeDeliveryStream.
+ #
+ # While the delivery stream is in the `DELETING` state, the service may
+ # continue to accept the records, but the service doesn't make any
+ # guarantees with respect to delivering the data. Therefore, as a best
+ # practice, you should first stop any applications that are sending
+ # records before deleting a delivery stream.
+ #
+ # @option params [required, String] :delivery_stream_name
+ # The name of the delivery stream.
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.delete_delivery_stream({
+ # delivery_stream_name: "DeliveryStreamName", # required
+ # })
+ #
+ # @overload delete_delivery_stream(params = {})
+ # @param [Hash] params ({})
+ def delete_delivery_stream(params = {}, options = {})
+ req = build_request(:delete_delivery_stream, params)
+ req.send_request(options)
+ end
 
- # Lists your delivery streams.
- #
- # The number of delivery streams might be too large to return using a
- # single call to ListDeliveryStreams. You can limit the number of
- # delivery streams returned, using the **Limit** parameter. To determine
- # whether there are more delivery streams to list, check the value of
- # **HasMoreDeliveryStreams** in the output. If there are more delivery
- # streams to list, you can request them by specifying the name of the
- # last delivery stream returned in the call in the
- # **ExclusiveStartDeliveryStreamName** parameter of a subsequent call.
- # @option params [Integer] :limit
- # The maximum number of delivery streams to list.
- # @option params [String] :exclusive_start_delivery_stream_name
- # The name of the delivery stream to start the list with.
- # @return [Types::ListDeliveryStreamsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::ListDeliveryStreamsOutput#delivery_stream_names #DeliveryStreamNames} => Array<String>
- # * {Types::ListDeliveryStreamsOutput#has_more_delivery_streams #HasMoreDeliveryStreams} => Boolean
- #
- # @example Request syntax with placeholder values
- # resp = client.list_delivery_streams({
- # limit: 1,
- # exclusive_start_delivery_stream_name: "DeliveryStreamName",
- # })
- #
- # @example Response structure
- # resp.delivery_stream_names #=> Array
- # resp.delivery_stream_names[0] #=> String
- # resp.has_more_delivery_streams #=> Boolean
- # @overload list_delivery_streams(params = {})
- # @param [Hash] params ({})
- def list_delivery_streams(params = {}, options = {})
- req = build_request(:list_delivery_streams, params)
- req.send_request(options)
- end
+ # Describes the specified delivery stream and gets the status. For
+ # example, after your delivery stream is created, call
+ # DescribeDeliveryStream to see if the delivery stream is `ACTIVE` and
+ # therefore ready for data to be sent to it.
+ #
+ # @option params [required, String] :delivery_stream_name
+ # The name of the delivery stream.
+ #
+ # @option params [Integer] :limit
+ # The limit on the number of destinations to return. Currently, you can
+ # have one destination per delivery stream.
+ #
+ # @option params [String] :exclusive_start_destination_id
+ # The ID of the destination to start returning the destination
+ # information. Currently Firehose supports one destination per delivery
+ # stream.
+ #
+ # @return [Types::DescribeDeliveryStreamOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::DescribeDeliveryStreamOutput#delivery_stream_description #delivery_stream_description} => Types::DeliveryStreamDescription
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.describe_delivery_stream({
+ # delivery_stream_name: "DeliveryStreamName", # required
+ # limit: 1,
+ # exclusive_start_destination_id: "DestinationId",
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.delivery_stream_description.delivery_stream_name #=> String
+ # resp.delivery_stream_description.delivery_stream_arn #=> String
+ # resp.delivery_stream_description.delivery_stream_status #=> String, one of "CREATING", "DELETING", "ACTIVE"
+ # resp.delivery_stream_description.version_id #=> String
+ # resp.delivery_stream_description.create_timestamp #=> Time
+ # resp.delivery_stream_description.last_update_timestamp #=> Time
+ # resp.delivery_stream_description.destinations #=> Array
+ # resp.delivery_stream_description.destinations[0].destination_id #=> String
+ # resp.delivery_stream_description.destinations[0].s3_destination_description.role_arn #=> String
+ # resp.delivery_stream_description.destinations[0].s3_destination_description.bucket_arn #=> String
+ # resp.delivery_stream_description.destinations[0].s3_destination_description.prefix #=> String
+ # resp.delivery_stream_description.destinations[0].s3_destination_description.buffering_hints.size_in_m_bs #=> Integer
+ # resp.delivery_stream_description.destinations[0].s3_destination_description.buffering_hints.interval_in_seconds #=> Integer
+ # resp.delivery_stream_description.destinations[0].s3_destination_description.compression_format #=> String, one of "UNCOMPRESSED", "GZIP", "ZIP", "Snappy"
+ # resp.delivery_stream_description.destinations[0].s3_destination_description.encryption_configuration.no_encryption_config #=> String, one of "NoEncryption"
+ # resp.delivery_stream_description.destinations[0].s3_destination_description.encryption_configuration.kms_encryption_config.awskms_key_arn #=> String
+ # resp.delivery_stream_description.destinations[0].s3_destination_description.cloud_watch_logging_options.enabled #=> Boolean
+ # resp.delivery_stream_description.destinations[0].s3_destination_description.cloud_watch_logging_options.log_group_name #=> String
+ # resp.delivery_stream_description.destinations[0].s3_destination_description.cloud_watch_logging_options.log_stream_name #=> String
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.role_arn #=> String
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.bucket_arn #=> String
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.prefix #=> String
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.buffering_hints.size_in_m_bs #=> Integer
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.buffering_hints.interval_in_seconds #=> Integer
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.compression_format #=> String, one of "UNCOMPRESSED", "GZIP", "ZIP", "Snappy"
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.encryption_configuration.no_encryption_config #=> String, one of "NoEncryption"
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.encryption_configuration.kms_encryption_config.awskms_key_arn #=> String
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.cloud_watch_logging_options.enabled #=> Boolean
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.cloud_watch_logging_options.log_group_name #=> String
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.cloud_watch_logging_options.log_stream_name #=> String
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.processing_configuration.enabled #=> Boolean
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.processing_configuration.processors #=> Array
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.processing_configuration.processors[0].type #=> String, one of "Lambda"
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.processing_configuration.processors[0].parameters #=> Array
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.processing_configuration.processors[0].parameters[0].parameter_name #=> String, one of "LambdaArn", "NumberOfRetries"
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.processing_configuration.processors[0].parameters[0].parameter_value #=> String
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.s3_backup_mode #=> String, one of "Disabled", "Enabled"
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.s3_backup_description.role_arn #=> String
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.s3_backup_description.bucket_arn #=> String
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.s3_backup_description.prefix #=> String
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.s3_backup_description.buffering_hints.size_in_m_bs #=> Integer
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.s3_backup_description.buffering_hints.interval_in_seconds #=> Integer
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.s3_backup_description.compression_format #=> String, one of "UNCOMPRESSED", "GZIP", "ZIP", "Snappy"
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.s3_backup_description.encryption_configuration.no_encryption_config #=> String, one of "NoEncryption"
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.s3_backup_description.encryption_configuration.kms_encryption_config.awskms_key_arn #=> String
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.s3_backup_description.cloud_watch_logging_options.enabled #=> Boolean
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.s3_backup_description.cloud_watch_logging_options.log_group_name #=> String
+ # resp.delivery_stream_description.destinations[0].extended_s3_destination_description.s3_backup_description.cloud_watch_logging_options.log_stream_name #=> String
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.role_arn #=> String
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.cluster_jdbcurl #=> String
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.copy_command.data_table_name #=> String
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.copy_command.data_table_columns #=> String
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.copy_command.copy_options #=> String
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.username #=> String
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.retry_options.duration_in_seconds #=> Integer
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.role_arn #=> String
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.bucket_arn #=> String
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.prefix #=> String
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.buffering_hints.size_in_m_bs #=> Integer
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.buffering_hints.interval_in_seconds #=> Integer
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.compression_format #=> String, one of "UNCOMPRESSED", "GZIP", "ZIP", "Snappy"
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.encryption_configuration.no_encryption_config #=> String, one of "NoEncryption"
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.encryption_configuration.kms_encryption_config.awskms_key_arn #=> String
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.cloud_watch_logging_options.enabled #=> Boolean
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.cloud_watch_logging_options.log_group_name #=> String
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.cloud_watch_logging_options.log_stream_name #=> String
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.processing_configuration.enabled #=> Boolean
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.processing_configuration.processors #=> Array
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.processing_configuration.processors[0].type #=> String, one of "Lambda"
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.processing_configuration.processors[0].parameters #=> Array
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.processing_configuration.processors[0].parameters[0].parameter_name #=> String, one of "LambdaArn", "NumberOfRetries"
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.processing_configuration.processors[0].parameters[0].parameter_value #=> String
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_backup_mode #=> String, one of "Disabled", "Enabled"
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_backup_description.role_arn #=> String
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_backup_description.bucket_arn
602
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_backup_description.prefix #=> String
603
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_backup_description.buffering_hints.size_in_m_bs #=> Integer
604
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_backup_description.buffering_hints.interval_in_seconds #=> Integer
605
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_backup_description.compression_format #=> String, one of "UNCOMPRESSED", "GZIP", "ZIP", "Snappy"
606
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_backup_description.encryption_configuration.no_encryption_config #=> String, one of "NoEncryption"
607
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_backup_description.encryption_configuration.kms_encryption_config.awskms_key_arn #=> String
608
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_backup_description.cloud_watch_logging_options.enabled #=> Boolean
609
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_backup_description.cloud_watch_logging_options.log_group_name #=> String
610
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_backup_description.cloud_watch_logging_options.log_stream_name #=> String
611
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.cloud_watch_logging_options.enabled #=> Boolean
612
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.cloud_watch_logging_options.log_group_name #=> String
613
+ # resp.delivery_stream_description.destinations[0].redshift_destination_description.cloud_watch_logging_options.log_stream_name #=> String
614
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.role_arn #=> String
615
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.domain_arn #=> String
616
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.index_name #=> String
617
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.type_name #=> String
618
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.index_rotation_period #=> String, one of "NoRotation", "OneHour", "OneDay", "OneWeek", "OneMonth"
619
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.buffering_hints.interval_in_seconds #=> Integer
620
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.buffering_hints.size_in_m_bs #=> Integer
621
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.retry_options.duration_in_seconds #=> Integer
622
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_backup_mode #=> String, one of "FailedDocumentsOnly", "AllDocuments"
623
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.role_arn #=> String
624
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.bucket_arn #=> String
625
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.prefix #=> String
626
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.buffering_hints.size_in_m_bs #=> Integer
627
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.buffering_hints.interval_in_seconds #=> Integer
628
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.compression_format #=> String, one of "UNCOMPRESSED", "GZIP", "ZIP", "Snappy"
629
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.encryption_configuration.no_encryption_config #=> String, one of "NoEncryption"
630
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.encryption_configuration.kms_encryption_config.awskms_key_arn #=> String
631
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.cloud_watch_logging_options.enabled #=> Boolean
632
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.cloud_watch_logging_options.log_group_name #=> String
633
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.cloud_watch_logging_options.log_stream_name #=> String
634
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.processing_configuration.enabled #=> Boolean
635
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.processing_configuration.processors #=> Array
636
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.processing_configuration.processors[0].type #=> String, one of "Lambda"
637
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.processing_configuration.processors[0].parameters #=> Array
638
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.processing_configuration.processors[0].parameters[0].parameter_name #=> String, one of "LambdaArn", "NumberOfRetries"
639
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.processing_configuration.processors[0].parameters[0].parameter_value #=> String
640
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.cloud_watch_logging_options.enabled #=> Boolean
641
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.cloud_watch_logging_options.log_group_name #=> String
642
+ # resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.cloud_watch_logging_options.log_stream_name #=> String
643
+ # resp.delivery_stream_description.has_more_destinations #=> Boolean
644
+ #
645
+ # @overload describe_delivery_stream(params = {})
646
+ # @param [Hash] params ({})
647
+ def describe_delivery_stream(params = {}, options = {})
648
+ req = build_request(:describe_delivery_stream, params)
649
+ req.send_request(options)
650
+ end
498
651
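The response structure above is deeply nested. A minimal sketch of reading it, assuming a configured client, a hypothetical stream named "my-stream", and an arbitrary region:

    require 'aws-sdk-firehose'

    client = Aws::Firehose::Client.new(region: 'us-east-1') # region is an assumption
    resp = client.describe_delivery_stream(delivery_stream_name: 'my-stream')

    desc = resp.delivery_stream_description
    puts desc.delivery_stream_status # e.g. "ACTIVE"
    desc.destinations.each do |dest|
      # Only the member matching the stream's configured destination
      # type is populated; the others are nil.
      if (s3 = dest.extended_s3_destination_description)
        puts "S3 destination: #{s3.bucket_arn}"
      end
    end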
 
499
- # Writes a single data record into an Amazon Kinesis Firehose delivery
500
- # stream. To write multiple data records into a delivery stream, use
501
- # PutRecordBatch. Applications using these operations are referred to as
502
- # producers.
503
- #
504
- # By default, each delivery stream can take in up to 2,000 transactions
505
- # per second, 5,000 records per second, or 5 MB per second. Note that if
506
- # you use PutRecord and PutRecordBatch, the limits are an aggregate
507
- # across these two operations for each delivery stream. For more
508
- # information about limits and how to request an increase, see [Amazon
509
- # Kinesis Firehose Limits][1].
510
- #
511
- # You must specify the name of the delivery stream and the data record
512
- # when using PutRecord. The data record consists of a data blob that can
513
- # be up to 1,000 KB in size, and any kind of data, for example, a
514
- # segment from a log file, geographic location data, web site
515
- # clickstream data, etc.
516
- #
517
- # Firehose buffers records before delivering them to the destination. To
518
- # disambiguate the data blobs at the destination, a common solution is
519
- # to use delimiters in the data, such as a newline (`\n`) or some other
520
- # character unique within the data. This allows the consumer
521
- # application(s) to parse individual data items when reading the data
522
- # from the destination.
523
- #
524
- # The PutRecord operation returns a **RecordId**, which is a unique
525
- # string assigned to each record. Producer applications can use this ID
526
- # for purposes such as auditability and investigation.
527
- #
528
- # If the PutRecord operation throws a **ServiceUnavailableException**,
529
- # back off and retry. If the exception persists, it is possible that the
530
- # throughput limits have been exceeded for the delivery stream.
531
- #
532
- # Data records sent to Firehose are stored for 24 hours from the time
533
- # they are added to a delivery stream as it attempts to send the records
534
- # to the destination. If the destination is unreachable for more than 24
535
- # hours, the data is no longer available.
536
- #
537
- #
538
- #
539
- # [1]: http://docs.aws.amazon.com/firehose/latest/dev/limits.html
540
- # @option params [required, String] :delivery_stream_name
541
- # The name of the delivery stream.
542
- # @option params [required, Types::Record] :record
543
- # The record.
544
- # @return [Types::PutRecordOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
545
- #
546
- # * {Types::PutRecordOutput#record_id #RecordId} => String
547
- #
548
- # @example Request syntax with placeholder values
549
- # resp = client.put_record({
550
- # delivery_stream_name: "DeliveryStreamName", # required
551
- # record: { # required
552
- # data: "data", # required
553
- # },
554
- # })
555
- #
556
- # @example Response structure
557
- # resp.record_id #=> String
558
- # @overload put_record(params = {})
559
- # @param [Hash] params ({})
560
- def put_record(params = {}, options = {})
561
- req = build_request(:put_record, params)
562
- req.send_request(options)
563
- end
652
+ # Lists your delivery streams.
653
+ #
654
+ # The number of delivery streams might be too large to return using a
655
+ # single call to ListDeliveryStreams. You can limit the number of
656
+ # delivery streams returned, using the **Limit** parameter. To determine
657
+ # whether there are more delivery streams to list, check the value of
658
+ # **HasMoreDeliveryStreams** in the output. If there are more delivery
659
+ # streams to list, you can request them by specifying the name of the
660
+ # last delivery stream returned in the call in the
661
+ # **ExclusiveStartDeliveryStreamName** parameter of a subsequent call.
662
+ #
663
+ # @option params [Integer] :limit
664
+ # The maximum number of delivery streams to list.
665
+ #
666
+ # @option params [String] :exclusive_start_delivery_stream_name
667
+ # The name of the delivery stream to start the list with.
668
+ #
669
+ # @return [Types::ListDeliveryStreamsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
670
+ #
671
+ # * {Types::ListDeliveryStreamsOutput#delivery_stream_names #delivery_stream_names} => Array&lt;String&gt;
672
+ # * {Types::ListDeliveryStreamsOutput#has_more_delivery_streams #has_more_delivery_streams} => Boolean
673
+ #
674
+ # @example Request syntax with placeholder values
675
+ #
676
+ # resp = client.list_delivery_streams({
677
+ # limit: 1,
678
+ # exclusive_start_delivery_stream_name: "DeliveryStreamName",
679
+ # })
680
+ #
681
+ # @example Response structure
682
+ #
683
+ # resp.delivery_stream_names #=> Array
684
+ # resp.delivery_stream_names[0] #=> String
685
+ # resp.has_more_delivery_streams #=> Boolean
686
+ #
687
+ # @overload list_delivery_streams(params = {})
688
+ # @param [Hash] params ({})
689
+ def list_delivery_streams(params = {}, options = {})
690
+ req = build_request(:list_delivery_streams, params)
691
+ req.send_request(options)
692
+ end
564
693
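A sketch of the pagination loop the description above implies; the page size of 10, the region, and the stream names are assumptions:

    require 'aws-sdk-firehose'

    client = Aws::Firehose::Client.new(region: 'us-east-1')
    names = []
    params = { limit: 10 }
    loop do
      resp = client.list_delivery_streams(params)
      names.concat(resp.delivery_stream_names)
      break unless resp.has_more_delivery_streams
      # Resume after the last delivery stream returned in this page.
      params[:exclusive_start_delivery_stream_name] = resp.delivery_stream_names.last
    end
    puts names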
 
565
- # Writes multiple data records into a delivery stream in a single call,
566
- # which can achieve higher throughput per producer than when writing
567
- # single records. To write single data records into a delivery stream,
568
- # use PutRecord. Applications using these operations are referred to as
569
- # producers.
570
- #
571
- # Each PutRecordBatch request supports up to 500 records. Each record in
572
- # the request can be as large as 1,000 KB (before base64 encoding), up
573
- # to a limit of 4 MB for the entire request. By default, each delivery
574
- # stream can take in up to 2,000 transactions per second, 5,000 records
575
- # per second, or 5 MB per second. Note that if you use PutRecord and
576
- # PutRecordBatch, the limits are an aggregate across these two
577
- # operations for each delivery stream. For more information about limits
578
- # and how to request an increase, see [Amazon Kinesis Firehose
579
- # Limits][1].
580
- #
581
- # You must specify the name of the delivery stream and the data record
582
- # when using PutRecordBatch. The data record consists of a data blob that can
583
- # be up to 1,000 KB in size, and any kind of data, for example, a
584
- # segment from a log file, geographic location data, web site
585
- # clickstream data, and so on.
586
- #
587
- # Firehose buffers records before delivering them to the destination. To
588
- # disambiguate the data blobs at the destination, a common solution is
589
- # to use delimiters in the data, such as a newline (`\n`) or some other
590
- # character unique within the data. This allows the consumer
591
- # application(s) to parse individual data items when reading the data
592
- # from the destination.
593
- #
594
- # The PutRecordBatch response includes a count of any failed records,
595
- # **FailedPutCount**, and an array of responses, **RequestResponses**.
596
- # The **FailedPutCount** value is a count of records that failed. Each
597
- # entry in the **RequestResponses** array gives additional information
598
- # about the processed record. Each entry in **RequestResponses** directly
599
- # correlates with a record in the request array using the same ordering,
600
- # from the top to the bottom of the request and response.
601
- # **RequestResponses** always includes the same number of records as the
602
- # request array. **RequestResponses** includes both successfully and
603
- # unsuccessfully processed records. Firehose attempts to process all
604
- # records in each PutRecordBatch request. A single record failure does
605
- # not stop the processing of subsequent records.
606
- #
607
- # A successfully processed record includes a **RecordId** value, which
608
- # is a unique value identified for the record. An unsuccessfully
609
- # processed record includes **ErrorCode** and **ErrorMessage** values.
610
- # **ErrorCode** reflects the type of error and is one of the following
611
- # values: `ServiceUnavailable` or `InternalFailure`. `ErrorMessage`
612
- # provides more detailed information about the error.
613
- #
614
- # If **FailedPutCount** is greater than 0 (zero), retry the request. A
615
- # retry of the entire batch of records is possible; however, we strongly
616
- # recommend that you inspect the entire response and resend only those
617
- # records that failed processing. This minimizes duplicate records and
618
- # also reduces the total bytes sent (and corresponding charges).
619
- #
620
- # If the PutRecordBatch operation throws a
621
- # **ServiceUnavailableException**, back off and retry. If the exception
622
- # persists, it is possible that the throughput limits have been exceeded
623
- # for the delivery stream.
624
- #
625
- # Data records sent to Firehose are stored for 24 hours from the time
626
- # they are added to a delivery stream as it attempts to send the records
627
- # to the destination. If the destination is unreachable for more than 24
628
- # hours, the data is no longer available.
629
- #
630
- #
631
- #
632
- # [1]: http://docs.aws.amazon.com/firehose/latest/dev/limits.html
633
- # @option params [required, String] :delivery_stream_name
634
- # The name of the delivery stream.
635
- # @option params [required, Array<Types::Record>] :records
636
- # One or more records.
637
- # @return [Types::PutRecordBatchOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
638
- #
639
- # * {Types::PutRecordBatchOutput#failed_put_count #FailedPutCount} => Integer
640
- # * {Types::PutRecordBatchOutput#request_responses #RequestResponses} => Array&lt;Types::PutRecordBatchResponseEntry&gt;
641
- #
642
- # @example Request syntax with placeholder values
643
- # resp = client.put_record_batch({
644
- # delivery_stream_name: "DeliveryStreamName", # required
645
- # records: [ # required
646
- # {
647
- # data: "data", # required
648
- # },
649
- # ],
650
- # })
651
- #
652
- # @example Response structure
653
- # resp.failed_put_count #=> Integer
654
- # resp.request_responses #=> Array
655
- # resp.request_responses[0].record_id #=> String
656
- # resp.request_responses[0].error_code #=> String
657
- # resp.request_responses[0].error_message #=> String
658
- # @overload put_record_batch(params = {})
659
- # @param [Hash] params ({})
660
- def put_record_batch(params = {}, options = {})
661
- req = build_request(:put_record_batch, params)
662
- req.send_request(options)
663
- end
694
+ # Writes a single data record into an Amazon Kinesis Firehose delivery
695
+ # stream. To write multiple data records into a delivery stream, use
696
+ # PutRecordBatch. Applications using these operations are referred to as
697
+ # producers.
698
+ #
699
+ # By default, each delivery stream can take in up to 2,000 transactions
700
+ # per second, 5,000 records per second, or 5 MB per second. Note that if
701
+ # you use PutRecord and PutRecordBatch, the limits are an aggregate
702
+ # across these two operations for each delivery stream. For more
703
+ # information about limits and how to request an increase, see [Amazon
704
+ # Kinesis Firehose Limits][1].
705
+ #
706
+ # You must specify the name of the delivery stream and the data record
707
+ # when using PutRecord. The data record consists of a data blob that can
708
+ # be up to 1,000 KB in size, and any kind of data, for example, a
709
+ # segment from a log file, geographic location data, web site
710
+ # clickstream data, etc.
711
+ #
712
+ # Firehose buffers records before delivering them to the destination. To
713
+ # disambiguate the data blobs at the destination, a common solution is
714
+ # to use delimiters in the data, such as a newline (`\n`) or some other
715
+ # character unique within the data. This allows the consumer
716
+ # application(s) to parse individual data items when reading the data
717
+ # from the destination.
718
+ #
719
+ # The PutRecord operation returns a **RecordId**, which is a unique
720
+ # string assigned to each record. Producer applications can use this ID
721
+ # for purposes such as auditability and investigation.
722
+ #
723
+ # If the PutRecord operation throws a **ServiceUnavailableException**,
724
+ # back off and retry. If the exception persists, it is possible that the
725
+ # throughput limits have been exceeded for the delivery stream.
726
+ #
727
+ # Data records sent to Firehose are stored for 24 hours from the time
728
+ # they are added to a delivery stream as it attempts to send the records
729
+ # to the destination. If the destination is unreachable for more than 24
730
+ # hours, the data is no longer available.
731
+ #
732
+ #
733
+ #
734
+ # [1]: http://docs.aws.amazon.com/firehose/latest/dev/limits.html
735
+ #
736
+ # @option params [required, String] :delivery_stream_name
737
+ # The name of the delivery stream.
738
+ #
739
+ # @option params [required, Types::Record] :record
740
+ # The record.
741
+ #
742
+ # @return [Types::PutRecordOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
743
+ #
744
+ # * {Types::PutRecordOutput#record_id #record_id} => String
745
+ #
746
+ # @example Request syntax with placeholder values
747
+ #
748
+ # resp = client.put_record({
749
+ # delivery_stream_name: "DeliveryStreamName", # required
750
+ # record: { # required
751
+ # data: "data", # required
752
+ # },
753
+ # })
754
+ #
755
+ # @example Response structure
756
+ #
757
+ # resp.record_id #=> String
758
+ #
759
+ # @overload put_record(params = {})
760
+ # @param [Hash] params ({})
761
+ def put_record(params = {}, options = {})
762
+ req = build_request(:put_record, params)
763
+ req.send_request(options)
764
+ end
664
765
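A sketch of the newline-delimiter convention described above, assuming a hypothetical stream named "my-stream" and an arbitrary region:

    require 'aws-sdk-firehose'
    require 'json'

    client = Aws::Firehose::Client.new(region: 'us-east-1')
    event = { user_id: 'u-123', action: 'click' } # sample payload

    # Appending "\n" lets consumers split individual records after
    # Firehose concatenates them at the destination.
    resp = client.put_record(
      delivery_stream_name: 'my-stream',
      record: { data: JSON.generate(event) + "\n" }
    )
    puts resp.record_id # keep for auditing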
 
665
- # Updates the specified destination of the specified delivery stream.
666
- # Note: Switching between Elasticsearch and other services is not
667
- # supported. For an Elasticsearch destination, you can only update an
668
- # existing Elasticsearch destination with this operation.
669
- #
670
- # This operation can be used to change the destination type (for
671
- # example, to replace the Amazon S3 destination with Amazon Redshift) or
672
- # change the parameters associated with a given destination (for
673
- # example, to change the bucket name of the Amazon S3 destination). The
674
- # update may not occur immediately. The target delivery stream remains
675
- # active while the configurations are updated, so data writes to the
676
- # delivery stream can continue during this process. The updated
677
- # configurations are normally effective within a few minutes.
678
- #
679
- # If the destination type is the same, Firehose merges the configuration
680
- # parameters specified in the UpdateDestination request with the
681
- # destination configuration that already exists on the delivery stream.
682
- # If any of the parameters are not specified in the update request, then
683
- # the existing configuration parameters are retained. For example, in
684
- # the Amazon S3 destination, if EncryptionConfiguration is not specified
685
- # then the existing EncryptionConfiguration is maintained on the
686
- # destination.
687
- #
688
- # If the destination type is not the same, for example, changing the
689
- # destination from Amazon S3 to Amazon Redshift, Firehose does not merge
690
- # any parameters. In this case, all parameters must be specified.
691
- #
692
- # Firehose uses the **CurrentDeliveryStreamVersionId** to avoid race
693
- # conditions and conflicting merges. This is a required field in every
694
- # request and the service only updates the configuration if the existing
695
- # configuration matches the **VersionId**. After the update is applied
696
- # successfully, the **VersionId** is updated, which can be retrieved
697
- # with the DescribeDeliveryStream operation. The new **VersionId**
698
- # should be uses to set **CurrentDeliveryStreamVersionId** in the next
699
- # UpdateDestination operation.
700
- # @option params [required, String] :delivery_stream_name
701
- # The name of the delivery stream.
702
- # @option params [required, String] :current_delivery_stream_version_id
703
- # Obtain this value from the **VersionId** result of the
704
- # DeliveryStreamDescription operation. This value is required, and helps
705
- # the service to perform conditional operations. For example, if there
706
- # is a interleaving update and this value is null, then the update
707
- # destination fails. After the update is successful, the **VersionId**
708
- # value is updated. The service then performs a merge of the old
709
- # configuration with the new configuration.
710
- # @option params [required, String] :destination_id
711
- # The ID of the destination.
712
- # @option params [Types::S3DestinationUpdate] :s3_destination_update
713
- # Describes an update for a destination in Amazon S3.
714
- # @option params [Types::RedshiftDestinationUpdate] :redshift_destination_update
715
- # Describes an update for a destination in Amazon Redshift.
716
- # @option params [Types::ElasticsearchDestinationUpdate] :elasticsearch_destination_update
717
- # Describes an update for a destination in Amazon ES.
718
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
719
- #
720
- # @example Request syntax with placeholder values
721
- # resp = client.update_destination({
722
- # delivery_stream_name: "DeliveryStreamName", # required
723
- # current_delivery_stream_version_id: "DeliveryStreamVersionId", # required
724
- # destination_id: "DestinationId", # required
725
- # s3_destination_update: {
726
- # role_arn: "RoleARN",
727
- # bucket_arn: "BucketARN",
728
- # prefix: "Prefix",
729
- # buffering_hints: {
730
- # size_in_m_bs: 1,
731
- # interval_in_seconds: 1,
732
- # },
733
- # compression_format: "UNCOMPRESSED", # accepts UNCOMPRESSED, GZIP, ZIP, Snappy
734
- # encryption_configuration: {
735
- # no_encryption_config: "NoEncryption", # accepts NoEncryption
736
- # kms_encryption_config: {
737
- # awskms_key_arn: "AWSKMSKeyARN", # required
738
- # },
739
- # },
740
- # cloud_watch_logging_options: {
741
- # enabled: false,
742
- # log_group_name: "LogGroupName",
743
- # log_stream_name: "LogStreamName",
744
- # },
745
- # },
746
- # redshift_destination_update: {
747
- # role_arn: "RoleARN",
748
- # cluster_jdbcurl: "ClusterJDBCURL",
749
- # copy_command: {
750
- # data_table_name: "DataTableName", # required
751
- # data_table_columns: "DataTableColumns",
752
- # copy_options: "CopyOptions",
753
- # },
754
- # username: "Username",
755
- # password: "Password",
756
- # retry_options: {
757
- # duration_in_seconds: 1,
758
- # },
759
- # s3_update: {
760
- # role_arn: "RoleARN",
761
- # bucket_arn: "BucketARN",
762
- # prefix: "Prefix",
763
- # buffering_hints: {
764
- # size_in_m_bs: 1,
765
- # interval_in_seconds: 1,
766
- # },
767
- # compression_format: "UNCOMPRESSED", # accepts UNCOMPRESSED, GZIP, ZIP, Snappy
768
- # encryption_configuration: {
769
- # no_encryption_config: "NoEncryption", # accepts NoEncryption
770
- # kms_encryption_config: {
771
- # awskms_key_arn: "AWSKMSKeyARN", # required
772
- # },
773
- # },
774
- # cloud_watch_logging_options: {
775
- # enabled: false,
776
- # log_group_name: "LogGroupName",
777
- # log_stream_name: "LogStreamName",
778
- # },
779
- # },
780
- # cloud_watch_logging_options: {
781
- # enabled: false,
782
- # log_group_name: "LogGroupName",
783
- # log_stream_name: "LogStreamName",
784
- # },
785
- # },
786
- # elasticsearch_destination_update: {
787
- # role_arn: "RoleARN",
788
- # domain_arn: "ElasticsearchDomainARN",
789
- # index_name: "ElasticsearchIndexName",
790
- # type_name: "ElasticsearchTypeName",
791
- # index_rotation_period: "NoRotation", # accepts NoRotation, OneHour, OneDay, OneWeek, OneMonth
792
- # buffering_hints: {
793
- # interval_in_seconds: 1,
794
- # size_in_m_bs: 1,
795
- # },
796
- # retry_options: {
797
- # duration_in_seconds: 1,
798
- # },
799
- # s3_update: {
800
- # role_arn: "RoleARN",
801
- # bucket_arn: "BucketARN",
802
- # prefix: "Prefix",
803
- # buffering_hints: {
804
- # size_in_m_bs: 1,
805
- # interval_in_seconds: 1,
806
- # },
807
- # compression_format: "UNCOMPRESSED", # accepts UNCOMPRESSED, GZIP, ZIP, Snappy
808
- # encryption_configuration: {
809
- # no_encryption_config: "NoEncryption", # accepts NoEncryption
810
- # kms_encryption_config: {
811
- # awskms_key_arn: "AWSKMSKeyARN", # required
812
- # },
813
- # },
814
- # cloud_watch_logging_options: {
815
- # enabled: false,
816
- # log_group_name: "LogGroupName",
817
- # log_stream_name: "LogStreamName",
818
- # },
819
- # },
820
- # cloud_watch_logging_options: {
821
- # enabled: false,
822
- # log_group_name: "LogGroupName",
823
- # log_stream_name: "LogStreamName",
824
- # },
825
- # },
826
- # })
827
- # @overload update_destination(params = {})
828
- # @param [Hash] params ({})
829
- def update_destination(params = {}, options = {})
830
- req = build_request(:update_destination, params)
831
- req.send_request(options)
832
- end
766
+ # Writes multiple data records into a delivery stream in a single call,
767
+ # which can achieve higher throughput per producer than when writing
768
+ # single records. To write single data records into a delivery stream,
769
+ # use PutRecord. Applications using these operations are referred to as
770
+ # producers.
771
+ #
772
+ # By default, each delivery stream can take in up to 2,000 transactions
773
+ # per second, 5,000 records per second, or 5 MB per second. Note that if
774
+ # you use PutRecord and PutRecordBatch, the limits are an aggregate
775
+ # across these two operations for each delivery stream. For more
776
+ # information about limits, see [Amazon Kinesis Firehose Limits][1].
777
+ #
778
+ # Each PutRecordBatch request supports up to 500 records. Each record in
779
+ # the request can be as large as 1,000 KB (before base64 encoding), up
780
+ # to a limit of 4 MB for the entire request. These limits cannot be
781
+ # changed.
782
+ #
783
+ # You must specify the name of the delivery stream and the data record
784
+ # when using PutRecordBatch. The data record consists of a data blob that can
785
+ # be up to 1,000 KB in size, and any kind of data, for example, a
786
+ # segment from a log file, geographic location data, web site
787
+ # clickstream data, and so on.
788
+ #
789
+ # Firehose buffers records before delivering them to the destination. To
790
+ # disambiguate the data blobs at the destination, a common solution is
791
+ # to use delimiters in the data, such as a newline (`\n`) or some other
792
+ # character unique within the data. This allows the consumer
793
+ # application(s) to parse individual data items when reading the data
794
+ # from the destination.
795
+ #
796
+ # The PutRecordBatch response includes a count of failed records,
797
+ # **FailedPutCount**, and an array of responses, **RequestResponses**.
798
+ # Each entry in the **RequestResponses** array provides additional
799
+ # information about the processed record, and directly correlates with a
800
+ # record in the request array using the same ordering, from the top to
801
+ # the bottom. The response array always includes the same number of
802
+ # records as the request array. **RequestResponses** includes both
803
+ # successfully and unsuccessfully processed records. Firehose attempts
804
+ # to process all records in each PutRecordBatch request. A single record
805
+ # failure does not stop the processing of subsequent records.
806
+ #
807
+ # A successfully processed record includes a **RecordId** value, which
808
+ # is unique for the record. An unsuccessfully processed record includes
809
+ # **ErrorCode** and **ErrorMessage** values. **ErrorCode** reflects the
810
+ # type of error, and is one of the following values:
811
+ # `ServiceUnavailable` or `InternalFailure`. **ErrorMessage** provides
812
+ # more detailed information about the error.
813
+ #
814
+ # If there is an internal server error or a timeout, the write might
815
+ # have completed or it might have failed. If **FailedPutCount** is
816
+ # greater than 0, retry the request, resending only those records that
817
+ # might have failed processing. This minimizes the possible duplicate
818
+ # records and also reduces the total bytes sent (and corresponding
819
+ # charges). We recommend that you handle any duplicates at the
820
+ # destination.
821
+ #
822
+ # If PutRecordBatch throws **ServiceUnavailableException**, back off and
823
+ # retry. If the exception persists, it is possible that the throughput
824
+ # limits have been exceeded for the delivery stream.
825
+ #
826
+ # Data records sent to Firehose are stored for 24 hours from the time
827
+ # they are added to a delivery stream as it attempts to send the records
828
+ # to the destination. If the destination is unreachable for more than 24
829
+ # hours, the data is no longer available.
830
+ #
831
+ #
832
+ #
833
+ # [1]: http://docs.aws.amazon.com/firehose/latest/dev/limits.html
834
+ #
835
+ # @option params [required, String] :delivery_stream_name
836
+ # The name of the delivery stream.
837
+ #
838
+ # @option params [required, Array<Types::Record>] :records
839
+ # One or more records.
840
+ #
841
+ # @return [Types::PutRecordBatchOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
842
+ #
843
+ # * {Types::PutRecordBatchOutput#failed_put_count #failed_put_count} => Integer
844
+ # * {Types::PutRecordBatchOutput#request_responses #request_responses} => Array&lt;Types::PutRecordBatchResponseEntry&gt;
845
+ #
846
+ # @example Request syntax with placeholder values
847
+ #
848
+ # resp = client.put_record_batch({
849
+ # delivery_stream_name: "DeliveryStreamName", # required
850
+ # records: [ # required
851
+ # {
852
+ # data: "data", # required
853
+ # },
854
+ # ],
855
+ # })
856
+ #
857
+ # @example Response structure
858
+ #
859
+ # resp.failed_put_count #=> Integer
860
+ # resp.request_responses #=> Array
861
+ # resp.request_responses[0].record_id #=> String
862
+ # resp.request_responses[0].error_code #=> String
863
+ # resp.request_responses[0].error_message #=> String
864
+ #
865
+ # @overload put_record_batch(params = {})
866
+ # @param [Hash] params ({})
867
+ def put_record_batch(params = {}, options = {})
868
+ req = build_request(:put_record_batch, params)
869
+ req.send_request(options)
870
+ end
833
871
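A sketch of the selective-retry guidance above: correlate **RequestResponses** with the request array by position and resend only the entries that carry an error code. The stream name and payloads are placeholders, and a production producer would also back off between attempts:

    require 'aws-sdk-firehose'

    client = Aws::Firehose::Client.new(region: 'us-east-1')
    records = (1..5).map { |i| { data: "event-#{i}\n" } }

    resp = client.put_record_batch(
      delivery_stream_name: 'my-stream',
      records: records
    )

    if resp.failed_put_count > 0
      # Responses share the request array's ordering, so zip pairs each
      # record with its result; keep only the records that failed.
      failed = records.zip(resp.request_responses)
                      .select { |_record, result| result.error_code }
                      .map(&:first)
      client.put_record_batch(delivery_stream_name: 'my-stream', records: failed)
    end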
 
834
- # @!endgroup
872
+ # Updates the specified destination of the specified delivery stream.
873
+ #
874
+ # You can use this operation to change the destination type (for
875
+ # example, to replace the Amazon S3 destination with Amazon Redshift) or
876
+ # change the parameters associated with a destination (for example, to
877
+ # change the bucket name of the Amazon S3 destination). The update might
878
+ # not occur immediately. The target delivery stream remains active while
879
+ # the configurations are updated, so data writes to the delivery stream
880
+ # can continue during this process. The updated configurations are
881
+ # usually effective within a few minutes.
882
+ #
883
+ # Note that switching between Amazon ES and other services is not
884
+ # supported. For an Amazon ES destination, you can only update to
885
+ # another Amazon ES destination.
886
+ #
887
+ # If the destination type is the same, Firehose merges the configuration
888
+ # parameters specified with the destination configuration that already
889
+ # exists on the delivery stream. If any of the parameters are not
890
+ # specified in the call, the existing values are retained. For example,
891
+ # in the Amazon S3 destination, if EncryptionConfiguration is not
892
+ # specified then the existing EncryptionConfiguration is maintained on
893
+ # the destination.
894
+ #
895
+ # If the destination type is not the same, for example, changing the
896
+ # destination from Amazon S3 to Amazon Redshift, Firehose does not merge
897
+ # any parameters. In this case, all parameters must be specified.
898
+ #
899
+ # Firehose uses **CurrentDeliveryStreamVersionId** to avoid race
900
+ # conditions and conflicting merges. This is a required field, and the
901
+ # service updates the configuration only if the existing configuration
902
+ # has a version ID that matches. After the update is applied
903
+ # successfully, the version ID is updated, and can be retrieved using
904
+ # DescribeDeliveryStream. You should use the new version ID to set
905
+ # **CurrentDeliveryStreamVersionId** in the next call.
906
+ #
907
+ # @option params [required, String] :delivery_stream_name
908
+ # The name of the delivery stream.
909
+ #
910
+ # @option params [required, String] :current_delivery_stream_version_id
911
+ # Obtain this value from the **VersionId** result of
912
+ # DeliveryStreamDescription. This value is required, and helps the
913
+ # service to perform conditional operations. For example, if there is an
914
+ # interleaving update and this value is null, then the update
915
+ # destination fails. After the update is successful, the **VersionId**
916
+ # value is updated. The service then performs a merge of the old
917
+ # configuration with the new configuration.
918
+ #
919
+ # @option params [required, String] :destination_id
920
+ # The ID of the destination.
921
+ #
922
+ # @option params [Types::S3DestinationUpdate] :s3_destination_update
923
+ # \[Deprecated\] Describes an update for a destination in Amazon S3.
924
+ #
925
+ # @option params [Types::ExtendedS3DestinationUpdate] :extended_s3_destination_update
926
+ # Describes an update for a destination in Amazon S3.
927
+ #
928
+ # @option params [Types::RedshiftDestinationUpdate] :redshift_destination_update
929
+ # Describes an update for a destination in Amazon Redshift.
930
+ #
931
+ # @option params [Types::ElasticsearchDestinationUpdate] :elasticsearch_destination_update
932
+ # Describes an update for a destination in Amazon ES.
933
+ #
934
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
935
+ #
936
+ # @example Request syntax with placeholder values
937
+ #
938
+ # resp = client.update_destination({
939
+ # delivery_stream_name: "DeliveryStreamName", # required
940
+ # current_delivery_stream_version_id: "DeliveryStreamVersionId", # required
941
+ # destination_id: "DestinationId", # required
942
+ # s3_destination_update: {
943
+ # role_arn: "RoleARN",
944
+ # bucket_arn: "BucketARN",
945
+ # prefix: "Prefix",
946
+ # buffering_hints: {
947
+ # size_in_m_bs: 1,
948
+ # interval_in_seconds: 1,
949
+ # },
950
+ # compression_format: "UNCOMPRESSED", # accepts UNCOMPRESSED, GZIP, ZIP, Snappy
951
+ # encryption_configuration: {
952
+ # no_encryption_config: "NoEncryption", # accepts NoEncryption
953
+ # kms_encryption_config: {
954
+ # awskms_key_arn: "AWSKMSKeyARN", # required
955
+ # },
956
+ # },
957
+ # cloud_watch_logging_options: {
958
+ # enabled: false,
959
+ # log_group_name: "LogGroupName",
960
+ # log_stream_name: "LogStreamName",
961
+ # },
962
+ # },
963
+ # extended_s3_destination_update: {
964
+ # role_arn: "RoleARN",
965
+ # bucket_arn: "BucketARN",
966
+ # prefix: "Prefix",
967
+ # buffering_hints: {
968
+ # size_in_m_bs: 1,
969
+ # interval_in_seconds: 1,
970
+ # },
971
+ # compression_format: "UNCOMPRESSED", # accepts UNCOMPRESSED, GZIP, ZIP, Snappy
972
+ # encryption_configuration: {
973
+ # no_encryption_config: "NoEncryption", # accepts NoEncryption
974
+ # kms_encryption_config: {
975
+ # awskms_key_arn: "AWSKMSKeyARN", # required
976
+ # },
977
+ # },
978
+ # cloud_watch_logging_options: {
979
+ # enabled: false,
980
+ # log_group_name: "LogGroupName",
981
+ # log_stream_name: "LogStreamName",
982
+ # },
983
+ # processing_configuration: {
984
+ # enabled: false,
985
+ # processors: [
986
+ # {
987
+ # type: "Lambda", # required, accepts Lambda
988
+ # parameters: [
989
+ # {
990
+ # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries
991
+ # parameter_value: "ProcessorParameterValue", # required
992
+ # },
993
+ # ],
994
+ # },
995
+ # ],
996
+ # },
997
+ # s3_backup_mode: "Disabled", # accepts Disabled, Enabled
998
+ # s3_backup_update: {
999
+ # role_arn: "RoleARN",
1000
+ # bucket_arn: "BucketARN",
1001
+ # prefix: "Prefix",
1002
+ # buffering_hints: {
1003
+ # size_in_m_bs: 1,
1004
+ # interval_in_seconds: 1,
1005
+ # },
1006
+ # compression_format: "UNCOMPRESSED", # accepts UNCOMPRESSED, GZIP, ZIP, Snappy
1007
+ # encryption_configuration: {
1008
+ # no_encryption_config: "NoEncryption", # accepts NoEncryption
1009
+ # kms_encryption_config: {
1010
+ # awskms_key_arn: "AWSKMSKeyARN", # required
1011
+ # },
1012
+ # },
1013
+ # cloud_watch_logging_options: {
1014
+ # enabled: false,
1015
+ # log_group_name: "LogGroupName",
1016
+ # log_stream_name: "LogStreamName",
1017
+ # },
1018
+ # },
1019
+ # },
1020
+ # redshift_destination_update: {
1021
+ # role_arn: "RoleARN",
1022
+ # cluster_jdbcurl: "ClusterJDBCURL",
1023
+ # copy_command: {
1024
+ # data_table_name: "DataTableName", # required
1025
+ # data_table_columns: "DataTableColumns",
1026
+ # copy_options: "CopyOptions",
1027
+ # },
1028
+ # username: "Username",
1029
+ # password: "Password",
1030
+ # retry_options: {
1031
+ # duration_in_seconds: 1,
1032
+ # },
1033
+ # s3_update: {
1034
+ # role_arn: "RoleARN",
1035
+ # bucket_arn: "BucketARN",
1036
+ # prefix: "Prefix",
1037
+ # buffering_hints: {
1038
+ # size_in_m_bs: 1,
1039
+ # interval_in_seconds: 1,
1040
+ # },
1041
+ # compression_format: "UNCOMPRESSED", # accepts UNCOMPRESSED, GZIP, ZIP, Snappy
1042
+ # encryption_configuration: {
1043
+ # no_encryption_config: "NoEncryption", # accepts NoEncryption
1044
+ # kms_encryption_config: {
1045
+ # awskms_key_arn: "AWSKMSKeyARN", # required
1046
+ # },
1047
+ # },
1048
+ # cloud_watch_logging_options: {
1049
+ # enabled: false,
1050
+ # log_group_name: "LogGroupName",
1051
+ # log_stream_name: "LogStreamName",
1052
+ # },
1053
+ # },
1054
+ # processing_configuration: {
1055
+ # enabled: false,
1056
+ # processors: [
1057
+ # {
1058
+ # type: "Lambda", # required, accepts Lambda
1059
+ # parameters: [
1060
+ # {
1061
+ # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries
1062
+ # parameter_value: "ProcessorParameterValue", # required
1063
+ # },
1064
+ # ],
1065
+ # },
1066
+ # ],
1067
+ # },
1068
+ # s3_backup_mode: "Disabled", # accepts Disabled, Enabled
1069
+ # s3_backup_update: {
1070
+ # role_arn: "RoleARN",
1071
+ # bucket_arn: "BucketARN",
1072
+ # prefix: "Prefix",
1073
+ # buffering_hints: {
1074
+ # size_in_m_bs: 1,
1075
+ # interval_in_seconds: 1,
1076
+ # },
1077
+ # compression_format: "UNCOMPRESSED", # accepts UNCOMPRESSED, GZIP, ZIP, Snappy
1078
+ # encryption_configuration: {
1079
+ # no_encryption_config: "NoEncryption", # accepts NoEncryption
1080
+ # kms_encryption_config: {
1081
+ # awskms_key_arn: "AWSKMSKeyARN", # required
1082
+ # },
1083
+ # },
1084
+ # cloud_watch_logging_options: {
1085
+ # enabled: false,
1086
+ # log_group_name: "LogGroupName",
1087
+ # log_stream_name: "LogStreamName",
1088
+ # },
1089
+ # },
1090
+ # cloud_watch_logging_options: {
1091
+ # enabled: false,
1092
+ # log_group_name: "LogGroupName",
1093
+ # log_stream_name: "LogStreamName",
1094
+ # },
1095
+ # },
1096
+ # elasticsearch_destination_update: {
1097
+ # role_arn: "RoleARN",
1098
+ # domain_arn: "ElasticsearchDomainARN",
1099
+ # index_name: "ElasticsearchIndexName",
1100
+ # type_name: "ElasticsearchTypeName",
1101
+ # index_rotation_period: "NoRotation", # accepts NoRotation, OneHour, OneDay, OneWeek, OneMonth
1102
+ # buffering_hints: {
1103
+ # interval_in_seconds: 1,
1104
+ # size_in_m_bs: 1,
1105
+ # },
1106
+ # retry_options: {
1107
+ # duration_in_seconds: 1,
1108
+ # },
1109
+ # s3_update: {
1110
+ # role_arn: "RoleARN",
1111
+ # bucket_arn: "BucketARN",
1112
+ # prefix: "Prefix",
1113
+ # buffering_hints: {
1114
+ # size_in_m_bs: 1,
1115
+ # interval_in_seconds: 1,
1116
+ # },
1117
+ # compression_format: "UNCOMPRESSED", # accepts UNCOMPRESSED, GZIP, ZIP, Snappy
1118
+ # encryption_configuration: {
1119
+ # no_encryption_config: "NoEncryption", # accepts NoEncryption
1120
+ # kms_encryption_config: {
1121
+ # awskms_key_arn: "AWSKMSKeyARN", # required
1122
+ # },
1123
+ # },
1124
+ # cloud_watch_logging_options: {
1125
+ # enabled: false,
1126
+ # log_group_name: "LogGroupName",
1127
+ # log_stream_name: "LogStreamName",
1128
+ # },
1129
+ # },
1130
+ # processing_configuration: {
1131
+ # enabled: false,
1132
+ # processors: [
1133
+ # {
1134
+ # type: "Lambda", # required, accepts Lambda
1135
+ # parameters: [
1136
+ # {
1137
+ # parameter_name: "LambdaArn", # required, accepts LambdaArn, NumberOfRetries
1138
+ # parameter_value: "ProcessorParameterValue", # required
1139
+ # },
1140
+ # ],
1141
+ # },
1142
+ # ],
1143
+ # },
1144
+ # cloud_watch_logging_options: {
1145
+ # enabled: false,
1146
+ # log_group_name: "LogGroupName",
1147
+ # log_stream_name: "LogStreamName",
1148
+ # },
1149
+ # },
1150
+ # })
1151
+ #
1152
+ # @overload update_destination(params = {})
1153
+ # @param [Hash] params ({})
1154
+ def update_destination(params = {}, options = {})
1155
+ req = build_request(:update_destination, params)
1156
+ req.send_request(options)
1157
+ end
835
1158
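A sketch of the version-ID handshake described above: fetch the current **VersionId** and destination ID with DescribeDeliveryStream, then pass them to the update. The stream name, region, and prefix are placeholders:

    require 'aws-sdk-firehose'

    client = Aws::Firehose::Client.new(region: 'us-east-1')

    desc = client.describe_delivery_stream(delivery_stream_name: 'my-stream')
                 .delivery_stream_description

    # The update is applied only if the version ID still matches;
    # unspecified parameters keep their existing values.
    client.update_destination(
      delivery_stream_name: 'my-stream',
      current_delivery_stream_version_id: desc.version_id,
      destination_id: desc.destinations[0].destination_id,
      extended_s3_destination_update: {
        prefix: 'logs/v2/' # placeholder prefix
      }
    )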
 
836
- # @param params ({})
837
- # @api private
838
- def build_request(operation_name, params = {})
839
- handlers = @handlers.for(operation_name)
840
- context = Seahorse::Client::RequestContext.new(
841
- operation_name: operation_name,
842
- operation: config.api.operation(operation_name),
843
- client: self,
844
- params: params,
845
- config: config)
846
- context[:gem_name] = 'aws-sdk-firehose'
847
- context[:gem_version] = '1.0.0.rc1'
848
- Seahorse::Client::Request.new(handlers, context)
849
- end
1159
+ # @!endgroup
850
1160
 
851
- # @api private
852
- # @deprecated
853
- def waiter_names
854
- []
855
- end
1161
+ # @param params ({})
1162
+ # @api private
1163
+ def build_request(operation_name, params = {})
1164
+ handlers = @handlers.for(operation_name)
1165
+ context = Seahorse::Client::RequestContext.new(
1166
+ operation_name: operation_name,
1167
+ operation: config.api.operation(operation_name),
1168
+ client: self,
1169
+ params: params,
1170
+ config: config)
1171
+ context[:gem_name] = 'aws-sdk-firehose'
1172
+ context[:gem_version] = '1.0.0.rc2'
1173
+ Seahorse::Client::Request.new(handlers, context)
1174
+ end
856
1175
 
857
- class << self
1176
+ # @api private
1177
+ # @deprecated
1178
+ def waiter_names
1179
+ []
1180
+ end
858
1181
 
859
- # @api private
860
- attr_reader :identifier
1182
+ class << self
861
1183
 
862
- # @api private
863
- def errors_module
864
- Errors
865
- end
1184
+ # @api private
1185
+ attr_reader :identifier
866
1186
 
1187
+ # @api private
1188
+ def errors_module
1189
+ Errors
867
1190
  end
1191
+
868
1192
  end
869
1193
  end
870
1194
  end