aws-sdk-firehose 1.0.0.rc1

@@ -0,0 +1,7 @@
+ ---
+ SHA1:
+   metadata.gz: e67c5b56405613265e7334a0c8a2331a3f7ad36c
+   data.tar.gz: bf741ff986044cd5a9a8590c2d2641d818b45ec7
+ SHA512:
+   metadata.gz: 81f6b9ef90ea28e2b41c1753cabca4f1203ec93a474129a42e45dc70015d6c95d8d744282fecf65a193e6b4a66593bb43523b9a84d000622359af9f8a806d4d6
+   data.tar.gz: aac6e75335e0b5eebef2a8b7ca138c19f31295b6aeee88ffdba315620d2a4b7642bb337fc718cc6dcdf34a46094444133faa31ffd71292ea234b0599ff71123a
@@ -0,0 +1,47 @@
+ # WARNING ABOUT GENERATED CODE
+ #
+ # This file is generated. See the contributing guide for info on making contributions:
+ # https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
+ #
+ # WARNING ABOUT GENERATED CODE
+
+ require 'aws-sdk-core'
+ require 'aws-sigv4'
+
+ require_relative 'aws-sdk-firehose/types'
+ require_relative 'aws-sdk-firehose/client_api'
+ require_relative 'aws-sdk-firehose/client'
+ require_relative 'aws-sdk-firehose/errors'
+ require_relative 'aws-sdk-firehose/resource'
+ require_relative 'aws-sdk-firehose/customizations'
+
+ # This module provides support for Amazon Kinesis Firehose. This module is available in the
+ # `aws-sdk-firehose` gem.
+ #
+ # # Client
+ #
+ # The {Client} class provides one method for each API operation. Operation
+ # methods each accept a hash of request parameters and return a response
+ # structure.
+ #
+ # See {Client} for more information.
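+ #
+ # For example, a minimal usage sketch (assuming a region and credentials
+ # are already configured in your environment, and that at least one
+ # delivery stream exists):
+ #
+ #     firehose = Aws::Firehose::Client.new
+ #     resp = firehose.list_delivery_streams(limit: 10)
+ #     resp.delivery_stream_names.each { |name| puts name }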
+ #
+ # # Errors
+ #
+ # Errors returned from Amazon Kinesis Firehose all
+ # extend {Errors::ServiceError}.
+ #
+ #     begin
+ #       # do stuff
+ #     rescue Aws::Firehose::Errors::ServiceError
+ #       # rescues all service API errors
+ #     end
+ #
+ # See {Errors} for more information.
+ #
+ # @service
+ module Aws::Firehose
+
+   GEM_VERSION = '1.0.0.rc1'
+
+ end
@@ -0,0 +1,870 @@
+ # WARNING ABOUT GENERATED CODE
+ #
+ # This file is generated. See the contributing guide for info on making contributions:
+ # https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
+ #
+ # WARNING ABOUT GENERATED CODE
+
+ require 'seahorse/client/plugins/content_length.rb'
+ require 'aws-sdk-core/plugins/credentials_configuration.rb'
+ require 'aws-sdk-core/plugins/logging.rb'
+ require 'aws-sdk-core/plugins/param_converter.rb'
+ require 'aws-sdk-core/plugins/param_validator.rb'
+ require 'aws-sdk-core/plugins/user_agent.rb'
+ require 'aws-sdk-core/plugins/helpful_socket_errors.rb'
+ require 'aws-sdk-core/plugins/retry_errors.rb'
+ require 'aws-sdk-core/plugins/global_configuration.rb'
+ require 'aws-sdk-core/plugins/regional_endpoint.rb'
+ require 'aws-sdk-core/plugins/response_paging.rb'
+ require 'aws-sdk-core/plugins/stub_responses.rb'
+ require 'aws-sdk-core/plugins/idempotency_token.rb'
+ require 'aws-sdk-core/plugins/signature_v4.rb'
+ require 'aws-sdk-core/plugins/protocols/json_rpc.rb'
+
+ Aws::Plugins::GlobalConfiguration.add_identifier(:firehose)
+
+ module Aws
+   module Firehose
+     class Client < Seahorse::Client::Base
+
+       include Aws::ClientStubs
+
+       @identifier = :firehose
+
+       set_api(ClientApi::API)
+
+       add_plugin(Seahorse::Client::Plugins::ContentLength)
+       add_plugin(Aws::Plugins::CredentialsConfiguration)
+       add_plugin(Aws::Plugins::Logging)
+       add_plugin(Aws::Plugins::ParamConverter)
+       add_plugin(Aws::Plugins::ParamValidator)
+       add_plugin(Aws::Plugins::UserAgent)
+       add_plugin(Aws::Plugins::HelpfulSocketErrors)
+       add_plugin(Aws::Plugins::RetryErrors)
+       add_plugin(Aws::Plugins::GlobalConfiguration)
+       add_plugin(Aws::Plugins::RegionalEndpoint)
+       add_plugin(Aws::Plugins::ResponsePaging)
+       add_plugin(Aws::Plugins::StubResponses)
+       add_plugin(Aws::Plugins::IdempotencyToken)
+       add_plugin(Aws::Plugins::SignatureV4)
+       add_plugin(Aws::Plugins::Protocols::JsonRpc)
+
+       # @option options [required, Aws::CredentialProvider] :credentials
+       #   Your AWS credentials. This can be an instance of any one of the
+       #   following classes:
+       #
+       #   * `Aws::Credentials` - Used for configuring static, non-refreshing
+       #     credentials.
+       #
+       #   * `Aws::InstanceProfileCredentials` - Used for loading credentials
+       #     from an EC2 IMDS on an EC2 instance.
+       #
+       #   * `Aws::SharedCredentials` - Used for loading credentials from a
+       #     shared file, such as `~/.aws/config`.
+       #
+       #   * `Aws::AssumeRoleCredentials` - Used when you need to assume a role.
+       #
+       #   When `:credentials` are not configured directly, the following
+       #   locations will be searched for credentials:
+       #
+       #   * `Aws.config[:credentials]`
+       #   * The `:access_key_id`, `:secret_access_key`, and `:session_token` options.
+       #   * ENV['AWS_ACCESS_KEY_ID'], ENV['AWS_SECRET_ACCESS_KEY']
+       #   * `~/.aws/credentials`
+       #   * `~/.aws/config`
+       #   * EC2 IMDS instance profile - When used by default, the timeouts are
+       #     very aggressive. Construct and pass an instance of
+       #     `Aws::InstanceProfileCredentials` to enable retries and extended
+       #     timeouts.
+       # @option options [required, String] :region
+       #   The AWS region to connect to. The configured `:region` is
+       #   used to determine the service `:endpoint`. When not passed,
+       #   a default `:region` is searched for in the following locations:
+       #
+       #   * `Aws.config[:region]`
+       #   * `ENV['AWS_REGION']`
+       #   * `ENV['AMAZON_REGION']`
+       #   * `ENV['AWS_DEFAULT_REGION']`
+       #   * `~/.aws/credentials`
+       #   * `~/.aws/config`
+       # @option options [String] :access_key_id
+       # @option options [Boolean] :convert_params (true)
+       #   When `true`, an attempt is made to coerce request parameters into
+       #   the required types.
+       # @option options [String] :endpoint
+       #   The client endpoint is normally constructed from the `:region`
+       #   option. You should only configure an `:endpoint` when connecting
+       #   to test endpoints. This should be a valid HTTP(S) URI.
+       # @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default)
+       #   The log formatter.
+       # @option options [Symbol] :log_level (:info)
+       #   The log level to send messages to the `:logger` at.
+       # @option options [Logger] :logger
+       #   The Logger instance to send log messages to. If this option
+       #   is not set, logging will be disabled.
+       # @option options [String] :profile ("default")
+       #   Used when loading credentials from the shared credentials file
+       #   at HOME/.aws/credentials. When not specified, 'default' is used.
+       # @option options [Integer] :retry_limit (3)
+       #   The maximum number of times to retry failed requests. Only
+       #   ~500 level server errors and certain ~400 level client errors
+       #   are retried. Generally, these are throttling errors, data
+       #   checksum errors, networking errors, timeout errors and auth
+       #   errors from expired credentials.
+       # @option options [String] :secret_access_key
+       # @option options [String] :session_token
+       # @option options [Boolean] :simple_json (false)
+       #   Disables request parameter conversion, validation, and formatting.
+       #   Also disables response data type conversions. This option is useful
+       #   when you want to ensure the highest level of performance by
+       #   avoiding overhead of walking request parameters and response data
+       #   structures.
+       #
+       #   When `:simple_json` is enabled, the request parameters hash must
+       #   be formatted exactly as the API expects.
+       # @option options [Boolean] :stub_responses (false)
+       #   Causes the client to return stubbed responses. By default
+       #   fake responses are generated and returned. You can specify
+       #   the response data to return or errors to raise by calling
+       #   {ClientStubs#stub_responses}. See {ClientStubs} for more information;
+       #   a minimal sketch follows this option list.
+       #
+       #   ** Please note ** When response stubbing is enabled, no HTTP
+       #   requests are made, and retries are disabled.
+       # @option options [Boolean] :validate_params (true)
+       #   When `true`, request parameters are validated before
+       #   sending the request.
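+       #
+       # A minimal response-stubbing sketch (the stubbed stream name is
+       # hypothetical):
+       #
+       #     client = Aws::Firehose::Client.new(stub_responses: true)
+       #     client.stub_responses(:list_delivery_streams, {
+       #       delivery_stream_names: ["stubbed-stream"],
+       #       has_more_delivery_streams: false,
+       #     })
+       #     client.list_delivery_streams.delivery_stream_names #=> ["stubbed-stream"]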
+       def initialize(*args)
+         super
+       end
139
+
140
+ # @!group API Operations
141
+
142
+ # Creates a delivery stream.
143
+ #
144
+ # CreateDeliveryStream is an asynchronous operation that immediately
145
+ # returns. The initial status of the delivery stream is `CREATING`.
146
+ # After the delivery stream is created, its status is `ACTIVE` and it
147
+ # now accepts data. Attempts to send data to a delivery stream that is
148
+ # not in the `ACTIVE` state cause an exception. To check the state of a
149
+ # delivery stream, use DescribeDeliveryStream.
150
+ #
151
+ # The name of a delivery stream identifies it. You can't have two
152
+ # delivery streams with the same name in the same region. Two delivery
153
+ # streams in different AWS accounts or different regions in the same AWS
154
+ # account can have the same name.
155
+ #
156
+ # By default, you can create up to 20 delivery streams per region.
157
+ #
158
+ # A delivery stream can only be configured with a single destination,
159
+ # Amazon S3, Amazon Elasticsearch Service, or Amazon Redshift. For
160
+ # correct CreateDeliveryStream request syntax, specify only one
161
+ # destination configuration parameter: either
162
+ # **S3DestinationConfiguration**,
163
+ # **ElasticsearchDestinationConfiguration**, or
164
+ # **RedshiftDestinationConfiguration**.
165
+ #
166
+ # As part of **S3DestinationConfiguration**, optional values
167
+ # **BufferingHints**, **EncryptionConfiguration**, and
168
+ # **CompressionFormat** can be provided. By default, if no
169
+ # **BufferingHints** value is provided, Firehose buffers data up to 5 MB
170
+ # or for 5 minutes, whichever condition is satisfied first. Note that
171
+ # **BufferingHints** is a hint, so there are some cases where the
172
+ # service cannot adhere to these conditions strictly; for example,
173
+ # record boundaries are such that the size is a little over or under the
174
+ # configured buffering size. By default, no encryption is performed. We
175
+ # strongly recommend that you enable encryption to ensure secure data
176
+ # storage in Amazon S3.
177
+ #
178
+ # A few notes about **RedshiftDestinationConfiguration**\:
179
+ #
180
+ # * An Amazon Redshift destination requires an S3 bucket as intermediate
181
+ # location, as Firehose first delivers data to S3 and then uses `COPY`
182
+ # syntax to load data into an Amazon Redshift table. This is specified
183
+ # in the **RedshiftDestinationConfiguration.S3Configuration**
184
+ # parameter element.
185
+ #
186
+ # * The compression formats `SNAPPY` or `ZIP` cannot be specified in
187
+ # **RedshiftDestinationConfiguration.S3Configuration** because the
188
+ # Amazon Redshift `COPY` operation that reads from the S3 bucket
189
+ # doesn't support these compression formats.
190
+ #
191
+ # * We strongly recommend that the username and password provided is
192
+ # used exclusively for Firehose purposes, and that the permissions for
193
+ # the account are restricted for Amazon Redshift `INSERT` permissions.
194
+ #
195
+ # Firehose assumes the IAM role that is configured as part of
196
+ # destinations. The IAM role should allow the Firehose principal to
197
+ # assume the role, and the role should have permissions that allows the
198
+ # service to deliver the data. For more information, see [Amazon S3
199
+ # Bucket Access][1] in the *Amazon Kinesis Firehose Developer Guide*.
200
+ #
201
+ #
202
+ #
203
+ # [1]: http://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3
204
+ # @option params [required, String] :delivery_stream_name
205
+ # The name of the delivery stream.
206
+ # @option params [Types::S3DestinationConfiguration] :s3_destination_configuration
207
+ # The destination in Amazon S3. This value must be specified if
208
+ # **ElasticsearchDestinationConfiguration** or
209
+ # **RedshiftDestinationConfiguration** is specified (see restrictions
210
+ # listed above).
211
+ # @option params [Types::RedshiftDestinationConfiguration] :redshift_destination_configuration
212
+ # The destination in Amazon Redshift. This value cannot be specified if
213
+ # Amazon S3 or Amazon Elasticsearch is the desired destination (see
214
+ # restrictions listed above).
215
+ # @option params [Types::ElasticsearchDestinationConfiguration] :elasticsearch_destination_configuration
216
+ # The destination in Amazon ES. This value cannot be specified if Amazon
217
+ # S3 or Amazon Redshift is the desired destination (see restrictions
218
+ # listed above).
219
+ # @return [Types::CreateDeliveryStreamOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
220
+ #
221
+ # * {Types::CreateDeliveryStreamOutput#delivery_stream_arn #DeliveryStreamARN} => String
222
+ #
223
+ # @example Request syntax with placeholder values
224
+ # resp = client.create_delivery_stream({
225
+ # delivery_stream_name: "DeliveryStreamName", # required
226
+ # s3_destination_configuration: {
227
+ # role_arn: "RoleARN", # required
228
+ # bucket_arn: "BucketARN", # required
229
+ # prefix: "Prefix",
230
+ # buffering_hints: {
231
+ # size_in_m_bs: 1,
232
+ # interval_in_seconds: 1,
233
+ # },
234
+ # compression_format: "UNCOMPRESSED", # accepts UNCOMPRESSED, GZIP, ZIP, Snappy
235
+ # encryption_configuration: {
236
+ # no_encryption_config: "NoEncryption", # accepts NoEncryption
237
+ # kms_encryption_config: {
238
+ # awskms_key_arn: "AWSKMSKeyARN", # required
239
+ # },
240
+ # },
241
+ # cloud_watch_logging_options: {
242
+ # enabled: false,
243
+ # log_group_name: "LogGroupName",
244
+ # log_stream_name: "LogStreamName",
245
+ # },
246
+ # },
247
+ # redshift_destination_configuration: {
248
+ # role_arn: "RoleARN", # required
249
+ # cluster_jdbcurl: "ClusterJDBCURL", # required
250
+ # copy_command: { # required
251
+ # data_table_name: "DataTableName", # required
252
+ # data_table_columns: "DataTableColumns",
253
+ # copy_options: "CopyOptions",
254
+ # },
255
+ # username: "Username", # required
256
+ # password: "Password", # required
257
+ # retry_options: {
258
+ # duration_in_seconds: 1,
259
+ # },
260
+ # s3_configuration: { # required
261
+ # role_arn: "RoleARN", # required
262
+ # bucket_arn: "BucketARN", # required
263
+ # prefix: "Prefix",
264
+ # buffering_hints: {
265
+ # size_in_m_bs: 1,
266
+ # interval_in_seconds: 1,
267
+ # },
268
+ # compression_format: "UNCOMPRESSED", # accepts UNCOMPRESSED, GZIP, ZIP, Snappy
269
+ # encryption_configuration: {
270
+ # no_encryption_config: "NoEncryption", # accepts NoEncryption
271
+ # kms_encryption_config: {
272
+ # awskms_key_arn: "AWSKMSKeyARN", # required
273
+ # },
274
+ # },
275
+ # cloud_watch_logging_options: {
276
+ # enabled: false,
277
+ # log_group_name: "LogGroupName",
278
+ # log_stream_name: "LogStreamName",
279
+ # },
280
+ # },
281
+ # cloud_watch_logging_options: {
282
+ # enabled: false,
283
+ # log_group_name: "LogGroupName",
284
+ # log_stream_name: "LogStreamName",
285
+ # },
286
+ # },
287
+ # elasticsearch_destination_configuration: {
288
+ # role_arn: "RoleARN", # required
289
+ # domain_arn: "ElasticsearchDomainARN", # required
290
+ # index_name: "ElasticsearchIndexName", # required
291
+ # type_name: "ElasticsearchTypeName", # required
292
+ # index_rotation_period: "NoRotation", # accepts NoRotation, OneHour, OneDay, OneWeek, OneMonth
293
+ # buffering_hints: {
294
+ # interval_in_seconds: 1,
295
+ # size_in_m_bs: 1,
296
+ # },
297
+ # retry_options: {
298
+ # duration_in_seconds: 1,
299
+ # },
300
+ # s3_backup_mode: "FailedDocumentsOnly", # accepts FailedDocumentsOnly, AllDocuments
301
+ # s3_configuration: { # required
302
+ # role_arn: "RoleARN", # required
303
+ # bucket_arn: "BucketARN", # required
304
+ # prefix: "Prefix",
305
+ # buffering_hints: {
306
+ # size_in_m_bs: 1,
307
+ # interval_in_seconds: 1,
308
+ # },
309
+ # compression_format: "UNCOMPRESSED", # accepts UNCOMPRESSED, GZIP, ZIP, Snappy
310
+ # encryption_configuration: {
311
+ # no_encryption_config: "NoEncryption", # accepts NoEncryption
312
+ # kms_encryption_config: {
313
+ # awskms_key_arn: "AWSKMSKeyARN", # required
314
+ # },
315
+ # },
316
+ # cloud_watch_logging_options: {
317
+ # enabled: false,
318
+ # log_group_name: "LogGroupName",
319
+ # log_stream_name: "LogStreamName",
320
+ # },
321
+ # },
322
+ # cloud_watch_logging_options: {
323
+ # enabled: false,
324
+ # log_group_name: "LogGroupName",
325
+ # log_stream_name: "LogStreamName",
326
+ # },
327
+ # },
328
+ # })
329
+ #
330
+ # @example Response structure
331
+ # resp.delivery_stream_arn #=> String
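+       #
+       # @example Creating a stream with a single S3 destination (an illustrative sketch; the name and ARNs are hypothetical placeholders)
+       #   # Per the restrictions above, exactly one destination configuration
+       #   # is supplied.
+       #   resp = client.create_delivery_stream({
+       #     delivery_stream_name: "my-stream",
+       #     s3_destination_configuration: {
+       #       role_arn: "arn:aws:iam::123456789012:role/firehose-delivery-role",
+       #       bucket_arn: "arn:aws:s3:::my-firehose-bucket",
+       #       compression_format: "GZIP",
+       #     },
+       #   })
+       #   resp.delivery_stream_arn #=> String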
+       # @overload create_delivery_stream(params = {})
+       # @param [Hash] params ({})
+       def create_delivery_stream(params = {}, options = {})
+         req = build_request(:create_delivery_stream, params)
+         req.send_request(options)
+       end
+
+       # Deletes a delivery stream and its data.
+       #
+       # You can delete a delivery stream only if it is in the `ACTIVE` or
+       # `DELETING` state, and not in the `CREATING` state. While the deletion
+       # request is in process, the delivery stream is in the `DELETING` state.
+       #
+       # To check the state of a delivery stream, use DescribeDeliveryStream.
+       #
+       # While the delivery stream is in the `DELETING` state, the service may
+       # continue to accept the records, but the service doesn't make any
+       # guarantees with respect to delivering the data. Therefore, as a best
+       # practice, you should first stop any applications that are sending
+       # records before deleting a delivery stream.
+       # @option params [required, String] :delivery_stream_name
+       #   The name of the delivery stream.
+       # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+       #
+       # @example Request syntax with placeholder values
+       #   resp = client.delete_delivery_stream({
+       #     delivery_stream_name: "DeliveryStreamName", # required
+       #   })
+       # @overload delete_delivery_stream(params = {})
+       # @param [Hash] params ({})
+       def delete_delivery_stream(params = {}, options = {})
+         req = build_request(:delete_delivery_stream, params)
+         req.send_request(options)
+       end
+
+       # Describes the specified delivery stream and gets the status. For
+       # example, after your delivery stream is created, call
+       # DescribeDeliveryStream to see if the delivery stream is `ACTIVE` and
+       # therefore ready for data to be sent to it.
+       # @option params [required, String] :delivery_stream_name
+       #   The name of the delivery stream.
+       # @option params [Integer] :limit
+       #   The limit on the number of destinations to return. Currently, you can
+       #   have one destination per delivery stream.
+       # @option params [String] :exclusive_start_destination_id
+       #   Specifies the destination ID to start returning the destination
+       #   information. Currently Firehose supports one destination per delivery
+       #   stream.
+       # @return [Types::DescribeDeliveryStreamOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+       #
+       #   * {Types::DescribeDeliveryStreamOutput#delivery_stream_description #DeliveryStreamDescription} => Types::DeliveryStreamDescription
+       #
+       # @example Request syntax with placeholder values
+       #   resp = client.describe_delivery_stream({
+       #     delivery_stream_name: "DeliveryStreamName", # required
+       #     limit: 1,
+       #     exclusive_start_destination_id: "DestinationId",
+       #   })
+       #
+       # @example Response structure
+       #   resp.delivery_stream_description.delivery_stream_name #=> String
+       #   resp.delivery_stream_description.delivery_stream_arn #=> String
+       #   resp.delivery_stream_description.delivery_stream_status #=> String, one of "CREATING", "DELETING", "ACTIVE"
+       #   resp.delivery_stream_description.version_id #=> String
+       #   resp.delivery_stream_description.create_timestamp #=> Time
+       #   resp.delivery_stream_description.last_update_timestamp #=> Time
+       #   resp.delivery_stream_description.destinations #=> Array
+       #   resp.delivery_stream_description.destinations[0].destination_id #=> String
+       #   resp.delivery_stream_description.destinations[0].s3_destination_description.role_arn #=> String
+       #   resp.delivery_stream_description.destinations[0].s3_destination_description.bucket_arn #=> String
+       #   resp.delivery_stream_description.destinations[0].s3_destination_description.prefix #=> String
+       #   resp.delivery_stream_description.destinations[0].s3_destination_description.buffering_hints.size_in_m_bs #=> Integer
+       #   resp.delivery_stream_description.destinations[0].s3_destination_description.buffering_hints.interval_in_seconds #=> Integer
+       #   resp.delivery_stream_description.destinations[0].s3_destination_description.compression_format #=> String, one of "UNCOMPRESSED", "GZIP", "ZIP", "Snappy"
+       #   resp.delivery_stream_description.destinations[0].s3_destination_description.encryption_configuration.no_encryption_config #=> String, one of "NoEncryption"
+       #   resp.delivery_stream_description.destinations[0].s3_destination_description.encryption_configuration.kms_encryption_config.awskms_key_arn #=> String
+       #   resp.delivery_stream_description.destinations[0].s3_destination_description.cloud_watch_logging_options.enabled #=> Boolean
+       #   resp.delivery_stream_description.destinations[0].s3_destination_description.cloud_watch_logging_options.log_group_name #=> String
+       #   resp.delivery_stream_description.destinations[0].s3_destination_description.cloud_watch_logging_options.log_stream_name #=> String
+       #   resp.delivery_stream_description.destinations[0].redshift_destination_description.role_arn #=> String
+       #   resp.delivery_stream_description.destinations[0].redshift_destination_description.cluster_jdbcurl #=> String
+       #   resp.delivery_stream_description.destinations[0].redshift_destination_description.copy_command.data_table_name #=> String
+       #   resp.delivery_stream_description.destinations[0].redshift_destination_description.copy_command.data_table_columns #=> String
+       #   resp.delivery_stream_description.destinations[0].redshift_destination_description.copy_command.copy_options #=> String
+       #   resp.delivery_stream_description.destinations[0].redshift_destination_description.username #=> String
+       #   resp.delivery_stream_description.destinations[0].redshift_destination_description.retry_options.duration_in_seconds #=> Integer
+       #   resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.role_arn #=> String
+       #   resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.bucket_arn #=> String
+       #   resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.prefix #=> String
+       #   resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.buffering_hints.size_in_m_bs #=> Integer
+       #   resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.buffering_hints.interval_in_seconds #=> Integer
+       #   resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.compression_format #=> String, one of "UNCOMPRESSED", "GZIP", "ZIP", "Snappy"
+       #   resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.encryption_configuration.no_encryption_config #=> String, one of "NoEncryption"
+       #   resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.encryption_configuration.kms_encryption_config.awskms_key_arn #=> String
+       #   resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.cloud_watch_logging_options.enabled #=> Boolean
+       #   resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.cloud_watch_logging_options.log_group_name #=> String
+       #   resp.delivery_stream_description.destinations[0].redshift_destination_description.s3_destination_description.cloud_watch_logging_options.log_stream_name #=> String
+       #   resp.delivery_stream_description.destinations[0].redshift_destination_description.cloud_watch_logging_options.enabled #=> Boolean
+       #   resp.delivery_stream_description.destinations[0].redshift_destination_description.cloud_watch_logging_options.log_group_name #=> String
+       #   resp.delivery_stream_description.destinations[0].redshift_destination_description.cloud_watch_logging_options.log_stream_name #=> String
+       #   resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.role_arn #=> String
+       #   resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.domain_arn #=> String
+       #   resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.index_name #=> String
+       #   resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.type_name #=> String
+       #   resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.index_rotation_period #=> String, one of "NoRotation", "OneHour", "OneDay", "OneWeek", "OneMonth"
+       #   resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.buffering_hints.interval_in_seconds #=> Integer
+       #   resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.buffering_hints.size_in_m_bs #=> Integer
+       #   resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.retry_options.duration_in_seconds #=> Integer
+       #   resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_backup_mode #=> String, one of "FailedDocumentsOnly", "AllDocuments"
+       #   resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.role_arn #=> String
+       #   resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.bucket_arn #=> String
+       #   resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.prefix #=> String
+       #   resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.buffering_hints.size_in_m_bs #=> Integer
+       #   resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.buffering_hints.interval_in_seconds #=> Integer
+       #   resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.compression_format #=> String, one of "UNCOMPRESSED", "GZIP", "ZIP", "Snappy"
+       #   resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.encryption_configuration.no_encryption_config #=> String, one of "NoEncryption"
+       #   resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.encryption_configuration.kms_encryption_config.awskms_key_arn #=> String
+       #   resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.cloud_watch_logging_options.enabled #=> Boolean
+       #   resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.cloud_watch_logging_options.log_group_name #=> String
+       #   resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.s3_destination_description.cloud_watch_logging_options.log_stream_name #=> String
+       #   resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.cloud_watch_logging_options.enabled #=> Boolean
+       #   resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.cloud_watch_logging_options.log_group_name #=> String
+       #   resp.delivery_stream_description.destinations[0].elasticsearch_destination_description.cloud_watch_logging_options.log_stream_name #=> String
+       #   resp.delivery_stream_description.has_more_destinations #=> Boolean
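+       #
+       # @example Waiting for a new stream to become ACTIVE (an illustrative sketch; the name and poll interval are hypothetical, and no timeout is shown)
+       #   resp = client.describe_delivery_stream(delivery_stream_name: "my-stream")
+       #   until resp.delivery_stream_description.delivery_stream_status == "ACTIVE"
+       #     sleep 10 # arbitrary poll interval
+       #     resp = client.describe_delivery_stream(delivery_stream_name: "my-stream")
+       #   end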
+       # @overload describe_delivery_stream(params = {})
+       # @param [Hash] params ({})
+       def describe_delivery_stream(params = {}, options = {})
+         req = build_request(:describe_delivery_stream, params)
+         req.send_request(options)
+       end
+
+       # Lists your delivery streams.
+       #
+       # The number of delivery streams might be too large to return using a
+       # single call to ListDeliveryStreams. You can limit the number of
+       # delivery streams returned using the **Limit** parameter. To determine
+       # whether there are more delivery streams to list, check the value of
+       # **HasMoreDeliveryStreams** in the output. If there are more delivery
+       # streams to list, you can request them by specifying the name of the
+       # last delivery stream returned in the call in the
+       # **ExclusiveStartDeliveryStreamName** parameter of a subsequent call.
+       # @option params [Integer] :limit
+       #   The maximum number of delivery streams to list.
+       # @option params [String] :exclusive_start_delivery_stream_name
+       #   The name of the delivery stream to start the list with.
+       # @return [Types::ListDeliveryStreamsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+       #
+       #   * {Types::ListDeliveryStreamsOutput#delivery_stream_names #DeliveryStreamNames} => Array<String>
+       #   * {Types::ListDeliveryStreamsOutput#has_more_delivery_streams #HasMoreDeliveryStreams} => Boolean
+       #
+       # @example Request syntax with placeholder values
+       #   resp = client.list_delivery_streams({
+       #     limit: 1,
+       #     exclusive_start_delivery_stream_name: "DeliveryStreamName",
+       #   })
+       #
+       # @example Response structure
+       #   resp.delivery_stream_names #=> Array
+       #   resp.delivery_stream_names[0] #=> String
+       #   resp.has_more_delivery_streams #=> Boolean
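+       #
+       # @example Collecting all stream names (an illustrative sketch of manual paging; the page size is arbitrary)
+       #   names = []
+       #   params = { limit: 10 }
+       #   loop do
+       #     resp = client.list_delivery_streams(params)
+       #     names.concat(resp.delivery_stream_names)
+       #     break unless resp.has_more_delivery_streams
+       #     # the last name returned becomes the exclusive start of the next page
+       #     params[:exclusive_start_delivery_stream_name] = names.last
+       #   end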
+       # @overload list_delivery_streams(params = {})
+       # @param [Hash] params ({})
+       def list_delivery_streams(params = {}, options = {})
+         req = build_request(:list_delivery_streams, params)
+         req.send_request(options)
+       end
+
+       # Writes a single data record into an Amazon Kinesis Firehose delivery
+       # stream. To write multiple data records into a delivery stream, use
+       # PutRecordBatch. Applications using these operations are referred to as
+       # producers.
+       #
+       # By default, each delivery stream can take in up to 2,000 transactions
+       # per second, 5,000 records per second, or 5 MB per second. Note that if
+       # you use PutRecord and PutRecordBatch, the limits are an aggregate
+       # across these two operations for each delivery stream. For more
+       # information about limits and how to request an increase, see [Amazon
+       # Kinesis Firehose Limits][1].
+       #
+       # You must specify the name of the delivery stream and the data record
+       # when using PutRecord. The data record consists of a data blob that can
+       # be up to 1,000 KB in size, and any kind of data, for example, a
+       # segment from a log file, geographic location data, web site
+       # clickstream data, etc.
+       #
+       # Firehose buffers records before delivering them to the destination. To
+       # disambiguate the data blobs at the destination, a common solution is
+       # to use delimiters in the data, such as a newline (`\n`) or some other
+       # character unique within the data. This allows the consumer
+       # application(s) to parse individual data items when reading the data
+       # from the destination.
+       #
+       # The PutRecord operation returns a **RecordId**, which is a unique
+       # string assigned to each record. Producer applications can use this ID
+       # for purposes such as auditability and investigation.
+       #
+       # If the PutRecord operation throws a **ServiceUnavailableException**,
+       # back off and retry. If the exception persists, it is possible that the
+       # throughput limits have been exceeded for the delivery stream.
+       #
+       # Data records sent to Firehose are stored for 24 hours from the time
+       # they are added to a delivery stream as it attempts to send the records
+       # to the destination. If the destination is unreachable for more than 24
+       # hours, the data is no longer available.
+       #
+       #
+       #
+       # [1]: http://docs.aws.amazon.com/firehose/latest/dev/limits.html
+       # @option params [required, String] :delivery_stream_name
+       #   The name of the delivery stream.
+       # @option params [required, Types::Record] :record
+       #   The record.
+       # @return [Types::PutRecordOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+       #
+       #   * {Types::PutRecordOutput#record_id #RecordId} => String
+       #
+       # @example Request syntax with placeholder values
+       #   resp = client.put_record({
+       #     delivery_stream_name: "DeliveryStreamName", # required
+       #     record: { # required
+       #       data: "data", # required
+       #     },
+       #   })
+       #
+       # @example Response structure
+       #   resp.record_id #=> String
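+       #
+       # @example Writing a newline-delimited record (an illustrative sketch; the stream name and payload are hypothetical)
+       #   # A trailing "\n" lets consumers split records at the destination,
+       #   # as described above.
+       #   resp = client.put_record({
+       #     delivery_stream_name: "my-stream",
+       #     record: { data: "77.0,21.5,2017-01-01T00:00:00Z\n" },
+       #   })
+       #   resp.record_id #=> String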
+       # @overload put_record(params = {})
+       # @param [Hash] params ({})
+       def put_record(params = {}, options = {})
+         req = build_request(:put_record, params)
+         req.send_request(options)
+       end
+
+       # Writes multiple data records into a delivery stream in a single call,
+       # which can achieve higher throughput per producer than when writing
+       # single records. To write single data records into a delivery stream,
+       # use PutRecord. Applications using these operations are referred to as
+       # producers.
+       #
+       # Each PutRecordBatch request supports up to 500 records. Each record in
+       # the request can be as large as 1,000 KB (before base64 encoding), up
+       # to a limit of 4 MB for the entire request. By default, each delivery
+       # stream can take in up to 2,000 transactions per second, 5,000 records
+       # per second, or 5 MB per second. Note that if you use PutRecord and
+       # PutRecordBatch, the limits are an aggregate across these two
+       # operations for each delivery stream. For more information about limits
+       # and how to request an increase, see [Amazon Kinesis Firehose
+       # Limits][1].
+       #
+       # You must specify the name of the delivery stream and the data record
+       # when using PutRecordBatch. The data record consists of a data blob
+       # that can be up to 1,000 KB in size, and any kind of data, for example,
+       # a segment from a log file, geographic location data, web site
+       # clickstream data, and so on.
+       #
+       # Firehose buffers records before delivering them to the destination. To
+       # disambiguate the data blobs at the destination, a common solution is
+       # to use delimiters in the data, such as a newline (`\n`) or some other
+       # character unique within the data. This allows the consumer
+       # application(s) to parse individual data items when reading the data
+       # from the destination.
+       #
+       # The PutRecordBatch response includes a count of any failed records,
+       # **FailedPutCount**, and an array of responses, **RequestResponses**.
+       # The **FailedPutCount** value is a count of records that failed. Each
+       # entry in the **RequestResponses** array gives additional information
+       # about the processed record. Each entry in **RequestResponses** directly
+       # correlates with a record in the request array using the same ordering,
+       # from the top to the bottom of the request and response.
+       # **RequestResponses** always includes the same number of records as the
+       # request array, and includes both successfully and unsuccessfully
+       # processed records. Firehose attempts to process all
+       # records in each PutRecordBatch request. A single record failure does
+       # not stop the processing of subsequent records.
+       #
+       # A successfully processed record includes a **RecordId** value, which
+       # is a unique identifier for the record. An unsuccessfully
+       # processed record includes **ErrorCode** and **ErrorMessage** values.
+       # **ErrorCode** reflects the type of error and is one of the following
+       # values: `ServiceUnavailable` or `InternalFailure`. `ErrorMessage`
+       # provides more detailed information about the error.
+       #
+       # If **FailedPutCount** is greater than 0 (zero), retry the request. A
+       # retry of the entire batch of records is possible; however, we strongly
+       # recommend that you inspect the entire response and resend only those
+       # records that failed processing. This minimizes duplicate records and
+       # also reduces the total bytes sent (and corresponding charges).
+       #
+       # If the PutRecordBatch operation throws a
+       # **ServiceUnavailableException**, back off and retry. If the exception
+       # persists, it is possible that the throughput limits have been exceeded
+       # for the delivery stream.
+       #
+       # Data records sent to Firehose are stored for 24 hours from the time
+       # they are added to a delivery stream as it attempts to send the records
+       # to the destination. If the destination is unreachable for more than 24
+       # hours, the data is no longer available.
+       #
+       #
+       #
+       # [1]: http://docs.aws.amazon.com/firehose/latest/dev/limits.html
+       # @option params [required, String] :delivery_stream_name
+       #   The name of the delivery stream.
+       # @option params [required, Array<Types::Record>] :records
+       #   One or more records.
+       # @return [Types::PutRecordBatchOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+       #
+       #   * {Types::PutRecordBatchOutput#failed_put_count #FailedPutCount} => Integer
+       #   * {Types::PutRecordBatchOutput#request_responses #RequestResponses} => Array<Types::PutRecordBatchResponseEntry>
+       #
+       # @example Request syntax with placeholder values
+       #   resp = client.put_record_batch({
+       #     delivery_stream_name: "DeliveryStreamName", # required
+       #     records: [ # required
+       #       {
+       #         data: "data", # required
+       #       },
+       #     ],
+       #   })
+       #
+       # @example Response structure
+       #   resp.failed_put_count #=> Integer
+       #   resp.request_responses #=> Array
+       #   resp.request_responses[0].record_id #=> String
+       #   resp.request_responses[0].error_code #=> String
+       #   resp.request_responses[0].error_message #=> String
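+       #
+       # @example Resending only failed records (an illustrative sketch; the stream name and data are hypothetical)
+       #   records = ["a\n", "b\n", "c\n"].map { |data| { data: data } }
+       #   resp = client.put_record_batch({
+       #     delivery_stream_name: "my-stream",
+       #     records: records,
+       #   })
+       #   if resp.failed_put_count > 0
+       #     # entries without an error_code succeeded; back off, then
+       #     # resend only the failed records, per the guidance above
+       #     failed = records.zip(resp.request_responses)
+       #                     .select { |_record, entry| entry.error_code }
+       #                     .map(&:first)
+       #   end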
+       # @overload put_record_batch(params = {})
+       # @param [Hash] params ({})
+       def put_record_batch(params = {}, options = {})
+         req = build_request(:put_record_batch, params)
+         req.send_request(options)
+       end
+
+       # Updates the specified destination of the specified delivery stream.
+       # Note: Switching between Elasticsearch and other services is not
+       # supported. For an Elasticsearch destination, you can only update an
+       # existing Elasticsearch destination with this operation.
+       #
+       # This operation can be used to change the destination type (for
+       # example, to replace the Amazon S3 destination with Amazon Redshift) or
+       # change the parameters associated with a given destination (for
+       # example, to change the bucket name of the Amazon S3 destination). The
+       # update may not occur immediately. The target delivery stream remains
+       # active while the configurations are updated, so data writes to the
+       # delivery stream can continue during this process. The updated
+       # configurations are normally effective within a few minutes.
+       #
+       # If the destination type is the same, Firehose merges the configuration
+       # parameters specified in the UpdateDestination request with the
+       # destination configuration that already exists on the delivery stream.
+       # If any of the parameters are not specified in the update request, then
+       # the existing configuration parameters are retained. For example, in
+       # the Amazon S3 destination, if EncryptionConfiguration is not specified
+       # then the existing EncryptionConfiguration is maintained on the
+       # destination.
+       #
+       # If the destination type is not the same, for example, changing the
+       # destination from Amazon S3 to Amazon Redshift, Firehose does not merge
+       # any parameters. In this case, all parameters must be specified.
+       #
+       # Firehose uses the **CurrentDeliveryStreamVersionId** to avoid race
+       # conditions and conflicting merges. This is a required field in every
+       # request and the service only updates the configuration if the existing
+       # configuration matches the **VersionId**. After the update is applied
+       # successfully, the **VersionId** is updated, and can be retrieved
+       # with the DescribeDeliveryStream operation. The new **VersionId**
+       # should be used to set **CurrentDeliveryStreamVersionId** in the next
+       # UpdateDestination operation.
+       # @option params [required, String] :delivery_stream_name
+       #   The name of the delivery stream.
+       # @option params [required, String] :current_delivery_stream_version_id
+       #   Obtain this value from the **VersionId** result of the
+       #   DeliveryStreamDescription operation. This value is required, and helps
+       #   the service to perform conditional operations. For example, if there
+       #   is an interleaving update and this value is null, then the update
+       #   destination fails. After the update is successful, the **VersionId**
+       #   value is updated. The service then performs a merge of the old
+       #   configuration with the new configuration.
+       # @option params [required, String] :destination_id
+       #   The ID of the destination.
+       # @option params [Types::S3DestinationUpdate] :s3_destination_update
+       #   Describes an update for a destination in Amazon S3.
+       # @option params [Types::RedshiftDestinationUpdate] :redshift_destination_update
+       #   Describes an update for a destination in Amazon Redshift.
+       # @option params [Types::ElasticsearchDestinationUpdate] :elasticsearch_destination_update
+       #   Describes an update for a destination in Amazon ES.
+       # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+       #
+       # @example Request syntax with placeholder values
+       #   resp = client.update_destination({
+       #     delivery_stream_name: "DeliveryStreamName", # required
+       #     current_delivery_stream_version_id: "DeliveryStreamVersionId", # required
+       #     destination_id: "DestinationId", # required
+       #     s3_destination_update: {
+       #       role_arn: "RoleARN",
+       #       bucket_arn: "BucketARN",
+       #       prefix: "Prefix",
+       #       buffering_hints: {
+       #         size_in_m_bs: 1,
+       #         interval_in_seconds: 1,
+       #       },
+       #       compression_format: "UNCOMPRESSED", # accepts UNCOMPRESSED, GZIP, ZIP, Snappy
+       #       encryption_configuration: {
+       #         no_encryption_config: "NoEncryption", # accepts NoEncryption
+       #         kms_encryption_config: {
+       #           awskms_key_arn: "AWSKMSKeyARN", # required
+       #         },
+       #       },
+       #       cloud_watch_logging_options: {
+       #         enabled: false,
+       #         log_group_name: "LogGroupName",
+       #         log_stream_name: "LogStreamName",
+       #       },
+       #     },
+       #     redshift_destination_update: {
+       #       role_arn: "RoleARN",
+       #       cluster_jdbcurl: "ClusterJDBCURL",
+       #       copy_command: {
+       #         data_table_name: "DataTableName", # required
+       #         data_table_columns: "DataTableColumns",
+       #         copy_options: "CopyOptions",
+       #       },
+       #       username: "Username",
+       #       password: "Password",
+       #       retry_options: {
+       #         duration_in_seconds: 1,
+       #       },
+       #       s3_update: {
+       #         role_arn: "RoleARN",
+       #         bucket_arn: "BucketARN",
+       #         prefix: "Prefix",
+       #         buffering_hints: {
+       #           size_in_m_bs: 1,
+       #           interval_in_seconds: 1,
+       #         },
+       #         compression_format: "UNCOMPRESSED", # accepts UNCOMPRESSED, GZIP, ZIP, Snappy
+       #         encryption_configuration: {
+       #           no_encryption_config: "NoEncryption", # accepts NoEncryption
+       #           kms_encryption_config: {
+       #             awskms_key_arn: "AWSKMSKeyARN", # required
+       #           },
+       #         },
+       #         cloud_watch_logging_options: {
+       #           enabled: false,
+       #           log_group_name: "LogGroupName",
+       #           log_stream_name: "LogStreamName",
+       #         },
+       #       },
+       #       cloud_watch_logging_options: {
+       #         enabled: false,
+       #         log_group_name: "LogGroupName",
+       #         log_stream_name: "LogStreamName",
+       #       },
+       #     },
+       #     elasticsearch_destination_update: {
+       #       role_arn: "RoleARN",
+       #       domain_arn: "ElasticsearchDomainARN",
+       #       index_name: "ElasticsearchIndexName",
+       #       type_name: "ElasticsearchTypeName",
+       #       index_rotation_period: "NoRotation", # accepts NoRotation, OneHour, OneDay, OneWeek, OneMonth
+       #       buffering_hints: {
+       #         interval_in_seconds: 1,
+       #         size_in_m_bs: 1,
+       #       },
+       #       retry_options: {
+       #         duration_in_seconds: 1,
+       #       },
+       #       s3_update: {
+       #         role_arn: "RoleARN",
+       #         bucket_arn: "BucketARN",
+       #         prefix: "Prefix",
+       #         buffering_hints: {
+       #           size_in_m_bs: 1,
+       #           interval_in_seconds: 1,
+       #         },
+       #         compression_format: "UNCOMPRESSED", # accepts UNCOMPRESSED, GZIP, ZIP, Snappy
+       #         encryption_configuration: {
+       #           no_encryption_config: "NoEncryption", # accepts NoEncryption
+       #           kms_encryption_config: {
+       #             awskms_key_arn: "AWSKMSKeyARN", # required
+       #           },
+       #         },
+       #         cloud_watch_logging_options: {
+       #           enabled: false,
+       #           log_group_name: "LogGroupName",
+       #           log_stream_name: "LogStreamName",
+       #         },
+       #       },
+       #       cloud_watch_logging_options: {
+       #         enabled: false,
+       #         log_group_name: "LogGroupName",
+       #         log_stream_name: "LogStreamName",
+       #       },
+       #     },
+       #   })
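+       #
+       # @example Reading the current VersionId before updating (an illustrative sketch; the stream name and prefix are hypothetical)
+       #   desc = client.describe_delivery_stream(delivery_stream_name: "my-stream")
+       #   stream = desc.delivery_stream_description
+       #   client.update_destination({
+       #     delivery_stream_name: "my-stream",
+       #     current_delivery_stream_version_id: stream.version_id,
+       #     destination_id: stream.destinations.first.destination_id,
+       #     s3_destination_update: { prefix: "logs/" }, # unspecified params are retained
+       #   })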
+       # @overload update_destination(params = {})
+       # @param [Hash] params ({})
+       def update_destination(params = {}, options = {})
+         req = build_request(:update_destination, params)
+         req.send_request(options)
+       end
+
+       # @!endgroup
+
+       # @param params ({})
+       # @api private
+       def build_request(operation_name, params = {})
+         handlers = @handlers.for(operation_name)
+         context = Seahorse::Client::RequestContext.new(
+           operation_name: operation_name,
+           operation: config.api.operation(operation_name),
+           client: self,
+           params: params,
+           config: config)
+         context[:gem_name] = 'aws-sdk-firehose'
+         context[:gem_version] = '1.0.0.rc1'
+         Seahorse::Client::Request.new(handlers, context)
+       end
+
+       # @api private
+       # @deprecated
+       def waiter_names
+         []
+       end
+
+       class << self
+
+         # @api private
+         attr_reader :identifier
+
+         # @api private
+         def errors_module
+           Errors
+         end
+
+       end
+     end
+   end
+ end