aws-sdk-kinesis 1.0.0.rc1

@@ -0,0 +1,7 @@
1
+ ---
2
+ SHA1:
3
+ metadata.gz: 8a78e60717988184c051db6f7783a95c3d389a66
4
+ data.tar.gz: 6dfb75dc9cc5503f78cf09bc4acf725b5564edf6
5
+ SHA512:
6
+ metadata.gz: 7513a4fd22c617ced84e6a98044a56817e2d1fe373bbee4b8fd27a24487fd1e15adfcb2b3b7b7f992c4a1ea1d6a25bf5af64836d327d282a58eddbb81ac57904
7
+ data.tar.gz: 336e5f2eecd77035a3299f329cd7c57e128c1519d21ccafe6ac7428dcb0019928f5dcf8909e0fc52219d151f73f62f91f462d0e235fd42458fed5d03eb070608
@@ -0,0 +1,48 @@
1
+ # WARNING ABOUT GENERATED CODE
2
+ #
3
+ # This file is generated. See the contributing guide for info on making contributions:
4
+ # https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
5
+ #
6
+ # WARNING ABOUT GENERATED CODE
7
+
8
+ require 'aws-sdk-core'
9
+ require 'aws-sigv4'
10
+
11
+ require_relative 'aws-sdk-kinesis/types'
12
+ require_relative 'aws-sdk-kinesis/client_api'
13
+ require_relative 'aws-sdk-kinesis/client'
14
+ require_relative 'aws-sdk-kinesis/errors'
15
+ require_relative 'aws-sdk-kinesis/waiters'
16
+ require_relative 'aws-sdk-kinesis/resource'
17
+ require_relative 'aws-sdk-kinesis/customizations'
18
+
19
+ # This module provides support for Amazon Kinesis. This module is available in the
20
+ # `aws-sdk-kinesis` gem.
21
+ #
22
+ # # Client
23
+ #
24
+ # The {Client} class provides one method for each API operation. Operation
25
+ # methods each accept a hash of request parameters and return a response
26
+ # structure.
27
+ #
28
+ # See {Client} for more information.
29
+ #
30
+ # # Errors
31
+ #
32
+ # Errors returned from Amazon Kinesis all
33
+ # extend {Errors::ServiceError}.
34
+ #
35
+ # begin
36
+ # # do stuff
37
+ # rescue Aws::Kinesis::Errors::ServiceError
38
+ # # rescues all service API errors
39
+ # end
40
+ #
41
+ # See {Errors} for more information.
42
+ #
43
+ # @service
44
+ module Aws::Kinesis
45
+
46
+ GEM_VERSION = '1.0.0.rc1'
47
+
48
+ end
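
As a quick orientation, the module and error documentation above might be exercised as in the sketch below. This is illustrative only; the region, the operation called, and the error handling are placeholders chosen for the example, not part of the generated file:

    require 'aws-sdk-kinesis'

    client = Aws::Kinesis::Client.new(region: 'us-east-1')

    begin
      client.describe_limits   # any operation; all modeled errors share a base class
    rescue Aws::Kinesis::Errors::ServiceError => e
      # Every service API error extends Errors::ServiceError.
      warn "Kinesis request failed: #{e.class}: #{e.message}"
    end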
@@ -0,0 +1,1417 @@
1
+ # WARNING ABOUT GENERATED CODE
2
+ #
3
+ # This file is generated. See the contributing guide for info on making contributions:
4
+ # https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
5
+ #
6
+ # WARNING ABOUT GENERATED CODE
7
+
8
+ require 'seahorse/client/plugins/content_length.rb'
9
+ require 'aws-sdk-core/plugins/credentials_configuration.rb'
10
+ require 'aws-sdk-core/plugins/logging.rb'
11
+ require 'aws-sdk-core/plugins/param_converter.rb'
12
+ require 'aws-sdk-core/plugins/param_validator.rb'
13
+ require 'aws-sdk-core/plugins/user_agent.rb'
14
+ require 'aws-sdk-core/plugins/helpful_socket_errors.rb'
15
+ require 'aws-sdk-core/plugins/retry_errors.rb'
16
+ require 'aws-sdk-core/plugins/global_configuration.rb'
17
+ require 'aws-sdk-core/plugins/regional_endpoint.rb'
18
+ require 'aws-sdk-core/plugins/response_paging.rb'
19
+ require 'aws-sdk-core/plugins/stub_responses.rb'
20
+ require 'aws-sdk-core/plugins/idempotency_token.rb'
21
+ require 'aws-sdk-core/plugins/signature_v4.rb'
22
+ require 'aws-sdk-core/plugins/protocols/json_rpc.rb'
23
+
24
+ Aws::Plugins::GlobalConfiguration.add_identifier(:kinesis)
25
+
26
+ module Aws
27
+ module Kinesis
28
+ class Client < Seahorse::Client::Base
29
+
30
+ include Aws::ClientStubs
31
+
32
+ @identifier = :kinesis
33
+
34
+ set_api(ClientApi::API)
35
+
36
+ add_plugin(Seahorse::Client::Plugins::ContentLength)
37
+ add_plugin(Aws::Plugins::CredentialsConfiguration)
38
+ add_plugin(Aws::Plugins::Logging)
39
+ add_plugin(Aws::Plugins::ParamConverter)
40
+ add_plugin(Aws::Plugins::ParamValidator)
41
+ add_plugin(Aws::Plugins::UserAgent)
42
+ add_plugin(Aws::Plugins::HelpfulSocketErrors)
43
+ add_plugin(Aws::Plugins::RetryErrors)
44
+ add_plugin(Aws::Plugins::GlobalConfiguration)
45
+ add_plugin(Aws::Plugins::RegionalEndpoint)
46
+ add_plugin(Aws::Plugins::ResponsePaging)
47
+ add_plugin(Aws::Plugins::StubResponses)
48
+ add_plugin(Aws::Plugins::IdempotencyToken)
49
+ add_plugin(Aws::Plugins::SignatureV4)
50
+ add_plugin(Aws::Plugins::Protocols::JsonRpc)
51
+
52
+ # @option options [required, Aws::CredentialProvider] :credentials
53
+ # Your AWS credentials. This can be an instance of any one of the
54
+ # following classes:
55
+ #
56
+ # * `Aws::Credentials` - Used for configuring static, non-refreshing
57
+ # credentials.
58
+ #
59
+ # * `Aws::InstanceProfileCredentials` - Used for loading credentials
60
+ # from an EC2 IMDS on an EC2 instance.
61
+ #
62
+ # * `Aws::SharedCredentials` - Used for loading credentials from a
63
+ # shared file, such as `~/.aws/config`.
64
+ #
65
+ # * `Aws::AssumeRoleCredentials` - Used when you need to assume a role.
66
+ #
67
+ # When `:credentials` are not configured directly, the following
68
+ # locations will be searched for credentials:
69
+ #
70
+ # * `Aws.config[:credentials]`
71
+ # * The `:access_key_id`, `:secret_access_key`, and `:session_token` options.
72
+ # * ENV['AWS_ACCESS_KEY_ID'], ENV['AWS_SECRET_ACCESS_KEY']
73
+ # * `~/.aws/credentials`
74
+ # * `~/.aws/config`
75
+ # * EC2 IMDS instance profile - When used by default, the timeouts are
76
+ # very aggressive. Construct and pass an instance of
77
+ # `Aws::InstanceProfileCredentials` to enable retries and extended
78
+ # timeouts.
79
+ # @option options [required, String] :region
80
+ # The AWS region to connect to. The configured `:region` is
81
+ # used to determine the service `:endpoint`. When not passed,
82
+ # a default `:region` is searched for in the following locations:
83
+ #
84
+ # * `Aws.config[:region]`
85
+ # * `ENV['AWS_REGION']`
86
+ # * `ENV['AMAZON_REGION']`
87
+ # * `ENV['AWS_DEFAULT_REGION']`
88
+ # * `~/.aws/credentials`
89
+ # * `~/.aws/config`
90
+ # @option options [String] :access_key_id
91
+ # @option options [Boolean] :convert_params (true)
92
+ # When `true`, an attempt is made to coerce request parameters into
93
+ # the required types.
94
+ # @option options [String] :endpoint
95
+ # The client endpoint is normally constructed from the `:region`
96
+ # option. You should only configure an `:endpoint` when connecting
97
+ # to test endpoints. This should be a valid HTTP(S) URI.
98
+ # @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default)
99
+ # The log formatter.
100
+ # @option options [Symbol] :log_level (:info)
101
+ # The log level to send messages to the `:logger` at.
102
+ # @option options [Logger] :logger
103
+ # The Logger instance to send log messages to. If this option
104
+ # is not set, logging will be disabled.
105
+ # @option options [String] :profile ("default")
106
+ # Used when loading credentials from the shared credentials file
107
+ # at HOME/.aws/credentials. When not specified, 'default' is used.
108
+ # @option options [Integer] :retry_limit (3)
109
+ # The maximum number of times to retry failed requests. Only
110
+ # 500-level server errors and certain 400-level client errors
111
+ # are retried. Generally, these are throttling errors, data
112
+ # checksum errors, networking errors, timeout errors and auth
113
+ # errors from expired credentials.
114
+ # @option options [String] :secret_access_key
115
+ # @option options [String] :session_token
116
+ # @option options [Boolean] :simple_json (false)
117
+ # Disables request parameter conversion, validation, and formatting.
118
+ # Also disables response data type conversions. This option is useful
119
+ # when you want to ensure the highest level of performance by
120
+ # avoiding overhead of walking request parameters and response data
121
+ # structures.
122
+ #
123
+ # When `:simple_json` is enabled, the request parameters hash must
124
+ # be formatted exactly as the Kinesis API expects.
125
+ # @option options [Boolean] :stub_responses (false)
126
+ # Causes the client to return stubbed responses. By default
127
+ # fake responses are generated and returned. You can specify
128
+ # the response data to return or errors to raise by calling
129
+ # {ClientStubs#stub_responses}. See {ClientStubs} for more information.
130
+ #
131
+ # **Please note**: When response stubbing is enabled, no HTTP
132
+ # requests are made, and retries are disabled.
133
+ # @option options [Boolean] :validate_params (true)
134
+ # When `true`, request parameters are validated before
135
+ # sending the request.
136
+ def initialize(*args)
137
+ super
138
+ end
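
To make the constructor options above concrete, here is a hedged configuration sketch; the credential values, region, page of options shown, and logger choice are placeholders rather than recommendations:

    require 'logger'
    require 'aws-sdk-kinesis'

    # Explicit configuration; in most applications the defaults resolve
    # credentials and region from the environment instead.
    kinesis = Aws::Kinesis::Client.new(
      region: 'us-west-2',
      credentials: Aws::Credentials.new('AKID_PLACEHOLDER', 'SECRET_PLACEHOLDER'),
      retry_limit: 3,
      logger: Logger.new($stdout),
      log_level: :info
    )

    # For tests: a stubbed client makes no HTTP requests and returns fake data.
    stubbed = Aws::Kinesis::Client.new(region: 'us-east-1', stub_responses: true)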
139
+
140
+ # @!group API Operations
141
+
142
+ # Adds or updates tags for the specified Amazon Kinesis stream. Each
143
+ # stream can have up to 10 tags.
144
+ #
145
+ # If tags have already been assigned to the stream, `AddTagsToStream`
146
+ # overwrites any existing tags that correspond to the specified tag
147
+ # keys.
148
+ # @option params [required, String] :stream_name
149
+ # The name of the stream.
150
+ # @option params [required, Hash<String,String>] :tags
151
+ # The set of key-value pairs to use to create the tags.
152
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
153
+ #
154
+ # @example Request syntax with placeholder values
155
+ # resp = client.add_tags_to_stream({
156
+ # stream_name: "StreamName", # required
157
+ # tags: { # required
158
+ # "TagKey" => "TagValue",
159
+ # },
160
+ # })
161
+ # @overload add_tags_to_stream(params = {})
162
+ # @param [Hash] params ({})
163
+ def add_tags_to_stream(params = {}, options = {})
164
+ req = build_request(:add_tags_to_stream, params)
165
+ req.send_request(options)
166
+ end
167
+
168
+ # Creates an Amazon Kinesis stream. A stream captures and transports
169
+ # data records that are continuously emitted from different data sources
170
+ # or *producers*. Scale-out within a stream is explicitly supported by
171
+ # means of shards, which are uniquely identified groups of data records
172
+ # in a stream.
173
+ #
174
+ # You specify and control the number of shards that a stream is composed
175
+ # of. Each shard can support reads up to 5 transactions per second, up
176
+ # to a maximum data read total of 2 MB per second. Each shard can
177
+ # support writes up to 1,000 records per second, up to a maximum data
178
+ # write total of 1 MB per second. You can add shards to a stream if the
179
+ # amount of data input increases and you can remove shards if the amount
180
+ # of data input decreases.
181
+ #
182
+ # The stream name identifies the stream. The name is scoped to the AWS
183
+ # account used by the application. It is also scoped by region. That is,
184
+ # two streams in two different accounts can have the same name, and two
185
+ # streams in the same account, but in two different regions, can have
186
+ # the same name.
187
+ #
188
+ # `CreateStream` is an asynchronous operation. Upon receiving a
189
+ # `CreateStream` request, Amazon Kinesis immediately returns and sets
190
+ # the stream status to `CREATING`. After the stream is created, Amazon
191
+ # Kinesis sets the stream status to `ACTIVE`. You should perform read
192
+ # and write operations only on an `ACTIVE` stream.
193
+ #
194
+ # You receive a `LimitExceededException` when making a `CreateStream`
195
+ # request if you try to do one of the following:
196
+ #
197
+ # * Have more than five streams in the `CREATING` state at any point in
198
+ # time.
199
+ #
200
+ # * Create more shards than are authorized for your account.
201
+ #
202
+ # For the default shard limit for an AWS account, see [Streams
203
+ # Limits][1] in the *Amazon Kinesis Streams Developer Guide*. If you
204
+ # need to increase this limit, [contact AWS Support][2].
205
+ #
206
+ # You can use `DescribeStream` to check the stream status, which is
207
+ # returned in `StreamStatus`.
208
+ #
209
+ # CreateStream has a limit of 5 transactions per second per account.
210
+ #
211
+ #
212
+ #
213
+ # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html
214
+ # [2]: http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html
215
+ # @option params [required, String] :stream_name
216
+ # A name to identify the stream. The stream name is scoped to the AWS
217
+ # account used by the application that creates the stream. It is also
218
+ # scoped by region. That is, two streams in two different AWS accounts
219
+ # can have the same name, and two streams in the same AWS account but in
220
+ # two different regions can have the same name.
221
+ # @option params [required, Integer] :shard_count
222
+ # The number of shards that the stream will use. The throughput of the
223
+ # stream is a function of the number of shards; more shards are required
224
+ # for greater provisioned throughput.
225
+ #
227
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
228
+ #
229
+ # @example Request syntax with placeholder values
230
+ # resp = client.create_stream({
231
+ # stream_name: "StreamName", # required
232
+ # shard_count: 1, # required
233
+ # })
234
+ # @overload create_stream(params = {})
235
+ # @param [Hash] params ({})
236
+ def create_stream(params = {}, options = {})
237
+ req = build_request(:create_stream, params)
238
+ req.send_request(options)
239
+ end
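
Because `CreateStream` is asynchronous, a caller typically checks `DescribeStream` until the status becomes `ACTIVE`, as the documentation above suggests. A minimal polling sketch follows; the stream name, shard count, and sleep interval are placeholders (the gem also ships waiters, per the requires at the top of the gem):

    require 'aws-sdk-kinesis'

    client = Aws::Kinesis::Client.new(region: 'us-east-1')
    client.create_stream(stream_name: 'my-stream', shard_count: 2)

    # CreateStream returns immediately with the stream in CREATING.
    loop do
      status = client.describe_stream(stream_name: 'my-stream')
                     .stream_description.stream_status
      break if status == 'ACTIVE'
      sleep 10
    end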
240
+
241
+ # Decreases the Amazon Kinesis stream's retention period, which is the
242
+ # length of time data records are accessible after they are added to the
243
+ # stream. The minimum value of a stream's retention period is 24 hours.
244
+ #
245
+ # This operation may result in lost data. For example, if the stream's
246
+ # retention period is 48 hours and is decreased to 24 hours, any data
247
+ # already in the stream that is older than 24 hours is inaccessible.
248
+ # @option params [required, String] :stream_name
249
+ # The name of the stream to modify.
250
+ # @option params [required, Integer] :retention_period_hours
251
+ # The new retention period of the stream, in hours. Must be less than
252
+ # the current retention period.
253
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
254
+ #
255
+ # @example Request syntax with placeholder values
256
+ # resp = client.decrease_stream_retention_period({
257
+ # stream_name: "StreamName", # required
258
+ # retention_period_hours: 1, # required
259
+ # })
260
+ # @overload decrease_stream_retention_period(params = {})
261
+ # @param [Hash] params ({})
262
+ def decrease_stream_retention_period(params = {}, options = {})
263
+ req = build_request(:decrease_stream_retention_period, params)
264
+ req.send_request(options)
265
+ end
266
+
267
+ # Deletes an Amazon Kinesis stream and all its shards and data. You must
268
+ # shut down any applications that are operating on the stream before you
269
+ # delete the stream. If an application attempts to operate on a deleted
270
+ # stream, it will receive the exception `ResourceNotFoundException`.
271
+ #
272
+ # If the stream is in the `ACTIVE` state, you can delete it. After a
273
+ # `DeleteStream` request, the specified stream is in the `DELETING`
274
+ # state until Amazon Kinesis completes the deletion.
275
+ #
276
+ # **Note:** Amazon Kinesis might continue to accept data read and write
277
+ # operations, such as PutRecord, PutRecords, and GetRecords, on a stream
278
+ # in the `DELETING` state until the stream deletion is complete.
279
+ #
280
+ # When you delete a stream, any shards in that stream are also deleted,
281
+ # and any tags are dissociated from the stream.
282
+ #
283
+ # You can use the DescribeStream operation to check the state of the
284
+ # stream, which is returned in `StreamStatus`.
285
+ #
286
+ # DeleteStream has a limit of 5 transactions per second per account.
287
+ # @option params [required, String] :stream_name
288
+ # The name of the stream to delete.
289
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
290
+ #
291
+ # @example Request syntax with placeholder values
292
+ # resp = client.delete_stream({
293
+ # stream_name: "StreamName", # required
294
+ # })
295
+ # @overload delete_stream(params = {})
296
+ # @param [Hash] params ({})
297
+ def delete_stream(params = {}, options = {})
298
+ req = build_request(:delete_stream, params)
299
+ req.send_request(options)
300
+ end
301
+
302
+ # Describes the shard limits and usage for the account.
303
+ #
304
+ # If you update your account limits, the old limits might be returned
305
+ # for a few minutes.
306
+ #
307
+ # This operation has a limit of 1 transaction per second per account.
308
+ # @return [Types::DescribeLimitsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
309
+ #
310
+ # * {Types::DescribeLimitsOutput#shard_limit #ShardLimit} => Integer
311
+ # * {Types::DescribeLimitsOutput#open_shard_count #OpenShardCount} => Integer
312
+ #
313
+ # @example Request syntax with placeholder values
314
+ # resp = client.describe_limits()
315
+ #
316
+ # @example Response structure
317
+ # resp.shard_limit #=> Integer
318
+ # resp.open_shard_count #=> Integer
319
+ # @overload describe_limits(params = {})
320
+ # @param [Hash] params ({})
321
+ def describe_limits(params = {}, options = {})
322
+ req = build_request(:describe_limits, params)
323
+ req.send_request(options)
324
+ end
325
+
326
+ # Describes the specified Amazon Kinesis stream.
327
+ #
328
+ # The information returned includes the stream name, Amazon Resource
329
+ # Name (ARN), creation time, enhanced metric configuration, and shard
330
+ # map. The shard map is an array of shard objects. For each shard
331
+ # object, there is the hash key and sequence number ranges that the
332
+ # shard spans, and the IDs of any earlier shards that played in a role
333
+ # in creating the shard. Every record ingested in the stream is
334
+ # identified by a sequence number, which is assigned when the record is
335
+ # put into the stream.
336
+ #
337
+ # You can limit the number of shards returned by each call. For more
338
+ # information, see [Retrieving Shards from a Stream][1] in the *Amazon
339
+ # Kinesis Streams Developer Guide*.
340
+ #
341
+ # There are no guarantees about the chronological order of the shards returned.
342
+ # To process shards in chronological order, use the ID of the parent
343
+ # shard to track the lineage to the oldest shard.
344
+ #
345
+ # This operation has a limit of 10 transactions per second per account.
346
+ #
347
+ #
348
+ #
349
+ # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-retrieve-shards.html
350
+ # @option params [required, String] :stream_name
351
+ # The name of the stream to describe.
352
+ # @option params [Integer] :limit
353
+ # The maximum number of shards to return in a single call. The default
354
+ # value is 100. If you specify a value greater than 100, at most 100
355
+ # shards are returned.
356
+ # @option params [String] :exclusive_start_shard_id
357
+ # The shard ID of the shard to start with.
358
+ # @return [Types::DescribeStreamOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
359
+ #
360
+ # * {Types::DescribeStreamOutput#stream_description #StreamDescription} => Types::StreamDescription
361
+ #
362
+ # @example Request syntax with placeholder values
363
+ # resp = client.describe_stream({
364
+ # stream_name: "StreamName", # required
365
+ # limit: 1,
366
+ # exclusive_start_shard_id: "ShardId",
367
+ # })
368
+ #
369
+ # @example Response structure
370
+ # resp.stream_description.stream_name #=> String
371
+ # resp.stream_description.stream_arn #=> String
372
+ # resp.stream_description.stream_status #=> String, one of "CREATING", "DELETING", "ACTIVE", "UPDATING"
373
+ # resp.stream_description.shards #=> Array
374
+ # resp.stream_description.shards[0].shard_id #=> String
375
+ # resp.stream_description.shards[0].parent_shard_id #=> String
376
+ # resp.stream_description.shards[0].adjacent_parent_shard_id #=> String
377
+ # resp.stream_description.shards[0].hash_key_range.starting_hash_key #=> String
378
+ # resp.stream_description.shards[0].hash_key_range.ending_hash_key #=> String
379
+ # resp.stream_description.shards[0].sequence_number_range.starting_sequence_number #=> String
380
+ # resp.stream_description.shards[0].sequence_number_range.ending_sequence_number #=> String
381
+ # resp.stream_description.has_more_shards #=> Boolean
382
+ # resp.stream_description.retention_period_hours #=> Integer
383
+ # resp.stream_description.stream_creation_timestamp #=> Time
384
+ # resp.stream_description.enhanced_monitoring #=> Array
385
+ # resp.stream_description.enhanced_monitoring[0].shard_level_metrics #=> Array
386
+ # resp.stream_description.enhanced_monitoring[0].shard_level_metrics[0] #=> String, one of "IncomingBytes", "IncomingRecords", "OutgoingBytes", "OutgoingRecords", "WriteProvisionedThroughputExceeded", "ReadProvisionedThroughputExceeded", "IteratorAgeMilliseconds", "ALL"
387
+ # @overload describe_stream(params = {})
388
+ # @param [Hash] params ({})
389
+ def describe_stream(params = {}, options = {})
390
+ req = build_request(:describe_stream, params)
391
+ req.send_request(options)
392
+ end
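
The shard pagination described above (looping on `has_more_shards` and `exclusive_start_shard_id`) might look like this sketch; the stream name and page size are placeholders:

    require 'aws-sdk-kinesis'

    client = Aws::Kinesis::Client.new(region: 'us-east-1')

    shards = []
    last_shard_id = nil
    loop do
      params = { stream_name: 'my-stream', limit: 100 }
      params[:exclusive_start_shard_id] = last_shard_id if last_shard_id
      description = client.describe_stream(params).stream_description
      shards.concat(description.shards)
      break unless description.has_more_shards
      last_shard_id = shards.last.shard_id
    end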
393
+
394
+ # Disables enhanced monitoring.
395
+ # @option params [required, String] :stream_name
396
+ # The name of the Amazon Kinesis stream for which to disable enhanced
397
+ # monitoring.
398
+ # @option params [required, Array<String>] :shard_level_metrics
399
+ # List of shard-level metrics to disable.
400
+ #
401
+ # The following are the valid shard-level metrics. The value "`ALL`"
402
+ # disables every metric.
403
+ #
404
+ # * `IncomingBytes`
405
+ #
406
+ # * `IncomingRecords`
407
+ #
408
+ # * `OutgoingBytes`
409
+ #
410
+ # * `OutgoingRecords`
411
+ #
412
+ # * `WriteProvisionedThroughputExceeded`
413
+ #
414
+ # * `ReadProvisionedThroughputExceeded`
415
+ #
416
+ # * `IteratorAgeMilliseconds`
417
+ #
418
+ # * `ALL`
419
+ #
420
+ # For more information, see [Monitoring the Amazon Kinesis Streams
421
+ # Service with Amazon CloudWatch][1] in the *Amazon Kinesis Streams
422
+ # Developer Guide*.
423
+ #
424
+ #
425
+ #
426
+ # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html
427
+ # @return [Types::EnhancedMonitoringOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
428
+ #
429
+ # * {Types::EnhancedMonitoringOutput#stream_name #StreamName} => String
430
+ # * {Types::EnhancedMonitoringOutput#current_shard_level_metrics #CurrentShardLevelMetrics} => Array&lt;String&gt;
431
+ # * {Types::EnhancedMonitoringOutput#desired_shard_level_metrics #DesiredShardLevelMetrics} => Array&lt;String&gt;
432
+ #
433
+ # @example Request syntax with placeholder values
434
+ # resp = client.disable_enhanced_monitoring({
435
+ # stream_name: "StreamName", # required
436
+ # shard_level_metrics: ["IncomingBytes"], # required, accepts IncomingBytes, IncomingRecords, OutgoingBytes, OutgoingRecords, WriteProvisionedThroughputExceeded, ReadProvisionedThroughputExceeded, IteratorAgeMilliseconds, ALL
437
+ # })
438
+ #
439
+ # @example Response structure
440
+ # resp.stream_name #=> String
441
+ # resp.current_shard_level_metrics #=> Array
442
+ # resp.current_shard_level_metrics[0] #=> String, one of "IncomingBytes", "IncomingRecords", "OutgoingBytes", "OutgoingRecords", "WriteProvisionedThroughputExceeded", "ReadProvisionedThroughputExceeded", "IteratorAgeMilliseconds", "ALL"
443
+ # resp.desired_shard_level_metrics #=> Array
444
+ # resp.desired_shard_level_metrics[0] #=> String, one of "IncomingBytes", "IncomingRecords", "OutgoingBytes", "OutgoingRecords", "WriteProvisionedThroughputExceeded", "ReadProvisionedThroughputExceeded", "IteratorAgeMilliseconds", "ALL"
445
+ # @overload disable_enhanced_monitoring(params = {})
446
+ # @param [Hash] params ({})
447
+ def disable_enhanced_monitoring(params = {}, options = {})
448
+ req = build_request(:disable_enhanced_monitoring, params)
449
+ req.send_request(options)
450
+ end
451
+
452
+ # Enables enhanced Amazon Kinesis stream monitoring for shard-level
453
+ # metrics.
454
+ # @option params [required, String] :stream_name
455
+ # The name of the stream for which to enable enhanced monitoring.
456
+ # @option params [required, Array<String>] :shard_level_metrics
457
+ # List of shard-level metrics to enable.
458
+ #
459
+ # The following are the valid shard-level metrics. The value "`ALL`"
460
+ # enables every metric.
461
+ #
462
+ # * `IncomingBytes`
463
+ #
464
+ # * `IncomingRecords`
465
+ #
466
+ # * `OutgoingBytes`
467
+ #
468
+ # * `OutgoingRecords`
469
+ #
470
+ # * `WriteProvisionedThroughputExceeded`
471
+ #
472
+ # * `ReadProvisionedThroughputExceeded`
473
+ #
474
+ # * `IteratorAgeMilliseconds`
475
+ #
476
+ # * `ALL`
477
+ #
478
+ # For more information, see [Monitoring the Amazon Kinesis Streams
479
+ # Service with Amazon CloudWatch][1] in the *Amazon Kinesis Streams
480
+ # Developer Guide*.
481
+ #
482
+ #
483
+ #
484
+ # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html
485
+ # @return [Types::EnhancedMonitoringOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
486
+ #
487
+ # * {Types::EnhancedMonitoringOutput#stream_name #StreamName} => String
488
+ # * {Types::EnhancedMonitoringOutput#current_shard_level_metrics #CurrentShardLevelMetrics} => Array&lt;String&gt;
489
+ # * {Types::EnhancedMonitoringOutput#desired_shard_level_metrics #DesiredShardLevelMetrics} => Array&lt;String&gt;
490
+ #
491
+ # @example Request syntax with placeholder values
492
+ # resp = client.enable_enhanced_monitoring({
493
+ # stream_name: "StreamName", # required
494
+ # shard_level_metrics: ["IncomingBytes"], # required, accepts IncomingBytes, IncomingRecords, OutgoingBytes, OutgoingRecords, WriteProvisionedThroughputExceeded, ReadProvisionedThroughputExceeded, IteratorAgeMilliseconds, ALL
495
+ # })
496
+ #
497
+ # @example Response structure
498
+ # resp.stream_name #=> String
499
+ # resp.current_shard_level_metrics #=> Array
500
+ # resp.current_shard_level_metrics[0] #=> String, one of "IncomingBytes", "IncomingRecords", "OutgoingBytes", "OutgoingRecords", "WriteProvisionedThroughputExceeded", "ReadProvisionedThroughputExceeded", "IteratorAgeMilliseconds", "ALL"
501
+ # resp.desired_shard_level_metrics #=> Array
502
+ # resp.desired_shard_level_metrics[0] #=> String, one of "IncomingBytes", "IncomingRecords", "OutgoingBytes", "OutgoingRecords", "WriteProvisionedThroughputExceeded", "ReadProvisionedThroughputExceeded", "IteratorAgeMilliseconds", "ALL"
503
+ # @overload enable_enhanced_monitoring(params = {})
504
+ # @param [Hash] params ({})
505
+ def enable_enhanced_monitoring(params = {}, options = {})
506
+ req = build_request(:enable_enhanced_monitoring, params)
507
+ req.send_request(options)
508
+ end
509
+
510
+ # Gets data records from an Amazon Kinesis stream's shard.
511
+ #
512
+ # Specify a shard iterator using the `ShardIterator` parameter. The
513
+ # shard iterator specifies the position in the shard from which you want
514
+ # to start reading data records sequentially. If there are no records
515
+ # available in the portion of the shard that the iterator points to,
516
+ # GetRecords returns an empty list. Note that it might take multiple
517
+ # calls to get to a portion of the shard that contains records.
518
+ #
519
+ # You can scale by provisioning multiple shards per stream while
520
+ # considering service limits (for more information, see [Streams
521
+ # Limits][1] in the *Amazon Kinesis Streams Developer Guide*). Your
522
+ # application should have one thread per shard, each reading
523
+ # continuously from its stream. To read from a stream continually, call
524
+ # GetRecords in a loop. Use GetShardIterator to get the shard iterator
525
+ # to specify in the first GetRecords call. GetRecords returns a new
526
+ # shard iterator in `NextShardIterator`. Specify the shard iterator
527
+ # returned in `NextShardIterator` in subsequent calls to GetRecords.
528
+ # Note that if the shard has been closed, the shard iterator can't
529
+ # return more data and GetRecords returns `null` in `NextShardIterator`.
530
+ # You can terminate the loop when the shard is closed, or when the shard
531
+ # iterator reaches the record with the sequence number or other
532
+ # attribute that marks it as the last record to process.
533
+ #
534
+ # Each data record can be up to 1 MB in size, and each shard can read up
535
+ # to 2 MB per second. You can ensure that your calls don't exceed the
536
+ # maximum supported size or throughput by using the `Limit` parameter to
537
+ # specify the maximum number of records that GetRecords can return.
538
+ # Consider your average record size when determining this limit.
539
+ #
540
+ # The size of the data returned by GetRecords varies depending on the
541
+ # utilization of the shard. The maximum size of data that GetRecords can
542
+ # return is 10 MB. If a call returns this amount of data, subsequent
543
+ # calls made within the next 5 seconds throw
544
+ # `ProvisionedThroughputExceededException`. If there is insufficient
545
+ # provisioned throughput on the shard, subsequent calls made within the
546
+ # next 1 second throw `ProvisionedThroughputExceededException`. Note
547
+ # that GetRecords won't return any data when it throws an exception.
548
+ # For this reason, we recommend that you wait one second between calls
549
+ # to GetRecords; however, it's possible that the application will get
550
+ # exceptions for longer than 1 second.
551
+ #
552
+ # To detect whether the application is falling behind in processing, you
553
+ # can use the `MillisBehindLatest` response attribute. You can also
554
+ # monitor the stream using CloudWatch metrics and other mechanisms (see
555
+ # [Monitoring][2] in the *Amazon Kinesis Streams Developer Guide*).
556
+ #
557
+ # Each Amazon Kinesis record includes a value,
558
+ # `ApproximateArrivalTimestamp`, that is set when a stream successfully
559
+ # receives and stores a record. This is commonly referred to as a
560
+ # server-side timestamp, whereas a client-side timestamp is set when a
561
+ # data producer creates or sends the record to a stream (a data producer
562
+ # is any data source putting data records into a stream, for example
563
+ # with PutRecords). The timestamp has millisecond precision. There are
564
+ # no guarantees about the timestamp accuracy, or that the timestamp is
565
+ # always increasing. For example, records in a shard or across a stream
566
+ # might have timestamps that are out of order.
567
+ #
568
+ #
569
+ #
570
+ # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html
571
+ # [2]: http://docs.aws.amazon.com/kinesis/latest/dev/monitoring.html
572
+ # @option params [required, String] :shard_iterator
573
+ # The position in the shard from which you want to start sequentially
574
+ # reading data records. A shard iterator specifies this position using
575
+ # the sequence number of a data record in the shard.
576
+ # @option params [Integer] :limit
577
+ # The maximum number of records to return. Specify a value of up to
578
+ # 10,000. If you specify a value that is greater than 10,000, GetRecords
579
+ # throws `InvalidArgumentException`.
580
+ # @return [Types::GetRecordsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
581
+ #
582
+ # * {Types::GetRecordsOutput#records #Records} => Array&lt;Types::Record&gt;
583
+ # * {Types::GetRecordsOutput#next_shard_iterator #NextShardIterator} => String
584
+ # * {Types::GetRecordsOutput#millis_behind_latest #MillisBehindLatest} => Integer
585
+ #
586
+ # @example Request syntax with placeholder values
587
+ # resp = client.get_records({
588
+ # shard_iterator: "ShardIterator", # required
589
+ # limit: 1,
590
+ # })
591
+ #
592
+ # @example Response structure
593
+ # resp.records #=> Array
594
+ # resp.records[0].sequence_number #=> String
595
+ # resp.records[0].approximate_arrival_timestamp #=> Time
596
+ # resp.records[0].data #=> String
597
+ # resp.records[0].partition_key #=> String
598
+ # resp.next_shard_iterator #=> String
599
+ # resp.millis_behind_latest #=> Integer
600
+ # @overload get_records(params = {})
601
+ # @param [Hash] params ({})
602
+ def get_records(params = {}, options = {})
603
+ req = build_request(:get_records, params)
604
+ req.send_request(options)
605
+ end
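
The read loop described above (one `GetShardIterator` call, then repeated `GetRecords` calls following `NextShardIterator`, pausing about a second between calls) can be sketched as follows; the stream name, shard ID, and limit are placeholders:

    require 'aws-sdk-kinesis'

    client = Aws::Kinesis::Client.new(region: 'us-east-1')

    iterator = client.get_shard_iterator(
      stream_name: 'my-stream',
      shard_id: 'shardId-000000000000',
      shard_iterator_type: 'TRIM_HORIZON'
    ).shard_iterator

    while iterator
      resp = client.get_records(shard_iterator: iterator, limit: 1000)
      resp.records.each { |record| puts "#{record.sequence_number}: #{record.data}" }
      iterator = resp.next_shard_iterator   # nil once a closed shard is fully read
      sleep 1                               # stay under the per-shard read limits
    end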
606
+
607
+ # Gets an Amazon Kinesis shard iterator. A shard iterator expires five
608
+ # minutes after it is returned to the requester.
609
+ #
610
+ # A shard iterator specifies the shard position from which to start
611
+ # reading data records sequentially. The position is specified using the
612
+ # sequence number of a data record in a shard. A sequence number is the
613
+ # identifier associated with every record ingested in the stream, and is
614
+ # assigned when a record is put into the stream. Each stream has one or
615
+ # more shards.
616
+ #
617
+ # You must specify the shard iterator type. For example, you can set the
618
+ # `ShardIteratorType` parameter to read exactly from the position
619
+ # denoted by a specific sequence number by using the
620
+ # `AT_SEQUENCE_NUMBER` shard iterator type, or right after the sequence
621
+ # number by using the `AFTER_SEQUENCE_NUMBER` shard iterator type, using
622
+ # sequence numbers returned by earlier calls to PutRecord, PutRecords,
623
+ # GetRecords, or DescribeStream. In the request, you can specify the
624
+ # shard iterator type `AT_TIMESTAMP` to read records from an arbitrary
625
+ # point in time, `TRIM_HORIZON` to cause `ShardIterator` to point to the
626
+ # last untrimmed record in the shard in the system (the oldest data
627
+ # record in the shard), or `LATEST` so that you always read the most
628
+ # recent data in the shard.
629
+ #
630
+ # When you read repeatedly from a stream, use a GetShardIterator request
631
+ # to get the first shard iterator for use in your first GetRecords
632
+ # request and for subsequent reads use the shard iterator returned by
633
+ # the GetRecords request in `NextShardIterator`. A new shard iterator is
634
+ # returned by every GetRecords request in `NextShardIterator`, which you
635
+ # use in the `ShardIterator` parameter of the next GetRecords request.
636
+ #
637
+ # If a GetShardIterator request is made too often, you receive a
638
+ # `ProvisionedThroughputExceededException`. For more information about
639
+ # throughput limits, see GetRecords, and [Streams Limits][1] in the
640
+ # *Amazon Kinesis Streams Developer Guide*.
641
+ #
642
+ # If the shard is closed, GetShardIterator returns a valid iterator for
643
+ # the last sequence number of the shard. Note that a shard can be closed
644
+ # as a result of using SplitShard or MergeShards.
645
+ #
646
+ # GetShardIterator has a limit of 5 transactions per second per account
647
+ # per open shard.
648
+ #
649
+ #
650
+ #
651
+ # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html
652
+ # @option params [required, String] :stream_name
653
+ # The name of the Amazon Kinesis stream.
654
+ # @option params [required, String] :shard_id
655
+ # The shard ID of the Amazon Kinesis shard to get the iterator for.
656
+ # @option params [required, String] :shard_iterator_type
657
+ # Determines how the shard iterator is used to start reading data
658
+ # records from the shard.
659
+ #
660
+ # The following are the valid Amazon Kinesis shard iterator types:
661
+ #
662
+ # * AT\_SEQUENCE\_NUMBER - Start reading from the position denoted by a
663
+ # specific sequence number, provided in the value
664
+ # `StartingSequenceNumber`.
665
+ #
666
+ # * AFTER\_SEQUENCE\_NUMBER - Start reading right after the position
667
+ # denoted by a specific sequence number, provided in the value
668
+ # `StartingSequenceNumber`.
669
+ #
670
+ # * AT\_TIMESTAMP - Start reading from the position denoted by a
671
+ # specific timestamp, provided in the value `Timestamp`.
672
+ #
673
+ # * TRIM\_HORIZON - Start reading at the last untrimmed record in the
674
+ # shard in the system, which is the oldest data record in the shard.
675
+ #
676
+ # * LATEST - Start reading just after the most recent record in the
677
+ # shard, so that you always read the most recent data in the shard.
678
+ # @option params [String] :starting_sequence_number
679
+ # The sequence number of the data record in the shard from which to
680
+ # start reading. Used with shard iterator type AT\_SEQUENCE\_NUMBER and
681
+ # AFTER\_SEQUENCE\_NUMBER.
682
+ # @option params [Time,DateTime,Date,Integer,String] :timestamp
683
+ # The timestamp of the data record from which to start reading. Used
684
+ # with shard iterator type AT\_TIMESTAMP. A timestamp is the Unix epoch
685
+ # date with precision in milliseconds. For example,
686
+ # `2016-04-04T19:58:46.480-00:00` or `1459799926.480`. If a record with
687
+ # this exact timestamp does not exist, the iterator returned is for the
688
+ # next (later) record. If the timestamp is older than the current trim
689
+ # horizon, the iterator returned is for the oldest untrimmed data record
690
+ # (TRIM\_HORIZON).
691
+ # @return [Types::GetShardIteratorOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
692
+ #
693
+ # * {Types::GetShardIteratorOutput#shard_iterator #ShardIterator} => String
694
+ #
695
+ # @example Request syntax with placeholder values
696
+ # resp = client.get_shard_iterator({
697
+ # stream_name: "StreamName", # required
698
+ # shard_id: "ShardId", # required
699
+ # shard_iterator_type: "AT_SEQUENCE_NUMBER", # required, accepts AT_SEQUENCE_NUMBER, AFTER_SEQUENCE_NUMBER, TRIM_HORIZON, LATEST, AT_TIMESTAMP
700
+ # starting_sequence_number: "SequenceNumber",
701
+ # timestamp: Time.now,
702
+ # })
703
+ #
704
+ # @example Response structure
705
+ # resp.shard_iterator #=> String
706
+ # @overload get_shard_iterator(params = {})
707
+ # @param [Hash] params ({})
708
+ def get_shard_iterator(params = {}, options = {})
709
+ req = build_request(:get_shard_iterator, params)
710
+ req.send_request(options)
711
+ end
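
As a small illustration of the `AT_TIMESTAMP` iterator type described above (stream name, shard ID, and the one-hour offset are placeholders):

    require 'aws-sdk-kinesis'

    client = Aws::Kinesis::Client.new(region: 'us-east-1')

    resp = client.get_shard_iterator(
      stream_name: 'my-stream',
      shard_id: 'shardId-000000000000',
      shard_iterator_type: 'AT_TIMESTAMP',
      timestamp: Time.now - 3600   # read records stored during the last hour
    )
    iterator = resp.shard_iterator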
712
+
713
+ # Increases the Amazon Kinesis stream's retention period, which is the
714
+ # length of time data records are accessible after they are added to the
715
+ # stream. The maximum value of a stream's retention period is 168 hours
716
+ # (7 days).
717
+ #
718
+ # Upon choosing a longer stream retention period, this operation will
719
+ # increase the time period during which records that have not yet
720
+ # expired are accessible. However, it will not make previous data that has expired
721
+ # (older than the stream's previous retention period) accessible after
722
+ # the operation has been called. For example, if a stream's retention
723
+ # period is set to 24 hours and is increased to 168 hours, any data that
724
+ # is older than 24 hours will remain inaccessible to consumer
725
+ # applications.
726
+ # @option params [required, String] :stream_name
727
+ # The name of the stream to modify.
728
+ # @option params [required, Integer] :retention_period_hours
729
+ # The new retention period of the stream, in hours. Must be more than
730
+ # the current retention period.
731
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
732
+ #
733
+ # @example Request syntax with placeholder values
734
+ # resp = client.increase_stream_retention_period({
735
+ # stream_name: "StreamName", # required
736
+ # retention_period_hours: 1, # required
737
+ # })
738
+ # @overload increase_stream_retention_period(params = {})
739
+ # @param [Hash] params ({})
740
+ def increase_stream_retention_period(params = {}, options = {})
741
+ req = build_request(:increase_stream_retention_period, params)
742
+ req.send_request(options)
743
+ end
744
+
745
+ # Lists your Amazon Kinesis streams.
746
+ #
747
+ # The number of streams may be too large to return from a single call to
748
+ # `ListStreams`. You can limit the number of returned streams using the
749
+ # `Limit` parameter. If you do not specify a value for the `Limit`
750
+ # parameter, Amazon Kinesis uses the default limit, which is currently
751
+ # 10.
752
+ #
753
+ # You can detect if there are more streams available to list by using
754
+ # the `HasMoreStreams` flag from the returned output. If there are more
755
+ # streams available, you can request more streams by using the name of
756
+ # the last stream returned by the `ListStreams` request in the
757
+ # `ExclusiveStartStreamName` parameter in a subsequent request to
758
+ # `ListStreams`. The group of stream names returned by the subsequent
759
+ # request is then added to the list. You can continue this process until
760
+ # all the stream names have been collected in the list.
761
+ #
762
+ # ListStreams has a limit of 5 transactions per second per account.
763
+ # @option params [Integer] :limit
764
+ # The maximum number of streams to list.
765
+ # @option params [String] :exclusive_start_stream_name
766
+ # The name of the stream to start the list with.
767
+ # @return [Types::ListStreamsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
768
+ #
769
+ # * {Types::ListStreamsOutput#stream_names #StreamNames} => Array&lt;String&gt;
770
+ # * {Types::ListStreamsOutput#has_more_streams #HasMoreStreams} => Boolean
771
+ #
772
+ # @example Request syntax with placeholder values
773
+ # resp = client.list_streams({
774
+ # limit: 1,
775
+ # exclusive_start_stream_name: "StreamName",
776
+ # })
777
+ #
778
+ # @example Response structure
779
+ # resp.stream_names #=> Array
780
+ # resp.stream_names[0] #=> String
781
+ # resp.has_more_streams #=> Boolean
782
+ # @overload list_streams(params = {})
783
+ # @param [Hash] params ({})
784
+ def list_streams(params = {}, options = {})
785
+ req = build_request(:list_streams, params)
786
+ req.send_request(options)
787
+ end
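
The `HasMoreStreams`/`ExclusiveStartStreamName` pagination described above might be written as follows; the page size is a placeholder:

    require 'aws-sdk-kinesis'

    client = Aws::Kinesis::Client.new(region: 'us-east-1')

    stream_names = []
    last_name = nil
    loop do
      params = { limit: 10 }
      params[:exclusive_start_stream_name] = last_name if last_name
      resp = client.list_streams(params)
      stream_names.concat(resp.stream_names)
      break unless resp.has_more_streams
      last_name = stream_names.last
    end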
788
+
789
+ # Lists the tags for the specified Amazon Kinesis stream.
790
+ # @option params [required, String] :stream_name
791
+ # The name of the stream.
792
+ # @option params [String] :exclusive_start_tag_key
793
+ # The key to use as the starting point for the list of tags. If this
794
+ # parameter is set, `ListTagsForStream` gets all tags that occur after
795
+ # `ExclusiveStartTagKey`.
796
+ # @option params [Integer] :limit
797
+ # The number of tags to return. If this number is less than the total
798
+ # number of tags associated with the stream, `HasMoreTags` is set to
799
+ # `true`. To list additional tags, set `ExclusiveStartTagKey` to the
800
+ # last key in the response.
801
+ # @return [Types::ListTagsForStreamOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
802
+ #
803
+ # * {Types::ListTagsForStreamOutput#tags #Tags} => Array&lt;Types::Tag&gt;
804
+ # * {Types::ListTagsForStreamOutput#has_more_tags #HasMoreTags} => Boolean
805
+ #
806
+ # @example Request syntax with placeholder values
807
+ # resp = client.list_tags_for_stream({
808
+ # stream_name: "StreamName", # required
809
+ # exclusive_start_tag_key: "TagKey",
810
+ # limit: 1,
811
+ # })
812
+ #
813
+ # @example Response structure
814
+ # resp.tags #=> Array
815
+ # resp.tags[0].key #=> String
816
+ # resp.tags[0].value #=> String
817
+ # resp.has_more_tags #=> Boolean
818
+ # @overload list_tags_for_stream(params = {})
819
+ # @param [Hash] params ({})
820
+ def list_tags_for_stream(params = {}, options = {})
821
+ req = build_request(:list_tags_for_stream, params)
822
+ req.send_request(options)
823
+ end
824
+
825
+ # Merges two adjacent shards in an Amazon Kinesis stream and combines
826
+ # them into a single shard to reduce the stream's capacity to ingest
827
+ # and transport data. Two shards are considered adjacent if the union of
828
+ # the hash key ranges for the two shards forms a contiguous set with no
829
+ # gaps. For example, if you have two shards, one with a hash key range
830
+ # of 276...381 and the other with a hash key range of 382...454, then
831
+ # you could merge these two shards into a single shard that would have a
832
+ # hash key range of 276...454. After the merge, the single child shard
833
+ # receives data for all hash key values covered by the two parent
834
+ # shards.
835
+ #
836
+ # `MergeShards` is called when there is a need to reduce the overall
837
+ # capacity of a stream because of excess capacity that is not being
838
+ # used. You must specify the shard to be merged and the adjacent shard
839
+ # for a stream. For more information about merging shards, see [Merge
840
+ # Two Shards][1] in the *Amazon Kinesis Streams Developer Guide*.
841
+ #
842
+ # If the stream is in the `ACTIVE` state, you can call `MergeShards`. If
843
+ # a stream is in the `CREATING`, `UPDATING`, or `DELETING` state,
844
+ # `MergeShards` returns a `ResourceInUseException`. If the specified
845
+ # stream does not exist, `MergeShards` returns a
846
+ # `ResourceNotFoundException`.
847
+ #
848
+ # You can use DescribeStream to check the state of the stream, which is
849
+ # returned in `StreamStatus`.
850
+ #
851
+ # `MergeShards` is an asynchronous operation. Upon receiving a
852
+ # `MergeShards` request, Amazon Kinesis immediately returns a response
853
+ # and sets the `StreamStatus` to `UPDATING`. After the operation is
854
+ # completed, Amazon Kinesis sets the `StreamStatus` to `ACTIVE`. Read
855
+ # and write operations continue to work while the stream is in the
856
+ # `UPDATING` state.
857
+ #
858
+ # You use DescribeStream to determine the shard IDs that are specified
859
+ # in the `MergeShards` request.
860
+ #
861
+ # If you try to operate on too many streams in parallel using
862
+ # CreateStream, DeleteStream, `MergeShards` or SplitShard, you will
863
+ # receive a `LimitExceededException`.
864
+ #
865
+ # `MergeShards` has a limit of 5 transactions per second per account.
866
+ #
867
+ #
868
+ #
869
+ # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-merge.html
870
+ # @option params [required, String] :stream_name
871
+ # The name of the stream for the merge.
872
+ # @option params [required, String] :shard_to_merge
873
+ # The shard ID of the shard to combine with the adjacent shard for the
874
+ # merge.
875
+ # @option params [required, String] :adjacent_shard_to_merge
876
+ # The shard ID of the adjacent shard for the merge.
877
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
878
+ #
879
+ # @example Request syntax with placeholder values
880
+ # resp = client.merge_shards({
881
+ # stream_name: "StreamName", # required
882
+ # shard_to_merge: "ShardId", # required
883
+ # adjacent_shard_to_merge: "ShardId", # required
884
+ # })
885
+ # @overload merge_shards(params = {})
886
+ # @param [Hash] params ({})
887
+ def merge_shards(params = {}, options = {})
888
+ req = build_request(:merge_shards, params)
889
+ req.send_request(options)
890
+ end
891
+
892
+ # Writes a single data record into an Amazon Kinesis stream. Call
893
+ # `PutRecord` to send data into the stream for real-time ingestion and
894
+ # subsequent processing, one record at a time. Each shard can support
895
+ # writes up to 1,000 records per second, up to a maximum data write
896
+ # total of 1 MB per second.
897
+ #
898
+ # You must specify the name of the stream that captures, stores, and
899
+ # transports the data; a partition key; and the data blob itself.
900
+ #
901
+ # The data blob can be any type of data; for example, a segment from a
902
+ # log file, geographic/location data, website clickstream data, and so
903
+ # on.
904
+ #
905
+ # The partition key is used by Amazon Kinesis to distribute data across
906
+ # shards. Amazon Kinesis segregates the data records that belong to a
907
+ # stream into multiple shards, using the partition key associated with
908
+ # each data record to determine which shard a given data record belongs
909
+ # to.
910
+ #
911
+ # Partition keys are Unicode strings, with a maximum length limit of 256
912
+ # characters for each key. An MD5 hash function is used to map partition
913
+ # keys to 128-bit integer values and to map associated data records to
914
+ # shards using the hash key ranges of the shards. You can override
915
+ # hashing the partition key to determine the shard by explicitly
916
+ # specifying a hash value using the `ExplicitHashKey` parameter. For
917
+ # more information, see [Adding Data to a Stream][1] in the *Amazon
918
+ # Kinesis Streams Developer Guide*.
919
+ #
920
+ # `PutRecord` returns the shard ID of where the data record was placed
921
+ # and the sequence number that was assigned to the data record.
922
+ #
923
+ # Sequence numbers increase over time and are specific to a shard within
924
+ # a stream, not across all shards within a stream. To guarantee strictly
925
+ # increasing ordering, write serially to a shard and use the
926
+ # `SequenceNumberForOrdering` parameter. For more information, see
927
+ # [Adding Data to a Stream][1] in the *Amazon Kinesis Streams Developer
928
+ # Guide*.
929
+ #
930
+ # If a `PutRecord` request cannot be processed because of insufficient
931
+ # provisioned throughput on the shard involved in the request,
932
+ # `PutRecord` throws `ProvisionedThroughputExceededException`.
933
+ #
934
+ # Data records are accessible for only 24 hours from the time that they
935
+ # are added to a stream.
936
+ #
937
+ #
938
+ #
939
+ # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream
940
+ # @option params [required, String] :stream_name
941
+ # The name of the stream to put the data record into.
942
+ # @option params [required, String, IO] :data
943
+ # The data blob to put into the record, which is base64-encoded when the
944
+ # blob is serialized. When the data blob (the payload before
945
+ # base64-encoding) is added to the partition key size, the total size
946
+ # must not exceed the maximum record size (1 MB).
947
+ # @option params [required, String] :partition_key
948
+ # Determines which shard in the stream the data record is assigned to.
949
+ # Partition keys are Unicode strings with a maximum length limit of 256
950
+ # characters for each key. Amazon Kinesis uses the partition key as
951
+ # input to a hash function that maps the partition key and associated
952
+ # data to a specific shard. Specifically, an MD5 hash function is used
953
+ # to map partition keys to 128-bit integer values and to map associated
954
+ # data records to shards. As a result of this hashing mechanism, all
955
+ # data records with the same partition key map to the same shard within
956
+ # the stream.
957
+ # @option params [String] :explicit_hash_key
958
+ # The hash value used to explicitly determine the shard the data record
959
+ # is assigned to by overriding the partition key hash.
960
+ # @option params [String] :sequence_number_for_ordering
961
+ # Guarantees strictly increasing sequence numbers, for puts from the
962
+ # same client and to the same partition key. Usage: set the
963
+ # `SequenceNumberForOrdering` of record *n* to the sequence number of
964
+ # record *n-1* (as returned in the result when putting record *n-1*). If
965
+ # this parameter is not set, records will be coarsely ordered based on
966
+ # arrival time.
967
+ # @return [Types::PutRecordOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
968
+ #
969
+ # * {Types::PutRecordOutput#shard_id #ShardId} => String
970
+ # * {Types::PutRecordOutput#sequence_number #SequenceNumber} => String
971
+ #
972
+ # @example Request syntax with placeholder values
973
+ # resp = client.put_record({
974
+ # stream_name: "StreamName", # required
975
+ # data: "data", # required
976
+ # partition_key: "PartitionKey", # required
977
+ # explicit_hash_key: "HashKey",
978
+ # sequence_number_for_ordering: "SequenceNumber",
979
+ # })
980
+ #
981
+ # @example Response structure
982
+ # resp.shard_id #=> String
983
+ # resp.sequence_number #=> String
984
+ # @overload put_record(params = {})
985
+ # @param [Hash] params ({})
986
+ def put_record(params = {}, options = {})
987
+ req = build_request(:put_record, params)
988
+ req.send_request(options)
989
+ end
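
A sketch of the strict-ordering pattern described above: each put passes the sequence number returned by the previous put as `SequenceNumberForOrdering`. The stream name, partition key, and payloads are placeholders:

    require 'aws-sdk-kinesis'

    client = Aws::Kinesis::Client.new(region: 'us-east-1')

    previous_sequence_number = nil
    %w[event-1 event-2 event-3].each do |payload|
      params = {
        stream_name: 'my-stream',
        data: payload,
        partition_key: 'device-42'
      }
      params[:sequence_number_for_ordering] = previous_sequence_number if previous_sequence_number
      resp = client.put_record(params)
      previous_sequence_number = resp.sequence_number
    end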
990
+
991
+ # Writes multiple data records into an Amazon Kinesis stream in a single
992
+ # call (also referred to as a `PutRecords` request). Use this operation
993
+ # to send data into the stream for data ingestion and processing.
994
+ #
995
+ # Each `PutRecords` request can support up to 500 records. Each record
996
+ # in the request can be as large as 1 MB, up to a limit of 5 MB for the
997
+ # entire request, including partition keys. Each shard can support
998
+ # writes up to 1,000 records per second, up to a maximum data write
999
+ # total of 1 MB per second.
1000
+ #
1001
+ # You must specify the name of the stream that captures, stores, and
1002
+ # transports the data; and an array of request `Records`, with each
1003
+ # record in the array requiring a partition key and data blob. The
1004
+ # record size limit applies to the total size of the partition key and
1005
+ # data blob.
1006
+ #
1007
+ # The data blob can be any type of data; for example, a segment from a
1008
+ # log file, geographic/location data, website clickstream data, and so
1009
+ # on.
1010
+ #
1011
+ # The partition key is used by Amazon Kinesis as input to a hash
1012
+ # function that maps the partition key and associated data to a specific
1013
+ # shard. An MD5 hash function is used to map partition keys to 128-bit
1014
+ # integer values and to map associated data records to shards. As a
1015
+ # result of this hashing mechanism, all data records with the same
1016
+ # partition key map to the same shard within the stream. For more
1017
+ # information, see [Adding Data to a Stream][1] in the *Amazon Kinesis
1018
+ # Streams Developer Guide*.
1019
+ #
1020
+ # Each record in the `Records` array may include an optional parameter,
1021
+ # `ExplicitHashKey`, which overrides the partition key to shard mapping.
1022
+ # This parameter allows a data producer to determine explicitly the
1023
+ # shard where the record is stored. For more information, see [Adding
1024
+ # Multiple Records with PutRecords][2] in the *Amazon Kinesis Streams
1025
+ # Developer Guide*.
1026
+ #
1027
+ # The `PutRecords` response includes an array of response `Records`.
1028
+ # Each record in the response array directly correlates with a record in
1029
+ # the request array using natural ordering, from the top to the bottom
1030
+ # of the request and response. The response `Records` array always
1031
+ # includes the same number of records as the request array.
1032
+ #
1033
+ # The response `Records` array includes both successfully and
1034
+ # unsuccessfully processed records. Amazon Kinesis attempts to process
1035
+ # all records in each `PutRecords` request. A single record failure does
1036
+ # not stop the processing of subsequent records.
1037
+ #
1038
+ # A successfully-processed record includes `ShardId` and
1039
+ # `SequenceNumber` values. The `ShardId` parameter identifies the shard
1040
+ # in the stream where the record is stored. The `SequenceNumber`
1041
+ # parameter is an identifier assigned to the put record, unique to all
1042
+ # records in the stream.
1043
+ #
1044
+ # An unsuccessfully-processed record includes `ErrorCode` and
1045
+ # `ErrorMessage` values. `ErrorCode` reflects the type of error and can
1046
+ # be one of the following values:
1047
+ # `ProvisionedThroughputExceededException` or `InternalFailure`.
1048
+ # `ErrorMessage` provides more detailed information about the
1049
+ # `ProvisionedThroughputExceededException` exception including the
1050
+ # account ID, stream name, and shard ID of the record that was
1051
+ # throttled. For more information about partially successful responses,
1052
+ # see [Adding Multiple Records with PutRecords][3] in the *Amazon
1053
+ # Kinesis Streams Developer Guide*.
1054
+ #
1055
+ # By default, data records are accessible for only 24 hours from the
1056
+ # time that they are added to an Amazon Kinesis stream. This retention
1057
+ # period can be modified using the DecreaseStreamRetentionPeriod and
1058
+ # IncreaseStreamRetentionPeriod operations.
1059
+ #
1060
+ #
1061
+ #
1062
+ # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream
1063
+ # [2]: http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-putrecords
1064
+ # [3]: http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-add-data-to-stream.html#kinesis-using-sdk-java-putrecords
1065
+ # @option params [required, Array<Types::PutRecordsRequestEntry>] :records
1066
+ # The records associated with the request.
1067
+ # @option params [required, String] :stream_name
1068
+ # The stream name associated with the request.
1069
+ # @return [Types::PutRecordsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1070
+ #
1071
+ # * {Types::PutRecordsOutput#failed_record_count #FailedRecordCount} => Integer
1072
+ # * {Types::PutRecordsOutput#records #Records} => Array&lt;Types::PutRecordsResultEntry&gt;
1073
+ #
1074
+ # @example Request syntax with placeholder values
1075
+ # resp = client.put_records({
1076
+ # records: [ # required
1077
+ # {
1078
+ # data: "data", # required
1079
+ # explicit_hash_key: "HashKey",
1080
+ # partition_key: "PartitionKey", # required
1081
+ # },
1082
+ # ],
1083
+ # stream_name: "StreamName", # required
1084
+ # })
1085
+ #
1086
+ # @example Response structure
1087
+ # resp.failed_record_count #=> Integer
1088
+ # resp.records #=> Array
1089
+ # resp.records[0].sequence_number #=> String
1090
+ # resp.records[0].shard_id #=> String
1091
+ # resp.records[0].error_code #=> String
1092
+ # resp.records[0].error_message #=> String
1093
+ # @overload put_records(params = {})
1094
+ # @param [Hash] params ({})
1095
+ def put_records(params = {}, options = {})
1096
+ req = build_request(:put_records, params)
1097
+ req.send_request(options)
1098
+ end
1099
+
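+ # A minimal sketch of a #put_records call with a one-shot retry of entries
+ # that were throttled, as described above (the client, stream name, and
+ # `entries` array are placeholders):
+ #
+ # entries = [
+ #   { data: "payload-1", partition_key: "user-1" },
+ #   { data: "payload-2", partition_key: "user-2" },
+ # ]
+ # resp = client.put_records(stream_name: "my-stream", records: entries)
+ # if resp.failed_record_count > 0
+ #   failed = resp.records.each_with_index.
+ #     select { |record, _i| record.error_code }.
+ #     map { |_record, i| entries[i] }
+ #   client.put_records(stream_name: "my-stream", records: failed)
+ # end
+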
1100
+ # Removes tags from the specified Amazon Kinesis stream. Removed tags
1101
+ # are deleted and cannot be recovered after this operation successfully
1102
+ # completes.
1103
+ #
1104
+ # If you specify a tag that does not exist, it is ignored.
1105
+ # @option params [required, String] :stream_name
1106
+ # The name of the stream.
1107
+ # @option params [required, Array<String>] :tag_keys
1108
+ # A list of tag keys. Each corresponding tag is removed from the stream.
1109
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
1110
+ #
1111
+ # @example Request syntax with placeholder values
1112
+ # resp = client.remove_tags_from_stream({
1113
+ # stream_name: "StreamName", # required
1114
+ # tag_keys: ["TagKey"], # required
1115
+ # })
1116
+ # @overload remove_tags_from_stream(params = {})
1117
+ # @param [Hash] params ({})
1118
+ def remove_tags_from_stream(params = {}, options = {})
1119
+ req = build_request(:remove_tags_from_stream, params)
1120
+ req.send_request(options)
1121
+ end
1122
+
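+ # A minimal sketch of clearing every tag on a stream by listing the tags
+ # first (placeholder stream name; assumes a configured client in `client`):
+ #
+ # tags = client.list_tags_for_stream(stream_name: "my-stream").tags
+ # unless tags.empty?
+ #   client.remove_tags_from_stream(
+ #     stream_name: "my-stream",
+ #     tag_keys: tags.map(&:key),
+ #   )
+ # end
+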
1123
+ # Splits a shard into two new shards in the Amazon Kinesis stream to
1124
+ # increase the stream's capacity to ingest and transport data.
1125
+ # `SplitShard` is called when there is a need to increase the overall
1126
+ # capacity of a stream because of an expected increase in the volume of
1127
+ # data records being ingested.
1128
+ #
1129
+ # You can also use `SplitShard` when a shard appears to be approaching
1130
+ # its maximum utilization; for example, the producers sending data into
1131
+ # the specific shard are suddenly sending more than previously
1132
+ # anticipated. You can also call `SplitShard` to increase stream
1133
+ # capacity, so that more Amazon Kinesis applications can simultaneously
1134
+ # read data from the stream for real-time processing.
1135
+ #
1136
+ # You must specify the shard to be split and the new hash key, which is
1137
+ # the position in the shard where the shard gets split in two. In many
1138
+ # cases, the new hash key might simply be the average of the beginning
1139
+ # and ending hash key, but it can be any hash key value in the range
1140
+ # being mapped into the shard. For more information about splitting
1141
+ # shards, see [Split a Shard][1] in the *Amazon Kinesis Streams
1142
+ # Developer Guide*.
1143
+ #
1144
+ # You can use DescribeStream to determine the shard ID and hash key
1145
+ # values for the `ShardToSplit` and `NewStartingHashKey` parameters that
1146
+ # are specified in the `SplitShard` request.
1147
+ #
1148
+ # `SplitShard` is an asynchronous operation. Upon receiving a
1149
+ # `SplitShard` request, Amazon Kinesis immediately returns a response
1150
+ # and sets the stream status to `UPDATING`. After the operation is
1151
+ # completed, Amazon Kinesis sets the stream status to `ACTIVE`. Read and
1152
+ # write operations continue to work while the stream is in the
1153
+ # `UPDATING` state.
1154
+ #
1155
+ # You can use `DescribeStream` to check the status of the stream, which
1156
+ # is returned in `StreamStatus`. If the stream is in the `ACTIVE` state,
1157
+ # you can call `SplitShard`. If a stream is in the `CREATING`, `UPDATING`,
1158
+ # or `DELETING` state, `SplitShard` returns a
1159
+ # `ResourceInUseException`.
1160
+ #
1161
+ # If the specified stream does not exist, `DescribeStream` returns a
1162
+ # `ResourceNotFoundException`. If you try to create more shards than are
1163
+ # authorized for your account, you receive a `LimitExceededException`.
1164
+ #
1165
+ # For the default shard limit for an AWS account, see [Streams
1166
+ # Limits][2] in the *Amazon Kinesis Streams Developer Guide*. If you
1167
+ # need to increase this limit, [contact AWS Support][3].
1168
+ #
1169
+ # If you try to operate on too many streams simultaneously using
1170
+ # CreateStream, DeleteStream, MergeShards, and/or SplitShard, you
1171
+ # receive a `LimitExceededException`.
1172
+ #
1173
+ # `SplitShard` has a limit of 5 transactions per second per account.
1174
+ #
1175
+ #
1176
+ #
1177
+ # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-split.html
1178
+ # [2]: http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html
1179
+ # [3]: http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html
1180
+ # @option params [required, String] :stream_name
1181
+ # The name of the stream for the shard split.
1182
+ # @option params [required, String] :shard_to_split
1183
+ # The shard ID of the shard to split.
1184
+ # @option params [required, String] :new_starting_hash_key
1185
+ # A hash key value for the starting hash key of one of the child shards
1186
+ # created by the split. The hash key range for a given shard constitutes
1187
+ # a set of ordered contiguous positive integers. The value for
1188
+ # `NewStartingHashKey` must be in the range of hash keys being mapped
1189
+ # into the shard. The `NewStartingHashKey` hash key value and all higher
1190
+ # hash key values in the hash key range are distributed to one of the child
1191
+ # shards. All the lower hash key values in the range are distributed to
1192
+ # the other child shard.
1193
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
1194
+ #
1195
+ # @example Request syntax with placeholder values
1196
+ # resp = client.split_shard({
1197
+ # stream_name: "StreamName", # required
1198
+ # shard_to_split: "ShardId", # required
1199
+ # new_starting_hash_key: "HashKey", # required
1200
+ # })
1201
+ # @overload split_shard(params = {})
1202
+ # @param [Hash] params ({})
1203
+ def split_shard(params = {}, options = {})
1204
+ req = build_request(:split_shard, params)
1205
+ req.send_request(options)
1206
+ end
1207
+
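+ # A minimal sketch of splitting a shard at the midpoint of its hash key
+ # range, as suggested above (placeholder stream and shard IDs; assumes a
+ # configured client in `client` and an ACTIVE stream):
+ #
+ # desc = client.describe_stream(stream_name: "my-stream").stream_description
+ # shard = desc.shards.find { |s| s.shard_id == "shardId-000000000000" }
+ # range = shard.hash_key_range
+ # midpoint = (range.starting_hash_key.to_i + range.ending_hash_key.to_i) / 2
+ # client.split_shard(
+ #   stream_name: "my-stream",
+ #   shard_to_split: shard.shard_id,
+ #   new_starting_hash_key: midpoint.to_s,
+ # )
+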
1208
+ # Updates the shard count of the specified stream to the specified
1209
+ # number of shards.
1210
+ #
1211
+ # Updating the shard count is an asynchronous operation. Upon receiving
1212
+ # the request, Amazon Kinesis returns immediately and sets the status of
1213
+ # the stream to `UPDATING`. After the update is complete, Amazon Kinesis
1214
+ # sets the status of the stream back to `ACTIVE`. Depending on the size
1215
+ # of the stream, the scaling action could take a few minutes to
1216
+ # complete. You can continue to read and write data to your stream while
1217
+ # its status is `UPDATING`.
1218
+ #
1219
+ # To update the shard count, Amazon Kinesis performs splits or merges on
1220
+ # individual shards. This can cause short-lived shards to be
1221
+ # created, in addition to the final shards. We recommend that you double
1222
+ # or halve the shard count, as this results in the fewest number of
1223
+ # splits or merges.
1224
+ #
1225
+ # This operation has a rate limit of twice per rolling 24-hour period.
1226
+ # You cannot scale above double your current shard count, scale below
1227
+ # half your current shard count, or exceed the shard limits for your
1228
+ # account.
1229
+ #
1230
+ # For the default limits for an AWS account, see [Streams Limits][1] in
1231
+ # the *Amazon Kinesis Streams Developer Guide*. If you need to increase
1232
+ # a limit, [contact AWS Support][2].
1233
+ #
1234
+ #
1235
+ #
1236
+ # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html
1237
+ # [2]: http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html
1238
+ # @option params [required, String] :stream_name
1239
+ # The name of the stream.
1240
+ # @option params [required, Integer] :target_shard_count
1241
+ # The new number of shards.
1242
+ # @option params [required, String] :scaling_type
1243
+ # The scaling type. Uniform scaling creates shards of equal size.
1244
+ # @return [Types::UpdateShardCountOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1245
+ #
1246
+ # * {Types::UpdateShardCountOutput#stream_name #StreamName} => String
1247
+ # * {Types::UpdateShardCountOutput#current_shard_count #CurrentShardCount} => Integer
1248
+ # * {Types::UpdateShardCountOutput#target_shard_count #TargetShardCount} => Integer
1249
+ #
1250
+ # @example Request syntax with placeholder values
1251
+ # resp = client.update_shard_count({
1252
+ # stream_name: "StreamName", # required
1253
+ # target_shard_count: 1, # required
1254
+ # scaling_type: "UNIFORM_SCALING", # required, accepts UNIFORM_SCALING
1255
+ # })
1256
+ #
1257
+ # @example Response structure
1258
+ # resp.stream_name #=> String
1259
+ # resp.current_shard_count #=> Integer
1260
+ # resp.target_shard_count #=> Integer
1261
+ # @overload update_shard_count(params = {})
1262
+ # @param [Hash] params ({})
1263
+ def update_shard_count(params = {}, options = {})
1264
+ req = build_request(:update_shard_count, params)
1265
+ req.send_request(options)
1266
+ end
1267
+
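+ # A minimal sketch of doubling a stream from 2 to 4 shards, following the
+ # double-or-halve recommendation above (placeholder names; assumes a
+ # configured client in `client`):
+ #
+ # resp = client.update_shard_count(
+ #   stream_name: "my-stream",
+ #   target_shard_count: 4,
+ #   scaling_type: "UNIFORM_SCALING",
+ # )
+ # resp.current_shard_count #=> 2
+ # resp.target_shard_count #=> 4
+ # # the stream status returns to ACTIVE once resharding completes;
+ # # poll #describe_stream to observe it
+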
1268
+ # @!endgroup
1269
+
1270
+ # @param params ({})
1271
+ # @api private
1272
+ def build_request(operation_name, params = {})
1273
+ handlers = @handlers.for(operation_name)
1274
+ context = Seahorse::Client::RequestContext.new(
1275
+ operation_name: operation_name,
1276
+ operation: config.api.operation(operation_name),
1277
+ client: self,
1278
+ params: params,
1279
+ config: config)
1280
+ context[:gem_name] = 'aws-sdk-kinesis'
1281
+ context[:gem_version] = '1.0.0.rc1'
1282
+ Seahorse::Client::Request.new(handlers, context)
1283
+ end
1284
+
1285
+ # Polls an API operation until a resource enters a desired state.
1286
+ #
1287
+ # ## Basic Usage
1288
+ #
1289
+ # A waiter will call an API operation until:
1290
+ #
1291
+ # * It is successful
1292
+ # * It enters a terminal state
1293
+ # * It makes the maximum number of attempts
1294
+ #
1295
+ # In between attempts, the waiter will sleep.
1296
+ #
1297
+ # # polls in a loop, sleeping between attempts
1298
+ # client.wait_until(waiter_name, params)
1299
+ #
1300
+ # ## Configuration
1301
+ #
1302
+ # You can configure the maximum number of polling attempts, and the
1303
+ # delay (in seconds) between each polling attempt. You can pass
1304
+ # configuration as the final arguments hash.
1305
+ #
1306
+ # # poll for ~25 seconds
1307
+ # client.wait_until(waiter_name, params, {
1308
+ # max_attempts: 5,
1309
+ # delay: 5,
1310
+ # })
1311
+ #
1312
+ # ## Callbacks
1313
+ #
1314
+ # You can be notified before each polling attempt and before each
1315
+ # delay. If you throw `:success` or `:failure` from these callbacks,
1316
+ # it will terminate the waiter.
1317
+ #
1318
+ # started_at = Time.now
1319
+ # client.wait_until(waiter_name, params, {
1320
+ #
1321
+ # # disable max attempts
1322
+ # max_attempts: nil,
1323
+ #
1324
+ # # poll for 1 hour, instead of a number of attempts
1325
+ # before_wait: -> (attempts, response) do
1326
+ # throw :failure if Time.now - started_at > 3600
1327
+ # end
1328
+ # })
1329
+ #
1330
+ # ## Handling Errors
1331
+ #
1332
+ # When a waiter is unsuccessful, it will raise an error.
1333
+ # All of the failure errors extend from
1334
+ # {Aws::Waiters::Errors::WaiterFailed}.
1335
+ #
1336
+ # begin
1337
+ # client.wait_until(...)
1338
+ # rescue Aws::Waiters::Errors::WaiterFailed
1339
+ # # resource did not enter the desired state in time
1340
+ # end
1341
+ #
1342
+ # ## Valid Waiters
1343
+ #
1344
+ # The following table lists the valid waiter names, the operations they call,
1345
+ # and the default `:delay` and `:max_attempts` values.
1346
+ #
1347
+ # | waiter_name | params | :delay | :max_attempts |
1348
+ # | ------------- | ------------------ | -------- | ------------- |
1349
+ # | stream_exists | {#describe_stream} | 10 | 18 |
1350
+ #
1351
+ # @raise [Errors::FailureStateError] Raised when the waiter terminates
1352
+ # because the waiter has entered a state that it will not transition
1353
+ # out of, preventing success.
1354
+ #
1355
+ # @raise [Errors::TooManyAttemptsError] Raised when the configured
1356
+ # maximum number of attempts has been made, and the waiter is not
1357
+ # yet successful.
1358
+ #
1359
+ # @raise [Errors::UnexpectedError] Raised when an unexpected error is
1360
+ # encountered while polling for a resource.
1361
+ #
1362
+ # @raise [Errors::NoSuchWaiterError] Raised when you request to wait
1363
+ # for an unknown state.
1364
+ #
1365
+ # @return [Boolean] Returns `true` if the waiter was successful.
1366
+ # @param [Symbol] waiter_name
1367
+ # @param [Hash] params ({})
1368
+ # @param [Hash] options ({})
1369
+ # @option options [Integer] :max_attempts
1370
+ # @option options [Integer] :delay
1371
+ # @option options [Proc] :before_attempt
1372
+ # @option options [Proc] :before_wait
1373
+ def wait_until(waiter_name, params = {}, options = {})
1374
+ w = waiter(waiter_name, options)
1375
+ yield(w.waiter) if block_given? # deprecated
1376
+ w.wait(params)
1377
+ end
1378
+
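+ # A minimal sketch of the single waiter listed above, using a tighter
+ # polling schedule (placeholder stream name; assumes a configured client
+ # in `client`):
+ #
+ # begin
+ #   client.wait_until(:stream_exists, { stream_name: "my-stream" }, {
+ #     delay: 5,
+ #     max_attempts: 12,
+ #   })
+ # rescue Aws::Waiters::Errors::WaiterFailed
+ #   # the stream did not reach the desired state within ~60 seconds
+ # end
+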
1379
+ # @api private
1380
+ # @deprecated
1381
+ def waiter_names
1382
+ waiters.keys
1383
+ end
1384
+
1385
+ private
1386
+
1387
+ # @param [Symbol] waiter_name
1388
+ # @param [Hash] options ({})
1389
+ def waiter(waiter_name, options = {})
1390
+ waiter_class = waiters[waiter_name]
1391
+ if waiter_class
1392
+ waiter_class.new(options.merge(client: self))
1393
+ else
1394
+ raise Aws::Waiters::Errors::NoSuchWaiterError.new(waiter_name, waiters.keys)
1395
+ end
1396
+ end
1397
+
1398
+ def waiters
1399
+ {
1400
+ stream_exists: Waiters::StreamExists
1401
+ }
1402
+ end
1403
+
1404
+ class << self
1405
+
1406
+ # @api private
1407
+ attr_reader :identifier
1408
+
1409
+ # @api private
1410
+ def errors_module
1411
+ Errors
1412
+ end
1413
+
1414
+ end
1415
+ end
1416
+ end
1417
+ end